Example #1
static int update_core_config(unsigned int cpunumber, bool up)
{
	int ret = -EINVAL;
	unsigned int nr_cpus = num_online_cpus();
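	/* GNU "?:" with an omitted middle operand: use the QoS request value
	 * if it is non-zero, otherwise default to a cap of 4 CPUs. */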
	int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

	if (cpq_state == TEGRA_CPQ_DISABLED || cpunumber >= nr_cpu_ids)
		return ret;

	if (up) {
		if (is_lp_cluster()) {
			cpumask_set_cpu(cpunumber, &cr_online_requests);
			ret = -EBUSY;
		} else {
			if (tegra_cpu_edp_favor_up(nr_cpus, mp_overhead) &&
			    nr_cpus < max_cpus)
				ret = cpu_up(cpunumber);
		}
	} else {
		if (is_lp_cluster()) {
			ret = -EBUSY;
		} else {
			if (nr_cpus > min_cpus)
				ret = cpu_down(cpunumber);
		}
	}

	return ret;
}
Example #2
static int gk20a_scale_qos_notify(struct notifier_block *nb,
				  unsigned long n, void *p)
{
	struct gk20a_scale_profile *profile =
		container_of(nb, struct gk20a_scale_profile,
			     qos_notify_block);
	struct gk20a_platform *platform = platform_get_drvdata(profile->pdev);
	struct gk20a *g = get_gk20a(profile->pdev);
	unsigned long freq;

	if (!platform->postscale)
		return NOTIFY_OK;

	/* get the frequency requirement. if devfreq is enabled, check if it
	 * has higher demand than qos */
	freq = gk20a_clk_round_rate(g, pm_qos_request(platform->qos_id));
	if (g->devfreq)
		freq = max(g->devfreq->previous_freq, freq);

	/* Update gpu load because we may scale the emc target
	 * if the gpu load changed. */
	gk20a_pmu_load_update(g);
	platform->postscale(profile->pdev, freq);

	return NOTIFY_OK;
}
Example #3
unsigned int cpufreq_get_touch_boost_press(void)
{

	touch_boost_press_value = pm_qos_request(PM_QOS_TOUCH_PRESS);

	return touch_boost_press_value;
}
Example #4
/**
 * ladder_select_state - selects the next state to enter
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int ladder_select_state(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
	struct ladder_device_state *last_state;
	int last_residency, last_idx = ldev->last_state_idx;
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		ladder_do_selection(ldev, last_idx, 0);
		return 0;
	}

	last_state = &ldev->states[last_idx];

	last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;

	/* consider promotion */
	if (last_idx < drv->state_count - 1 &&
	    !drv->states[last_idx + 1].disabled &&
	    !dev->states_usage[last_idx + 1].disable &&
	    last_residency > last_state->threshold.promotion_time &&
	    drv->states[last_idx + 1].exit_latency <= latency_req) {
		last_state->stats.promotion_count++;
		last_state->stats.demotion_count = 0;
		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx + 1);
			return last_idx + 1;
		}
	}

	/* consider demotion */
	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
	    (drv->states[last_idx].disabled ||
	    dev->states_usage[last_idx].disable ||
	    drv->states[last_idx].exit_latency > latency_req)) {
		int i;

		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
			if (drv->states[i].exit_latency <= latency_req)
				break;
		}
		ladder_do_selection(ldev, last_idx, i);
		return i;
	}

	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
	    last_residency < last_state->threshold.demotion_time) {
		last_state->stats.demotion_count++;
		last_state->stats.promotion_count = 0;
		if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
			ladder_do_selection(ldev, last_idx, last_idx - 1);
			return last_idx - 1;
		}
	}

	/* otherwise remain at the current state */
	return last_idx;
}
Example #5
static ssize_t show_bus_int_freq_min(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	unsigned int ret = 0;
	ret = sprintf(buf, "%d\n", pm_qos_request(PM_QOS_DEVICE_THROUGHPUT));
	return ret;
}
Example #6
static ssize_t show_bimc_freq_min(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	unsigned int ret = 0;
	ret = sprintf(buf, "%d\n", pm_qos_request(PM_QOS_BIMC_FREQ_MIN));
	return ret;
}
Example #7
static noinline int tegra_cpu_speed_balance(void)
{
    unsigned long highest_speed = tegra_cpu_highest_speed();
    unsigned long balanced_speed = highest_speed * balance_level / 100;
    unsigned long skewed_speed = balanced_speed / 2;
    unsigned int nr_cpus = num_online_cpus();
    unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
    unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

    /* balanced: freq targets for all CPUs are above 50% of highest speed
       biased: freq target for at least one CPU is below 50% threshold
       skewed: freq targets for at least 2 CPUs are below 25% threshold */
    if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
            tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
            (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
            (nr_cpus > min_cpus))
        return TEGRA_CPU_SPEED_SKEWED;

    if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
            (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
            (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
            (nr_cpus >= min_cpus))
        return TEGRA_CPU_SPEED_BIASED;

    return TEGRA_CPU_SPEED_BALANCED;
}
Example #8
static void update_runnables_state(void)
{
	unsigned int nr_cpus = num_online_cpus();
	int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
	unsigned int avg_nr_run = avg_nr_running();
	unsigned int nr_run;

	if (runnables_state == DISABLED)
		return;

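	/* Walk the runnable-threads thresholds: stop at the first nr_run whose
	 * (hysteresis-adjusted) threshold is not exceeded by the load average. */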
	for (nr_run = 1; nr_run < ARRAY_SIZE(nr_run_thresholds); nr_run++) {
		unsigned int nr_threshold = nr_run_thresholds[nr_run - 1];
		if (nr_run_last <= nr_run)
			nr_threshold += NR_FSHIFT / nr_run_hysteresis;
		if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT_EXP)))
			break;
	}
	nr_run_last = nr_run;

	if ((nr_cpus > max_cpus || nr_run < nr_cpus) && nr_cpus >= min_cpus) {
		runnables_state = DOWN;
	} else if (nr_cpus < min_cpus || nr_run > nr_cpus) {
		runnables_state =  UP;
	} else {
		runnables_state = IDLE;
	}
}
Example #9
static ssize_t show_cpu_online_min(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	unsigned int ret = 0;
	ret = sprintf(buf, "%d\n", pm_qos_request(PM_QOS_CPU_ONLINE_MIN));
	return ret;
}
Example #10
static int devfreq_simple_ondemand_func(struct devfreq *df,
					unsigned long *freq)
{
	struct devfreq_dev_status stat;
	int err;
	unsigned long long a, b;
	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
	unsigned int dfso_multiplication_weight = DFSO_WEIGHT;
	struct devfreq_simple_ondemand_data *data = df->data;
	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
	unsigned long pm_qos_min = 0;

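	/* If the PM QoS floor already reaches the calibrated maximum,
	 * return it directly as the target frequency. */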
	if (data) {
		pm_qos_min = pm_qos_request(data->pm_qos_class);
		if (pm_qos_min >= data->cal_qos_max) {
			*freq = pm_qos_min;
			return 0;
		}
	}

	if (df->profile->get_dev_status) {
		err = df->profile->get_dev_status(df->dev.parent, &stat);
	} else {
		*freq = pm_qos_min;
		return 0;
	}

	if (err)
		return err;

	if (data) {
		if (data->upthreshold)
			dfso_upthreshold = data->upthreshold;
		if (data->downdifferential)
			dfso_downdifferential = data->downdifferential;
		if (data->multiplication_weight)
			dfso_multiplication_weight = data->multiplication_weight;
	}
	if (dfso_upthreshold > 100 ||
	    dfso_upthreshold < dfso_downdifferential)
		return -EINVAL;

	if (data && data->cal_qos_max)
		max = (df->max_freq) ? df->max_freq : 0;

	/* Assume MAX if it is going to be divided by zero */
	if (stat.total_time == 0) {
		if (data && data->cal_qos_max)
			max = max3(max, data->cal_qos_max, pm_qos_min);
		*freq = max;
		return 0;
	}

	/* Prevent overflow */
	if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
		stat.busy_time >>= 7;
		stat.total_time >>= 7;
	}
Example #11
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (num_online_cpus() >= pm_qos_request(PM_QOS_CPU_ONLINE_MAX))
		return 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
Example #12
static ssize_t show_cpufreq_max(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	unsigned int ret = 0;
	max_cpu_freq = pm_qos_request(PM_QOS_CPU_FREQ_MAX);
	ret += snprintf(buf + ret, PAGE_SIZE - ret, "%d\n", max_cpu_freq);

	return ret;
}
Example #13
static ssize_t store_cpu_online_max(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	set_pmqos_data(cpu_online_max_qos_array, PM_QOS_CPU_ONLINE_MAX, buf);
	if (num_online_cpus() > pm_qos_request(PM_QOS_CPU_ONLINE_MAX))
		cpu_down(1);

	return count;
}
Example #14
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		seq_puts(seq, "promotion[--] ");

		seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020Lu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   us_to_pm_timer_ticks(pr->power.states[i].time));
	}

      end:
	return 0;
}
Example #15
static ssize_t store_cpu_online_min(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	set_pmqos_data(cpu_online_min_qos_array, PM_QOS_CPU_ONLINE_MIN, buf);

	if (num_online_cpus() < pm_qos_request(PM_QOS_CPU_ONLINE_MIN)) {
		pr_info("%s cpu_up\n", __func__);
		cpu_up(1);
	}
	return count;
}
Example #16
/*
 * find_coupled_state - Find the maximum state the platform can enter
 *
 * @index: pointer to variable which stores the maximum state
 * @cluster: cluster number
 *
 * Must be called with mmp_lpm_lock held
 */
static void find_coupled_state(int *index, int cluster)
{
    int i;
    int platform_lpm = DEFAULT_LPM_FLAG;

    for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
        platform_lpm &= mmp_enter_lpm[cluster][i];

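    /* Deepest state allowed: the first LPM level not requested by every CPU
     * in the cluster, further limited by the PM_QOS_CPUIDLE_BLOCK constraint. */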
    *index = min(find_first_zero_bit((void *)&platform_lpm, LPM_NUM),
                 pm_qos_request(PM_QOS_CPUIDLE_BLOCK)) - 1;
}
Example #17
static noinline int tegra_cpu_speed_balance(void)
{
	unsigned long highest_speed = tegra_cpu_highest_speed();
	unsigned long balanced_speed = highest_speed * balance_level / 100;
	unsigned long skewed_speed = balanced_speed / 2;
	unsigned int nr_cpus = num_online_cpus();
	unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
	unsigned int avg_nr_run = avg_nr_running();
	unsigned int nr_run;

	/* Evaluate:
	 * - distribution of freq targets for already on-lined CPUs
	 * - average number of runnable threads
	 * - effective MIPS available within EDP frequency limits,
	 * and return:
	 * TEGRA_CPU_SPEED_BALANCED to bring one more CPU core on-line
	 * TEGRA_CPU_SPEED_BIASED to keep CPU core composition unchanged
	 * TEGRA_CPU_SPEED_SKEWED to take one CPU core off-line
	 */

	unsigned int *current_profile = rt_profiles[rt_profile_sel];
	for (nr_run = 1; nr_run < ARRAY_SIZE(rt_profile_default); nr_run++) {
		unsigned int nr_threshold = current_profile[nr_run - 1];
		if (nr_run_last <= nr_run)
			nr_threshold += nr_run_hysteresis;
		if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
			break;
	}
	nr_run_last = nr_run;

#ifdef CONFIG_MACH_X3
	if (threads_count_hotplug_control_enable == 0 && highest_speed >= 640000)
		nr_run++;
#endif

	if (((tegra_count_slow_cpus(skewed_speed) >= 2) ||
	     (nr_run < nr_cpus) ||
	     tegra_cpu_edp_favor_down(nr_cpus, mp_overhead) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus > max_cpus)) &&
	    (nr_cpus > min_cpus))
		return TEGRA_CPU_SPEED_SKEWED;

	if (((tegra_count_slow_cpus(balanced_speed) >= 1) ||
	     (nr_run <= nr_cpus) ||
	     (!tegra_cpu_edp_favor_up(nr_cpus, mp_overhead)) ||
	     (highest_speed <= idle_bottom_freq) || (nr_cpus == max_cpus)) &&
	    (nr_cpus >= min_cpus))
		return TEGRA_CPU_SPEED_BIASED;

	return TEGRA_CPU_SPEED_BALANCED;
}
Example #18
static ssize_t __ref store_cpu_online_min(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	int ret = 0;
	ret = set_pmqos_data(cpu_online_min_qos_array, PM_QOS_CPU_ONLINE_MIN, buf);
	if (ret)
		return ret;

	if (num_online_cpus() < pm_qos_request(PM_QOS_CPU_ONLINE_MIN))
		cpu_up(1);

	return count;
}
Example #19
static int debugfs_request_value(int users)
{
	struct debugfs_pm_qos_user *user = NULL;
	struct pm_qos_request *req;

	if (users > pm_qos_users) {
		pr_info("[DDR DEVFREQ DEBUGFS] no such user\n");
		return -1;
	}

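	/* user numbers passed in are 1-based; convert to an index into pm_qos_user[] */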
	users--;
	user = pm_qos_user[users];
	req = &user->req;
	return pm_qos_request(req->pm_qos_class);
}
Example #20
static void tegra_cpuquiet_work_func(struct work_struct *work)
{
	int device_busy = -1;

	mutex_lock(tegra3_cpu_lock);

	switch (cpq_state) {
		case TEGRA_CPQ_DISABLED:
		case TEGRA_CPQ_IDLE:
			break;
		case TEGRA_CPQ_SWITCH_TO_G:
			if (is_lp_cluster()) {
				if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
					/* catch-up with governor target speed */
					tegra_cpu_set_speed_cap(NULL);
					/* process pending core requests */
					device_busy = 0;
				}
			}
			break;
		case TEGRA_CPQ_SWITCH_TO_LP:
			if (!is_lp_cluster() && !no_lp &&
				!pm_qos_request(PM_QOS_MIN_ONLINE_CPUS)
				&& num_online_cpus() == 1) {
				if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
					/* catch-up with governor target speed */
					tegra_cpu_set_speed_cap(NULL);
					device_busy = 1;
				}
			}
			break;
		default:
			pr_err("%s: invalid tegra hotplug state %d\n",
		       __func__, cpq_state);
	}

	mutex_unlock(tegra3_cpu_lock);

	if (device_busy == 1) {
		cpuquiet_device_busy();
	} else if (!device_busy) {
		apply_core_config();
		cpuquiet_device_free();
	}
}
Example #21
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
	if (!is_g_cluster_present())
		return;

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	if (suspend) {
		cpq_state = TEGRA_CPQ_IDLE;

		/* Switch to G-mode if suspend rate is high enough */
		if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
			clk_set_parent(cpu_clk, cpu_g_clk);
			cpuquiet_device_free();
		}
		return;
	}

	if (is_lp_cluster() && pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
		if (cpq_state != TEGRA_CPQ_SWITCH_TO_G) {
			/* Force switch */
			cpq_state = TEGRA_CPQ_SWITCH_TO_G;
			queue_delayed_work(
				cpuquiet_wq, &cpuquiet_work, up_delay);
		}
		return;
	}

	if (is_lp_cluster() && (cpu_freq >= idle_top_freq || no_lp)) {
		cpq_state = TEGRA_CPQ_SWITCH_TO_G;
		queue_delayed_work(cpuquiet_wq, &cpuquiet_work, up_delay);
	} else if (!is_lp_cluster() && !no_lp &&
		   cpu_freq <= idle_bottom_freq) {
		cpq_state = TEGRA_CPQ_SWITCH_TO_LP;
		queue_delayed_work(cpuquiet_wq, &cpuquiet_work, down_delay);
	} else {
		cpq_state = TEGRA_CPQ_IDLE;
	}
}
Example #22
static void min_max_constraints_workfunc(struct work_struct *work)
{
	int count = -1;
	bool up = false;
	unsigned int cpu;

	int nr_cpus = num_online_cpus();
	int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS) ? : 4;
	int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);

	if (cpq_state == TEGRA_CPQ_DISABLED)
		return;

	if (is_lp_cluster())
		return;

	if (nr_cpus < min_cpus) {
		up = true;
		count = min_cpus - nr_cpus;
	} else if (nr_cpus > max_cpus && max_cpus >= min_cpus) {
		count = nr_cpus - max_cpus;
	}

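	/* Bring CPUs online or offline one at a time until the PM QoS
	 * min/max online-CPU constraints are met. */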
	for (; count > 0; count--) {
		if (up) {
			cpu = cpumask_next_zero(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpu_up(cpu);
			else
				break;
		} else {
			cpu = cpumask_next(0, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpu_down(cpu);
			else
				break;
		}
	}
}
Example #23
int __ref cpu_down(unsigned int cpu)
{
	int err;

	if (num_online_cpus() <= pm_qos_request(PM_QOS_CPU_ONLINE_MIN))
		return 0;

	cpu_maps_update_begin();
	sec_debug_task_log_msg(cpu, "cpudown+");

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	pr_info("_cpu_down ret=%d\n", err);
	sec_debug_task_log_msg(cpu, "cpudown-");
	cpu_maps_update_done();
	return err;
}
Example #24
static int nvhost_scale_qos_notify(struct notifier_block *nb,
				   unsigned long n, void *p)
{
	struct nvhost_device_profile *profile =
		container_of(nb, struct nvhost_device_profile,
			     qos_notify_block);
	struct nvhost_device_data *pdata = platform_get_drvdata(profile->pdev);
	unsigned long freq;

	if (!pdata->scaling_post_cb)
		return NOTIFY_OK;

	/* get the frequency requirement. if devfreq is enabled, check if it
	 * has higher demand than qos */
	freq = clk_round_rate(clk_get_parent(profile->clk),
			      pm_qos_request(pdata->qos_id));
	if (pdata->power_manager)
		freq = max(pdata->power_manager->previous_freq, freq);

	pdata->scaling_post_cb(profile, freq);

	return NOTIFY_OK;
}
Example #25
static int max_cpus_get(void *data, u64 *val)
{
    *val = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
    return 0;
}
Example #26
void tegra_auto_hotplug_governor(unsigned int cpu_freq, bool suspend)
{
    unsigned long up_delay, top_freq, bottom_freq;

    if (!is_g_cluster_present())
        return;

    if (hp_state == TEGRA_HP_DISABLED)
        return;

    if (suspend) {
        hp_state = TEGRA_HP_IDLE;

        /* Switch to G-mode if suspend rate is high enough */
        if (is_lp_cluster() && (cpu_freq >= idle_bottom_freq)) {
            if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, false);
                hp_stats_update(0, true);
            }
        }
        return;
    }

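    /* Pick per-cluster thresholds: on the LP cluster there is no bottom
     * frequency, since the only transition out of LP is up to the G cluster. */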
    if (is_lp_cluster()) {
        up_delay = up2g0_delay;
        top_freq = idle_top_freq;
        bottom_freq = 0;
    } else {
        up_delay = up2gn_delay;
        top_freq = idle_bottom_freq;
        bottom_freq = idle_bottom_freq;
    }

    if (pm_qos_request(PM_QOS_MIN_ONLINE_CPUS) >= 2) {
        if (hp_state != TEGRA_HP_UP) {
            hp_state = TEGRA_HP_UP;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, up_delay);
        }
        return;
    }

    switch (hp_state) {
    case TEGRA_HP_IDLE:
        if (cpu_freq > top_freq) {
            hp_state = TEGRA_HP_UP;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, up_delay);
        } else if (cpu_freq <= bottom_freq) {
            hp_state = TEGRA_HP_DOWN;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, down_delay);
        }
        break;
    case TEGRA_HP_DOWN:
        if (cpu_freq > top_freq) {
            hp_state = TEGRA_HP_UP;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, up_delay);
        } else if (cpu_freq > bottom_freq) {
            hp_state = TEGRA_HP_IDLE;
        }
        break;
    case TEGRA_HP_UP:
        if (cpu_freq <= bottom_freq) {
            hp_state = TEGRA_HP_DOWN;
            queue_delayed_work(
                hotplug_wq, &hotplug_work, down_delay);
        } else if (cpu_freq <= top_freq) {
            hp_state = TEGRA_HP_IDLE;
        }
        break;
    default:
        pr_err("%s: invalid tegra hotplug state %d\n",
               __func__, hp_state);
        BUG();
    }
}
Example #27
static void tegra_auto_hotplug_work_func(struct work_struct *work)
{
    bool up = false;
    unsigned int cpu = nr_cpu_ids;
    unsigned long now = jiffies;
    static unsigned long last_change_time;

    mutex_lock(tegra3_cpu_lock);

    switch (hp_state) {
    case TEGRA_HP_DISABLED:
    case TEGRA_HP_IDLE:
        break;
    case TEGRA_HP_DOWN:
        cpu = tegra_get_slowest_cpu_n();
        if (cpu < nr_cpu_ids) {
            up = false;
        } else if (!is_lp_cluster() && !no_lp &&
                   !pm_qos_request(PM_QOS_MIN_ONLINE_CPUS)) {
            if (!clk_set_parent(cpu_clk, cpu_lp_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, true);
                hp_stats_update(0, false);
                /* catch-up with governor target speed */
                tegra_cpu_set_speed_cap(NULL);
                break;
            }
        }
        queue_delayed_work(
            hotplug_wq, &hotplug_work, down_delay);
        break;
    case TEGRA_HP_UP:
        if (is_lp_cluster() && !no_lp) {
            if (!clk_set_parent(cpu_clk, cpu_g_clk)) {
                hp_stats_update(CONFIG_NR_CPUS, false);
                hp_stats_update(0, true);
                /* catch-up with governor target speed */
                tegra_cpu_set_speed_cap(NULL);
            }
        } else {
            switch (tegra_cpu_speed_balance()) {
            /* cpu speed is up and balanced - one more on-line */
            case TEGRA_CPU_SPEED_BALANCED:
                cpu = cpumask_next_zero(0, cpu_online_mask);
                if (cpu < nr_cpu_ids)
                    up = true;
                break;
            /* cpu speed is up, but skewed - remove one core */
            case TEGRA_CPU_SPEED_SKEWED:
                cpu = tegra_get_slowest_cpu_n();
                if (cpu < nr_cpu_ids)
                    up = false;
                break;
            /* cpu speed is up, but under-utilized - do nothing */
            case TEGRA_CPU_SPEED_BIASED:
            default:
                break;
            }
        }
        queue_delayed_work(
            hotplug_wq, &hotplug_work, up2gn_delay);
        break;
    default:
        pr_err("%s: invalid tegra hotplug state %d\n",
               __func__, hp_state);
    }

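    /* Rate-limit offlining: skip the down operation if the last CPU
     * configuration change happened less than down_delay ago. */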
    if (!up && ((now - last_change_time) < down_delay))
        cpu = nr_cpu_ids;

    if (cpu < nr_cpu_ids) {
        last_change_time = now;
        hp_stats_update(cpu, up);
    }
    mutex_unlock(tegra3_cpu_lock);

    if (cpu < nr_cpu_ids) {
        if (up) {
            printk("cpu_up(%u)+\n",cpu);
            cpu_up(cpu);
            printk("cpu_up(%u)-\n",cpu);
        } else {
            printk("cpu_down(%u)+\n",cpu);
            cpu_down(cpu);
            printk("cpu_down(%u)-\n",cpu);
        }
    }
}
Example #28
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = div_round64((uint64_t)data->expected_us *
					 data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}
Example #29
static int devfreq_simple_usage_func(struct devfreq *df, unsigned long *freq)
{
	struct devfreq_dev_status stat;
	int err = df->profile->get_dev_status(df->dev.parent, &stat);
	unsigned long long a, b;
	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
	unsigned int dfso_target_percentage = DFSO_TARGET_PERCENTAGE;
	unsigned int dfso_proportional = DFSO_PROPORTIONAL;
	unsigned int dfso_multiplication_weight = DFSO_WEIGHT;
	struct devfreq_simple_usage_data *data = df->data;
	unsigned long max = (df->max_freq) ? df->max_freq : 0;
	unsigned long pm_qos_min = 0;

	if (!data)
		return -EINVAL;

	if (!df->disabled_pm_qos)
		pm_qos_min = pm_qos_request(data->pm_qos_class);

	if (err)
		return err;

	if (data->upthreshold)
		dfso_upthreshold = data->upthreshold;
	if (data->target_percentage)
		dfso_target_percentage = data->target_percentage;
	if (data->proportional)
		dfso_proportional = data->proportional;
	if (data->multiplication_weight)
		dfso_multiplication_weight = data->multiplication_weight;

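	/* Weighted load in percent: (busy_time * weight / 100) * proportional,
	 * divided by total_time. */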
	a = stat.busy_time * dfso_multiplication_weight;
	a = div64_u64(a, 100);
	a = a * dfso_proportional;
	b = div64_u64(a, stat.total_time);

	/* If percentage is larger than upthreshold, set with max freq */
	if (b >= dfso_upthreshold) {
		max = max(data->cal_qos_max, pm_qos_min);
		*freq = max;

		if (*freq > df->max_freq)
			*freq = df->max_freq;

		return 0;
	}

	b *= stat.current_frequency;

	a = div64_u64(b, dfso_target_percentage);

	if (a > data->cal_qos_max)
		a = data->cal_qos_max;

	*freq = (unsigned long) a;

	if (pm_qos_min && *freq < pm_qos_min)
		*freq = pm_qos_min;

	return 0;
}
Example #30
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
#ifdef CONFIG_SKIP_IDLE_CORRELATION
	if (dev->skip_idle_correlation)
		data->predicted_us = data->expected_us;
	else
#endif
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	/* This patch is not checked */
#ifndef CONFIG_CPU_THERMAL_IPA
	get_typical_interval(data);
#else
	/*
	 * HACK - Ignore repeating patterns when we're
	 * forecasting a very large idle period.
	 */
	if (data->predicted_us < MAX_INTERESTING)
		get_typical_interval(data);
#endif

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
		dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	return data->last_state_idx;
}