Example #1
void nvhost_scale3d_notify_busy(struct nvhost_module *mod)
{
    unsigned long idle;
    unsigned long short_term_idle;
    ktime_t t;

    mutex_lock(&scale3d.lock);

    if (!scale3d.enable)
        goto done;

    cancel_delayed_work(&scale3d.idle_timer);

    t = ktime_get();

    if (scale3d.is_idle) {
        idle = (unsigned long)
               ktime_us_delta(t, scale3d.last_idle);
        scale3d.idle_total += idle;
        short_term_idle =
            ktime_us_delta(t, scale3d.last_short_term_idle);
        scale3d.idle_short_term_total += short_term_idle;
        scale3d.is_idle = 0;
    }

    scaling_state_check(t);

done:
    mutex_unlock(&scale3d.lock);
}
Example #2
void nvhost_scale3d_notify_idle(struct nvhost_module *mod)
{
    ktime_t t;
    unsigned long dt;

    mutex_lock(&scale3d.lock);

    if (!scale3d.enable)
        goto done;

    t = ktime_get();

    if (scale3d.is_idle) {
        dt = ktime_us_delta(t, scale3d.last_idle);
        scale3d.idle_total += dt;
        dt = ktime_us_delta(t, scale3d.last_short_term_idle);
        scale3d.idle_short_term_total += dt;
    } else
        scale3d.is_idle = 1;

    scale3d.last_idle = t;
    scale3d.last_short_term_idle = t;

    scaling_state_check(scale3d.last_idle);

    /* delay idle_max % of 2 * fast_response time (given in microseconds) */
    schedule_delayed_work(&scale3d.idle_timer,
                          msecs_to_jiffies((scale3d.idle_max * scale3d.fast_response)
                                           / 50000));

done:
    mutex_unlock(&scale3d.lock);
}
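
The delayed-work interval in Example #2 follows directly from the comment above it: idle_max is treated as a percentage and fast_response as microseconds, so idle_max % of 2 * fast_response, converted to milliseconds, is (idle_max * fast_response) / 50000. A standalone sketch of that arithmetic (the sample values are assumptions, not driver defaults):

#include <stdio.h>

int main(void)
{
	/* assumed sample values, not driver defaults */
	unsigned long idle_max = 70;		/* percent */
	unsigned long fast_response = 7000;	/* microseconds */

	/* 70 % of 2 * 7000 us = 9800 us; integer division gives 9 ms */
	unsigned long delay_ms = (idle_max * fast_response) / 50000;

	printf("idle timer delay: %lu ms\n", delay_ms);
	return 0;
}
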
Example #3
void nvhost_scale3d_notify_idle(struct nvhost_device *dev)
{
	ktime_t t;
	unsigned long dt;

	if (!scale3d.enable)
		return;

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (scale3d.p_use_throughput_hint) {
		t = ktime_get();
		if (ktime_us_delta(t, scale3d.last_throughput_hint) < 1000000)
			return;
	}

	mutex_lock(&scale3d.lock);

	t = ktime_get();

	if (scale3d.is_idle) {
		dt = ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += dt;
		dt = ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += dt;
	} else {
		scale3d.is_idle = 1;
		gpu_loading[curr_idx].busy_time +=
			ktime_us_delta(t, scale3d.last_busy);
	}

	scale3d.last_idle = t;
	scale3d.last_short_term_idle = t;

	scaling_state_check(scale3d.last_idle);

	/* delay idle_max % of 2 * fast_response time (given in microseconds) */
	schedule_delayed_work(&scale3d.idle_timer,
		msecs_to_jiffies((scale3d.idle_max * scale3d.fast_response)
			/ 50000));

	mutex_unlock(&scale3d.lock);
}
Example #4
void nvhost_scale3d_notify_busy(struct nvhost_device *dev)
{
	unsigned long idle;
	unsigned long short_term_idle;
	ktime_t t;

	if (!scale3d.enable)
		return;

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (scale3d.p_use_throughput_hint) {
		t = ktime_get();
		if (ktime_us_delta(t, scale3d.last_throughput_hint) < 1000000)
			return;
	}

	mutex_lock(&scale3d.lock);

	cancel_delayed_work(&scale3d.idle_timer);

	t = ktime_get();

	if (scale3d.is_idle) {
		idle = (unsigned long)
			ktime_us_delta(t, scale3d.last_idle);
		scale3d.idle_total += idle;
		short_term_idle =
			ktime_us_delta(t, scale3d.last_short_term_idle);
		scale3d.idle_short_term_total += short_term_idle;
		scale3d.is_idle = 0;
	} else {
		gpu_loading[curr_idx].busy_time +=
			ktime_us_delta(t, scale3d.last_busy);
	}

	scale3d.last_busy = t;
	scaling_state_check(t);

	mutex_unlock(&scale3d.lock);
}
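
Examples #3 and #4 additionally accumulate busy time into a gpu_loading slot whenever the device enters or leaves the busy state. A standalone sketch of how such a slot could be turned into a utilization percentage; the struct layout beyond busy_time, the window length, and the helper name are illustrative assumptions, not taken from the driver:

#include <stdio.h>

/* Mock of the slot type: only busy_time is taken from the examples above. */
struct gpu_loading_slot {
	unsigned long busy_time;	/* microseconds busy in this window */
};

static unsigned int gpu_load_percent(const struct gpu_loading_slot *slot,
				     unsigned long window_us)
{
	unsigned long busy = slot->busy_time;

	if (!window_us)
		return 0;
	if (busy > window_us)
		busy = window_us;
	return (unsigned int)((busy * 100) / window_us);
}

int main(void)
{
	struct gpu_loading_slot slot = { .busy_time = 7300 };

	/* 7300 us busy out of a 10000 us window -> 73 % load */
	printf("load: %u %%\n", gpu_load_percent(&slot, 10000));
	return 0;
}
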
Example #5
static int nvhost_pod_estimate_freq(struct devfreq *df,
				    unsigned long *freq)
{
	struct podgov_info_rec *podgov = df->data;
	struct devfreq_dev_status dev_stat;
	struct nvhost_devfreq_ext_stat *ext_stat;
	long delay;
	int current_event;
	int stat;
	ktime_t now;

	stat = df->profile->get_dev_status(df->dev.parent, &dev_stat);
	if (stat < 0)
		return stat;

	/* Ensure maximal clock when scaling is disabled */
	if (!podgov->enable) {
		*freq = df->max_freq;
		return 0;
	}

	if (podgov->p_user) {
		*freq = podgov->p_freq_request;
		return 0;
	}

	current_event = DEVICE_IDLE;
	stat = 0;
	now = ktime_get();

	/* Local adjustments (i.e. requests from kernel threads) are
	 * handled here */

	if (podgov->adjustment_type == ADJUSTMENT_LOCAL) {

		podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;

		/* Do not do unnecessary scaling */
		scaling_limit(df, &podgov->adjustment_frequency);
		if (df->previous_freq == podgov->adjustment_frequency)
			return GET_TARGET_FREQ_DONTSCALE;

		trace_podgov_estimate_freq(df->previous_freq,
			podgov->adjustment_frequency);

		*freq = podgov->adjustment_frequency;
		return 0;
	}

	/* Retrieve extended data */
	ext_stat = dev_stat.private_data;
	if (!ext_stat)
		return -EINVAL;

	current_event = ext_stat->busy;
	*freq = dev_stat.current_frequency;
	df->min_freq = ext_stat->min_freq;
	df->max_freq = ext_stat->max_freq;

	/* Sustain local variables */
	podgov->last_event_type = current_event;
	podgov->total_idle += (dev_stat.total_time - dev_stat.busy_time);
	podgov->last_total_idle += (dev_stat.total_time - dev_stat.busy_time);

	/* update the load estimate based on idle time */
	update_load_estimate(df);

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (podgov->p_use_throughput_hint &&
		ktime_us_delta(now, podgov->last_throughput_hint) < 1000000)
		return GET_TARGET_FREQ_DONTSCALE;

	switch (current_event) {

	case DEVICE_IDLE:
		/* delay idle_max % of 2 * p_estimation_window (given in
		 * microseconds) */
		*freq = scaling_state_check(df, now);
		delay = (podgov->idle_max * podgov->p_estimation_window)
			/ 500000;
		schedule_delayed_work(&podgov->idle_timer,
			msecs_to_jiffies(delay));
		break;
	case DEVICE_BUSY:
		cancel_delayed_work(&podgov->idle_timer);
		*freq = scaling_state_check(df, now);
		break;
	case DEVICE_UNKNOWN:
		*freq = scaling_state_check(df, now);
		break;

	}

	if (!(*freq) || (*freq == df->previous_freq))
		return GET_TARGET_FREQ_DONTSCALE;

	trace_podgov_estimate_freq(df->previous_freq, *freq);

	return 0;
}
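
nvhost_pod_estimate_freq() above is the get_target_freq callback of a devfreq governor. A minimal registration sketch, assuming a kernel where governors are added with devfreq_add_governor(); the governor name, the stub event handler, and the init function are illustrative assumptions, not taken from the examples:

#include <linux/devfreq.h>
#include <linux/module.h>
#include "governor.h"	/* devfreq core header declaring devfreq_add_governor() */

/* Assumed stub: a real handler would arm/cancel the idle timer on
 * governor start/stop events. */
static int nvhost_pod_event_handler(struct devfreq *df,
				    unsigned int event, void *data)
{
	return 0;
}

static struct devfreq_governor nvhost_pod_governor = {
	.name = "nvhost_podgov",
	.get_target_freq = nvhost_pod_estimate_freq,
	.event_handler = nvhost_pod_event_handler,
};

static int __init nvhost_pod_governor_init(void)
{
	return devfreq_add_governor(&nvhost_pod_governor);
}
module_init(nvhost_pod_governor_init);
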
Example #6
static int nvhost_pod_estimate_freq(struct devfreq *df,
				    unsigned long *freq)
{
	struct podgov_info_rec *podgov = df->data;
	struct devfreq_dev_status dev_stat;
	struct nvhost_devfreq_ext_stat *ext_stat;
	int current_event;
	int stat;
	ktime_t now;

	stat = df->profile->get_dev_status(df->dev.parent, &dev_stat);
	if (stat < 0)
		return stat;

	/* Ensure maximal clock when scaling is disabled */
	if (!podgov->enable) {
		*freq = df->max_freq;
		return 0;
	}

	if (podgov->p_user) {
		*freq = podgov->p_freq_request;
		return 0;
	}

	current_event = DEVICE_IDLE;
	stat = 0;
	now = ktime_get();

	/* Local adjustments (i.e. requests from kernel threads) are
	 * handled here */

	if (podgov->adjustment_type == ADJUSTMENT_LOCAL) {

		podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;

		/* Do not do unnecessary scaling */
		scaling_limit(df, &podgov->adjustment_frequency);

		/* Round the frequency and check if we're already there */
		if (freqlist_up(podgov, podgov->adjustment_frequency, 0) ==
		    dev_stat.current_frequency)
			return GET_TARGET_FREQ_DONTSCALE;

		trace_podgov_estimate_freq(df->previous_freq,
			podgov->adjustment_frequency);

		*freq = podgov->adjustment_frequency;
		return 0;
	}

	/* Retrieve extended data */
	ext_stat = dev_stat.private_data;
	if (!ext_stat)
		return -EINVAL;

	current_event = ext_stat->busy;
	*freq = dev_stat.current_frequency;
	df->min_freq = ext_stat->min_freq;
	df->max_freq = ext_stat->max_freq;

	/* Sustain local variables */
	podgov->last_event_type = current_event;
	podgov->idle = 1000 * (dev_stat.total_time - dev_stat.busy_time);
	podgov->idle = podgov->idle / dev_stat.total_time;
	podgov->idle_avg = (podgov->p_smooth * podgov->idle_avg) +
		podgov->idle;
	podgov->idle_avg = podgov->idle_avg / (podgov->p_smooth + 1);

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (podgov->p_use_throughput_hint &&
		ktime_us_delta(now, podgov->last_throughput_hint) < 1000000)
		return GET_TARGET_FREQ_DONTSCALE;

	switch (current_event) {

	case DEVICE_IDLE:
		/* Launch a work to slowdown the gpu */
		*freq = scaling_state_check(df, now);
		schedule_delayed_work(&podgov->idle_timer,
			msecs_to_jiffies(podgov->p_slowdown_delay));
		break;
	case DEVICE_BUSY:
		cancel_delayed_work(&podgov->idle_timer);
		*freq = scaling_state_check(df, now);
		break;
	}

	if (!(*freq) ||
	    (freqlist_up(podgov, *freq, 0) == dev_stat.current_frequency))
		return GET_TARGET_FREQ_DONTSCALE;

	podgov->last_scale = now;

	trace_podgov_estimate_freq(df->previous_freq, *freq);

	return 0;
}
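
Example #6 replaces the update_load_estimate() helper of Example #5 with an inline moving average: idle is the idle share of the sampling window in tenths of a percent, and idle_avg is an exponentially smoothed value weighted by p_smooth. A standalone sketch of that averaging step (the sample values are assumptions, not driver defaults):

#include <stdio.h>

int main(void)
{
	/* assumed sample values, not driver defaults */
	unsigned long p_smooth = 10;
	unsigned long idle_avg = 600;		/* running average: 60.0 % idle */
	unsigned long total_time = 10000;	/* us in this window */
	unsigned long busy_time = 1800;		/* us busy in this window */

	/* idle share of the window in tenths of a percent: 82.0 % -> 820 */
	unsigned long idle = (1000 * (total_time - busy_time)) / total_time;

	/* smoothing as in Example #6: (10 * 600 + 820) / 11 = 620 */
	idle_avg = ((p_smooth * idle_avg) + idle) / (p_smooth + 1);

	printf("idle = %lu, idle_avg = %lu (tenths of a percent)\n",
	       idle, idle_avg);
	return 0;
}
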