Example #1
0
/*
 * Consume the vtime delta accumulated for @tsk since the last snapshot
 * and advance the snapshot timestamp past it.
 *
 * Must not run while the task is accounted as sleeping (warns once if
 * it does).  Returns the consumed delta converted to cputime_t.
 */
static cputime_t get_vtime_delta(struct task_struct *tsk)
{
    unsigned long long nsecs = vtime_delta(tsk);

    WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
    tsk->vtime_snap += nsecs;

    /* CHECKME: always safe to convert nsecs to cputime? */
    return nsecs_to_cputime(nsecs);
}
Example #2
0
/*
 * Reconcile the noisy tick based user/system split with the precise
 * runtime tracked by the scheduler, reporting monotonic results.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct cputime *prev,
                           cputime_t *ut, cputime_t *st)
{
    cputime_t sched_time, sys, tick_time;

    /*
     * Full vtime accounting is already precise: hand the raw
     * numbers back untouched.
     */
    if (vtime_accounting_enabled()) {
        *ut = curr->utime;
        *st = curr->stime;
        return;
    }

    sys = curr->stime;
    tick_time = sys + curr->utime;

    /*
     * Tick based accounting samples whichever task happens to run
     * when the timer fires, so the user/system split carries random
     * sampling noise.  Rescale the tick based figures so that their
     * sum matches the exact runtime accounted by the CFS scheduler.
     */
    sched_time = nsecs_to_cputime(curr->sum_exec_runtime);

    if (!sched_time)
        sys = 0;
    else if (!tick_time)
        sys = sched_time;
    else
        sys = scale_stime((__force u64)sys,
                          (__force u64)sched_time,
                          (__force u64)tick_time);

    /*
     * When the tick counters advance faster than the scheduler
     * clock, the scaled value can regress between two reads.
     * Clamp against the previously reported values so both fields
     * only ever move forward.
     */
    prev->stime = max(prev->stime, sys);
    prev->utime = max(prev->utime, sched_time - prev->stime);

    *ut = prev->utime;
    *st = prev->stime;
}
Example #3
0
/*
 * Account time stolen from this vCPU by the hypervisor, clamped to
 * @maxtime, and return the amount actually accounted.
 *
 * A guest preempted for a long stretch never gets the missed clock
 * ticks redelivered, so the steal clock may report more time than the
 * caller believes has elapsed; the clamp bounds what we charge per
 * call while the remainder stays pending in the steal clock.
 * Always returns 0 without CONFIG_PARAVIRT or while the paravirt
 * steal static key is disabled.
 */
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal_ns;
		cputime_t accounted;

		/* Nanoseconds stolen since we last accounted any. */
		steal_ns = paravirt_steal_clock(smp_processor_id());
		steal_ns -= this_rq()->prev_steal_time;

		accounted = min(nsecs_to_cputime(steal_ns), maxtime);
		account_steal_time(accounted);
		/* Only advance the watermark by what was charged. */
		this_rq()->prev_steal_time += cputime_to_nsecs(accounted);

		return accounted;
	}
#endif
	return 0;
}