Example #1
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The hardirq check is done for both system and user ticks because no timer
 * interrupt fires while we are in hardirq context, so we might otherwise
 * never get a chance to update it from a system tick alone.
 * p->stime and friends are only updated on system ticks, not on irq/softirq
 * ticks, as those no longer count towards the task's exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time,
		 * so we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
					CPUTIME_SYSTEM);
	}
}
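
For context (not part of the example above; names as in the mainline kernel, details elided): the periodic tick reaches this code roughly as sketched below. update_process_times() charges one jiffy to the interrupted task, and with sched_clock_irqtime set, account_process_tick() (see Example #6) forwards to irqtime_account_process_tick().

/*
 * Simplified sketch of the periodic tick path: the timer interrupt handler
 * calls update_process_times(), which hands one jiffy of CPU time to the
 * accounting layer for the task that was interrupted.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	account_process_tick(p, user_tick);	/* one jiffy of user/system/idle time */
	run_local_timers();			/* expire per-CPU timers */
	scheduler_tick();			/* other tick work elided */
}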
Example #2
/*
 * Update process times based on virtual cpu times stored by entry.S
 * in the lowcore fields user_timer, system_timer and steal_clock.
 */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t cputime;
	__u64 timer, clock;
	int rcu_user_flag;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
		      "  STCK %1"      /* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
		        "=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

	cputime = S390_lowcore.user_timer >> 12;
	rcu_user_flag = cputime != 0;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime =  S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, HARDIRQ_OFFSET, cputime);

	cputime = S390_lowcore.steal_clock;
	if ((__s64) cputime > 0) {
		cputime >>= 12;
		S390_lowcore.steal_clock -= cputime << 12;
		account_steal_time(tsk, cputime);
	}
}
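
The repeated >> 12 / << 12 pairs above are a remainder-preserving unit conversion: the s390 CPU timer and TOD clock advance in units of 2^-12 microseconds, so the shift yields whole cputime units while the subtraction leaves the sub-unit remainder in the lowcore accumulator for the next tick. A hypothetical helper showing the pattern in isolation:

/* Hypothetical helper illustrating the conversion pattern used in Example #2. */
static inline __u64 consume_whole_units(__u64 *acc)
{
	__u64 units = *acc >> 12;	/* whole cputime units (microseconds here) */

	*acc -= units << 12;		/* keep the sub-unit remainder for later */
	return units;
}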
Example #3
/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
    struct thread_info *ti = task_thread_info(p);
    cputime_t delta_utime;

    if (ti->ac_utime) {
        delta_utime = cycle_to_cputime(ti->ac_utime);
        account_user_time(p, delta_utime, delta_utime);
        ti->ac_utime = 0;
    }
}
Example #4
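/*
 * Flush pending user time accumulated (in cycles) in the thread_info into
 * the generic user-time accounting.
 */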
void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_utime;
	struct thread_info *ti = task_thread_info(tsk);

	if (ti->ac_utime) {
		delta_utime = cycle_to_cputime(ti->ac_utime);
		account_user_time(tsk, delta_utime, delta_utime);
		ti->ac_utime = 0;
	}
}
Example #5
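/*
 * Charge the time elapsed since the last vtime snapshot as user time and
 * record that subsequent time is system time (VTIME_SYS).
 */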
void vtime_account_user(struct task_struct *tsk)
{
    cputime_t delta_cpu;

    if (!vtime_accounting_enabled())
        return;

    delta_cpu = get_vtime_delta(tsk);

    write_seqlock(&tsk->vtime_seqlock);
    tsk->vtime_snap_whence = VTIME_SYS;
    account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
    write_sequnlock(&tsk->vtime_seqlock);
}
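
The writer side above publishes vtime_snap_whence and the accounted time under tsk->vtime_seqlock; a reader sampling another task's state would typically retry until it observes a consistent snapshot. A rough, illustrative reader-side sketch (field access elided):

/* Illustrative reader for the write-side locking shown in Example #5. */
static void snapshot_vtime(struct task_struct *tsk)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&tsk->vtime_seqlock);
		/* sample tsk->vtime_snap_whence and the cputime fields here */
	} while (read_seqretry(&tsk->vtime_seqlock, seq));
}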
Example #6
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}
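
One detail worth spelling out: the (p != rq->idle) || (irq_count() != HARDIRQ_OFFSET) test charges a tick to the idle task as system time whenever idle was itself interrupted in hardirq or softirq context. A hypothetical predicate for the remaining, pure-idle case:

/*
 * Hypothetical helper: true only when the tick interrupted the idle loop
 * itself. Inside the tick handler the hardirq nesting level is exactly one,
 * so irq_count() equals HARDIRQ_OFFSET unless idle was interrupted while in
 * hardirq or softirq context, in which case the jiffy is system time.
 */
static bool tick_hit_pure_idle(struct task_struct *p, struct rq *rq)
{
	return p == rq->idle && irq_count() == HARDIRQ_OFFSET;
}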
Example #7
/*
 * Called from the context switch path with interrupts disabled, to charge
 * all accumulated times to the outgoing process and to prepare accounting
 * for the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
    struct thread_info *pi = task_thread_info(prev);
    struct thread_info *ni = task_thread_info(next);
    cputime_t delta_stime, delta_utime;
    __u64 now;

    now = ia64_get_itc();

    delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
    if (idle_task(smp_processor_id()) != prev)
        account_system_time(prev, 0, delta_stime, delta_stime);
    else
        account_idle_time(delta_stime);

    if (pi->ac_utime) {
        delta_utime = cycle_to_cputime(pi->ac_utime);
        account_user_time(prev, delta_utime, delta_utime);
    }

    pi->ac_stamp = ni->ac_stamp = now;
    ni->ac_stime = ni->ac_utime = 0;
}
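
The thread_info bookkeeping that feeds this function is not shown here. Roughly, ac_stamp records the cycle counter at the last accounting point, and ac_utime/ac_stime accumulate cycles by mode, so at switch time everything since the last stamp is kernel time. A hypothetical accumulation step on kernel entry might look like:

/* Hypothetical illustration of how ac_utime/ac_stamp could be maintained. */
static void vtime_note_kernel_entry(struct thread_info *ti, __u64 now)
{
	ti->ac_utime += now - ti->ac_stamp;	/* cycles spent in user mode */
	ti->ac_stamp = now;			/* kernel time accrues from here */
}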
Example #8
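/*
 * Flush every accumulated cputime category from the thread_info (stored in
 * cycles) into the core accounting, then reset the per-thread counters.
 */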
void vtime_flush(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 delta;

	if (ti->utime)
		account_user_time(tsk, cycle_to_nsec(ti->utime));

	if (ti->gtime)
		account_guest_time(tsk, cycle_to_nsec(ti->gtime));

	if (ti->idle_time)
		account_idle_time(cycle_to_nsec(ti->idle_time));

	if (ti->stime) {
		delta = cycle_to_nsec(ti->stime);
		account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
	}

	if (ti->hardirq_time) {
		delta = cycle_to_nsec(ti->hardirq_time);
		account_system_index_time(tsk, delta, CPUTIME_IRQ);
	}

	if (ti->softirq_time) {
		delta = cycle_to_nsec(ti->softirq_time);
		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
	}

	ti->utime = 0;
	ti->gtime = 0;
	ti->idle_time = 0;
	ti->stime = 0;
	ti->hardirq_time = 0;
	ti->softirq_time = 0;
}
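
This is the flush half of an accumulate-and-flush scheme: the hot paths only bump per-thread cycle counters, and the conversion to nanoseconds plus the calls into the core accounting are deferred to vtime_flush(). A hypothetical accumulation side (reusing the field names above, context predicates assumed):

/* Hypothetical accumulation side for the counters flushed in Example #8. */
static void vtime_accumulate(struct thread_info *ti, __u64 cycles)
{
	if (in_irq())			/* hard interrupt context */
		ti->hardirq_time += cycles;
	else if (in_serving_softirq())	/* softirq being serviced */
		ti->softirq_time += cycles;
	else
		ti->stime += cycles;	/* ordinary kernel (system) time */
}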
Example #9
#ifdef CONFIG_DATAPLANE
void tick_nohz_restart_sched_tick(int user_idle)
#else
void tick_nohz_restart_sched_tick(void)
#endif
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();
	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
#ifdef CONFIG_DATAPLANE
		if (!user_idle)
#endif
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpumask_clear_cpu(cpu, nohz_cpu_mask);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss the
	 * time we slept, because it only accounts a single tick. Make sure
	 * the missed ticks get accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
#ifdef CONFIG_DATAPLANE
	{
		if (user_idle) {
			cputime_t cpu_time = jiffies_to_cputime(ticks);
			account_user_time(current, cpu_time,
					  cputime_to_scaled(cpu_time));
		} else {
			account_idle_ticks(ticks);
		}
	}
#else
		account_idle_ticks(ticks);
#endif
#endif

	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

#ifdef CONFIG_DATAPLANE
	if (!user_idle)
#endif
	local_irq_enable();
}
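
The CONFIG_DATAPLANE branches change a single accounting decision: jiffies that elapsed while the tick was stopped are charged as user time when the CPU was running user code tick-free (user_idle), and as idle time otherwise. The same logic with the #ifdefs dropped:

/* Condensed restatement of the accounting decision in Example #9. */
static void account_stopped_ticks(unsigned long ticks, int user_idle)
{
	if (user_idle) {
		cputime_t cpu_time = jiffies_to_cputime(ticks);

		account_user_time(current, cpu_time,
				  cputime_to_scaled(cpu_time));
	} else {
		account_idle_ticks(ticks);
	}
}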