/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle.
 * This also exits the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->tick_stopped) {
		local_irq_enable();
		return;
	}

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
#ifdef CONFIG_MTK_SCHED_CMP
	update_cpu_load_nohz();
#endif

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does one tick of accounting.
	 * Enforce that this time is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be off by one. Do not account a spuriously huge
	 * number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif

	calc_load_exit_idle();
	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

	local_irq_enable();
}
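
The interesting part of this example is the catch-up step: while the tick was stopped, update_process_times() never ran, so the entire sleep has to be credited to idle in one go. Below is a minimal, self-contained sketch of just that step; jiffies, idle_jiffies and account_idle_ticks() are stand-ins for the kernel symbols, used here for illustration only.

#include <stdio.h>
#include <limits.h>

static unsigned long jiffies;		/* stand-in for the kernel's jiffies counter */
static unsigned long idle_total;	/* what account_idle_ticks() would accumulate */

static void account_idle_ticks(unsigned long ticks)
{
	idle_total += ticks;
}

int main(void)
{
	unsigned long idle_jiffies = 1000;	/* jiffies when the tick was stopped */
	unsigned long ticks;

	jiffies = 1250;				/* jiffies after waking up */

	/* Same guard as in the example: skip zero and wrapped/huge deltas. */
	ticks = jiffies - idle_jiffies;
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);

	printf("accounted %lu idle ticks\n", idle_total);	/* prints 250 */
	return 0;
}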
Example #2
void tick_nohz_idle_exit(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	/* Cancel the timer because the CPU has already woken up from the C-states */
	menu_hrtimer_cancel();
	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->tick_stopped) {
		local_irq_enable();
		return;
	}

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	ticks = jiffies - ts->idle_jiffies;
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif

	calc_load_exit_idle();
	touch_softlockup_watchdog();
	ts->tick_stopped  = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

	local_irq_enable();
}
Example #3
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does one tick of accounting.
	 * Enforce that this time is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be off by one. Do not account a spuriously huge
	 * number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* Work out how much time the VCPU has not been running. */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time. */
	blocked += __this_cpu_read(xen_residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__this_cpu_write(xen_residual_blocked, blocked);
	account_idle_ticks(ticks);
}
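
The load-bearing detail in do_stolen_accounting() is the residual carry: nanoseconds that do not amount to a whole tick are written back per-CPU and added in on the next pass, so rounding never loses time. The following standalone sketch shows that carry; NS_PER_TICK's value and the div_rem() helper are illustrative stand-ins for the kernel's iter_div_u64_rem().

#include <stdio.h>
#include <stdint.h>

#define NS_PER_TICK 10000000ULL		/* 10 ms tick, illustrative value */

/* Stand-in for iter_div_u64_rem(): return the quotient, store the remainder. */
static uint64_t div_rem(uint64_t dividend, uint64_t divisor, int64_t *remainder)
{
	*remainder = (int64_t)(dividend % divisor);
	return dividend / divisor;
}

int main(void)
{
	int64_t residual = 0;	/* plays the role of xen_residual_stolen */
	uint64_t stolen_ns[] = { 15000000, 7000000, 9000000 };

	for (int i = 0; i < 3; i++) {
		/* Add the new stolen time to whatever was carried over. */
		int64_t stolen = (int64_t)stolen_ns[i] + residual;
		uint64_t ticks = div_rem((uint64_t)stolen, NS_PER_TICK, &residual);

		printf("pass %d: %llu tick(s), %lld ns carried over\n",
		       i, (unsigned long long)ticks, (long long)residual);
	}
	return 0;
}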
static unsigned long
consider_steal_time(unsigned long new_itm)
{
	unsigned long stolen, blocked;
	unsigned long delta_itm = 0, stolentick = 0;
	int cpu = smp_processor_id();
	struct vcpu_runstate_info runstate;
	struct task_struct *p = current;

	get_runstate_snapshot(&runstate);

	/*
	 * Check for the vcpu migration effect: after a migration the
	 * itc value can be reversed, which causes a huge stolen value.
	 * Reject such samples here, and compute the deltas otherwise.
	 */
	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
			   per_cpu(xen_blocked_time, cpu)))
		blocked = 0;
	else
		blocked = runstate.time[RUNSTATE_blocked] -
			  per_cpu(xen_blocked_time, cpu);

	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
			   runstate.time[RUNSTATE_offline],
			   per_cpu(xen_stolen_time, cpu)))
		stolen = 0;
	else
		stolen = runstate.time[RUNSTATE_runnable] +
			 runstate.time[RUNSTATE_offline] -
			 per_cpu(xen_stolen_time, cpu);

	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
		stolentick = ia64_get_itc() - new_itm;

	do_div(stolentick, NS_PER_TICK);
	stolentick++;

	do_div(stolen, NS_PER_TICK);

	if (stolen > stolentick)
		stolen = stolentick;

	stolentick -= stolen;
	do_div(blocked, NS_PER_TICK);

	if (blocked > stolentick)
		blocked = stolentick;

	if (stolen > 0 || blocked > 0) {
		account_steal_ticks(stolen);
		account_idle_ticks(blocked);
		run_local_timers();

		rcu_check_callbacks(cpu, user_mode(get_irq_regs()));

		scheduler_tick();
		run_posix_cpu_timers(p);
		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);

		if (cpu == time_keeper_id)
			xtime_update(stolen + blocked);

		local_cpu_data->itm_next = delta_itm + new_itm;

		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
	}
	return delta_itm;
}
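
consider_steal_time() adds one more constraint on top of the same idea: stolen and blocked ticks together may never exceed the ticks that actually elapsed since new_itm, with stolen taking priority. Stripped of the ia64 details, the budget logic reduces to the sketch below (the values are made up for illustration).

#include <stdio.h>

int main(void)
{
	unsigned long budget = 5;	/* stolentick: ticks elapsed since new_itm */
	unsigned long stolen = 7, blocked = 4;

	if (stolen > budget)		/* stolen is capped first ... */
		stolen = budget;
	budget -= stolen;		/* ... and consumes the budget ... */

	if (blocked > budget)		/* ... before blocked gets the rest */
		blocked = budget;

	printf("stolen=%lu blocked=%lu\n", stolen, blocked);	/* 5 and 0 */
	return 0;
}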
Example #7
void tick_nohz_restart_sched_tick(void)
{
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	unsigned long ticks;
#endif
	ktime_t now;

	local_irq_disable();
	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
		now = ktime_get();

	if (ts->idle_active)
		tick_nohz_stop_idle(cpu, now);

	if (!ts->inidle || !ts->tick_stopped) {
		ts->inidle = 0;
#ifdef CONFIG_DATAPLANE
		if (!user_idle)
#endif
		local_irq_enable();
		return;
	}

	ts->inidle = 0;

	rcu_exit_nohz();

	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
	cpumask_clear_cpu(cpu, nohz_cpu_mask);

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	/*
	 * We stopped the tick in idle. update_process_times() would miss
	 * the time we slept, as it only does one tick of accounting.
	 * Enforce that this time is accounted to idle!
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be off by one. Do not account a spuriously huge
	 * number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
#ifdef CONFIG_DATAPLANE
	{
		if (user_idle) {
			cputime_t cpu_time = jiffies_to_cputime(ticks);
			account_user_time(current, cpu_time,
					  cputime_to_scaled(cpu_time));
		} else {
			account_idle_ticks(ticks);
		}
	}
#else
		account_idle_ticks(ticks);
#endif
#endif

	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);

#ifdef CONFIG_DATAPLANE
	if (!user_idle)
#endif
	local_irq_enable();
}
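
The CONFIG_DATAPLANE variant changes where the slept ticks are charged: a CPU that was busy-polling in userspace (user_idle) gets them as user time, everything else as idle. A sketch of that split follows; HZ's value and jiffies_to_ns() are illustrative stand-ins for the jiffies_to_cputime()/account_user_time() path above.

#include <stdio.h>

#define HZ 100	/* illustrative tick rate */

static unsigned long long user_ns, idle_ns;

static unsigned long long jiffies_to_ns(unsigned long ticks)
{
	return ticks * (1000000000ULL / HZ);
}

static void account(unsigned long ticks, int user_idle)
{
	if (user_idle)
		user_ns += jiffies_to_ns(ticks);	/* dataplane CPU: charge as user time */
	else
		idle_ns += jiffies_to_ns(ticks);	/* ordinary CPU: charge as idle */
}

int main(void)
{
	account(250, 1);
	account(250, 0);
	printf("user=%llu ns, idle=%llu ns\n", user_ns, idle_ns);
	return 0;
}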