Example #1
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

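	/*
	 * TS_POLLING tells the scheduler on other CPUs that this CPU
	 * polls need_resched(), so a reschedule can be requested by
	 * setting the flag, without sending an IPI.
	 */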
	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #2
asmlinkage void do_softirq(void)
{
    unsigned int i, cpu;
    unsigned long pending;

    for ( ; ; )
    {
        /*
         * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ may move
         * us to another processor.
         */
        cpu = smp_processor_id();

        if ( rcu_pending(cpu) )
            rcu_check_callbacks(cpu);

        if ( (pending = softirq_pending(cpu)) == 0 )
            break;

        i = find_first_set_bit(pending);
        clear_bit(i, &softirq_pending(cpu));
        (*softirq_handlers[i])();
    }
}
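The loop above pops pending softirq numbers off a per-CPU bitmask and
dispatches through softirq_handlers[]. For completeness, a minimal
sketch of the registration/raise half of that interface, assuming
Xen's usual open_softirq()/raise_softirq() helpers (simplified):

/* Handler table indexed by softirq number; filled in at boot. */
static softirq_handler softirq_handlers[NR_SOFTIRQS];

/* Install @handler for softirq number @nr. */
void open_softirq(int nr, softirq_handler handler)
{
    ASSERT(nr < NR_SOFTIRQS);
    softirq_handlers[nr] = handler;
}

/* Mark softirq @nr pending on this CPU; do_softirq() will run it. */
void raise_softirq(unsigned int nr)
{
    set_bit(nr, &softirq_pending(smp_processor_id()));
}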
Example #3
static unsigned long
consider_steal_time(unsigned long new_itm)
{
	unsigned long stolen, blocked;
	unsigned long delta_itm = 0, stolentick = 0;
	int cpu = smp_processor_id();
	struct vcpu_runstate_info runstate;
	struct task_struct *p = current;

	get_runstate_snapshot(&runstate);

	/*
	 * Check for the vcpu migration effect: after a migration the
	 * itc can appear to run backwards, and the unsigned deltas
	 * below would then wrap into huge bogus stolen/blocked values.
	 * Reject such samples by treating the delta as zero.
	 */
	if (time_after_eq(runstate.time[RUNSTATE_blocked],
			  per_cpu(xen_blocked_time, cpu)))
		blocked = runstate.time[RUNSTATE_blocked] -
			  per_cpu(xen_blocked_time, cpu);
	else
		blocked = 0;

	if (time_after_eq(runstate.time[RUNSTATE_runnable] +
			  runstate.time[RUNSTATE_offline],
			  per_cpu(xen_stolen_time, cpu)))
		stolen = runstate.time[RUNSTATE_runnable] +
			 runstate.time[RUNSTATE_offline] -
			 per_cpu(xen_stolen_time, cpu);
	else
		stolen = 0;

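	/*
	 * stolentick: how many ticks the next timer interrupt (new_itm)
	 * is already overdue; it caps how many ticks may be accounted
	 * as stolen or blocked below.
	 */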
	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
		stolentick = ia64_get_itc() - new_itm;

	do_div(stolentick, NS_PER_TICK);
	stolentick++;

	do_div(stolen, NS_PER_TICK);

	if (stolen > stolentick)
		stolen = stolentick;

	stolentick -= stolen;
	do_div(blocked, NS_PER_TICK);

	if (blocked > stolentick)
		blocked = stolentick;

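	/*
	 * Account the lost ticks and catch up on the per-tick work
	 * (local timers, RCU, scheduler tick, posix timers) that was
	 * missed while the vcpu was preempted or blocked.
	 */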
	if (stolen > 0 || blocked > 0) {
		account_steal_ticks(stolen);
		account_idle_ticks(blocked);
		run_local_timers();

		rcu_check_callbacks(cpu, user_mode(get_irq_regs()));

		scheduler_tick();
		run_posix_cpu_timers(p);
		delta_itm += local_cpu_data->itm_delta * (stolen + blocked);

		if (cpu == time_keeper_id)
			xtime_update(stolen + blocked);

		local_cpu_data->itm_next = delta_itm + new_itm;

		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
	}
	return delta_itm;
}
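The arithmetic above reduces to converting nanosecond runstate deltas
into whole clock ticks while rejecting deltas that would wrap. A
standalone toy illustration of that conversion (hypothetical helper,
not kernel code; assumes a 1 ms tick and uses plain division in place
of do_div()):

#include <stdio.h>

#define NS_PER_TICK 1000000UL	/* assumed: HZ = 1000, 1 ms per tick */

/*
 * Convert a nanosecond counter delta into whole ticks, treating a
 * counter that appears to run backwards (the "vcpu migration effect"
 * handled above) as a zero delta.
 */
static unsigned long ns_delta_to_ticks(unsigned long now_ns,
				       unsigned long last_ns)
{
	if (now_ns < last_ns)	/* reversed clock would wrap unsigned */
		return 0;
	return (now_ns - last_ns) / NS_PER_TICK;
}

int main(void)
{
	/* 3.5 ms of stolen time accounts as 3 whole ticks. */
	printf("%lu ticks\n", ns_delta_to_ticks(10500000UL, 7000000UL));
	return 0;
}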