Code example #1
File: idle.c Project: dduval/kernel-rhel3
int idled(void)
{
	int do_power_save = 0;

	/* Check if CPU can powersave (get rid of that soon!) */
	if (cur_cpu_spec[smp_processor_id()]->cpu_features &
		(CPU_FTR_CAN_DOZE | CPU_FTR_CAN_NAP))
		do_power_save = 1;

	/* endless loop with no priority at all */
	for (;;) {
#ifdef CONFIG_SMP
		if (!do_power_save) {
			/*
			 * Deal with another CPU just having chosen a thread to
			 * run here:
			 */
			int oldval = xchg(&current->need_resched, -1);

			if (!oldval) {
				while(current->need_resched == -1)
					; /* Do Nothing */
			}
		}
#endif
#ifdef CONFIG_6xx
		if (do_power_save && !current->need_resched)
			power_save_6xx();
#endif /* CONFIG_6xx */			
		schedule();
		check_pgt_cache();
	}
	return 0;
}
Code example #2
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up for
	 * us, and since this function never returns, this is a good place
	 * to initialize it (see code example #20 for the full rationale).
	 */
	boot_init_stack_canary();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Code example #3
File: process_32.c Project: maraz/linux-2.6
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Code example #4
/*
 * The idle loop on a uniprocessor i386..
 */ 
static int cpu_idle(void *unused)
{
	int work = 1;
	unsigned long start_idle = 0;

	/* endless idle loop with no priority at all */
	current->priority = 0;
	current->counter = -100;

	for (;;) {
		if (work)
			start_idle = jiffies;

		if (jiffies - start_idle > HARD_IDLE_TIMEOUT) 
			hard_idle();
		else {
			if (boot_cpu_data.hlt_works_ok && !hlt_counter &&
			    !current->need_resched)
				__asm__("hlt");
		}

		work = current->need_resched;
		schedule();
		check_pgt_cache();
	}
}
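The HARD_IDLE_TIMEOUT branch above calls hard_idle(), which is not shown. A hypothetical sketch of that deep-idle phase follows; the real 2.2-era helper also hooked into the APM BIOS idle call, which is omitted here:

/*
 * Hypothetical sketch of hard_idle(): once the CPU has been idle for
 * more than HARD_IDLE_TIMEOUT jiffies, sit in hlt until somebody asks
 * to reschedule.  The caller's loop then runs schedule().
 */
static void hard_idle(void)
{
	while (!current->need_resched)
		if (boot_cpu_data.hlt_works_ok && !hlt_counter)
			__asm__("hlt");	/* wake on the next interrupt */
}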
Code example #5
File: idle.c Project: 0xroot/Blackphone-BP1-Kernel
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			clear_thread_flag(TIF_POLLING_NRFLAG);

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (!need_resched() && powersave != NULL)
				powersave();
			start_critical_timings();
			local_irq_enable();
			set_thread_flag(TIF_POLLING_NRFLAG);
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Code example #6
asmlinkage int sys_idle(void)
{
	unsigned long start_idle = 0;

	if (current->pid != 0)
		return -EPERM;

	/* endless idle loop with no priority at all */
	current->priority = 0;
	current->counter = 0;
	for (;;) {
		/*
		 * R4[36]00 have wait, R4[04]00 don't.
		 * FIXME: We should save power by reducing the clock where
		 *        possible.  This will cut down the power consumption
		 *        of R4200 systems to about 1/16th of normal, the
		 *        same for logic clocked with the processor generated
		 *        clocks.
		 */
		if (!start_idle) {
			check_pgt_cache();
			start_idle = jiffies;
		}
		if (wait_available && !current->need_resched)
			__asm__(".set\tmips3\n\t"
				"wait\n\t"
				".set\tmips0");
		run_task_queue(&tq_scheduler);
		if (current->need_resched)
			start_idle = 0;
		schedule();
	}

	return 0;
}
Code example #7
File: idle.c Project: BWhitten/linux-stable
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(smp_processor_id())) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired())
			cpu_idle_poll();
		else
			cpuidle_idle_call();
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_preempt_disabled();
}
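When do_idle() takes the polling branch, cpu_idle_poll() re-enables interrupts and spins on need_resched() instead of entering a deeper C-state. A condensed sketch of that helper, following kernel/sched/idle.c with the tracing and lockdep hooks omitted:

static noinline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	local_irq_enable();	/* poll with interrupts enabled */

	/* spin until work arrives or the reason for polling goes away */
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();

	rcu_idle_exit();
	return 1;
}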
Code example #8
File: process.c Project: b3rnik/dsl-n55u-bender
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Code example #9
/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
				cpu_idle_poll();
			} else {
				if (!current_clr_polling_and_test()) {
					stop_critical_timings();
					rcu_idle_enter();
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				__current_set_polling();
			}
			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
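The current_clr_polling_and_test() call in code example #9 is what makes dropping the polling bit safe: the bit is cleared, a full barrier is issued, and need_resched is re-checked so a wakeup that raced with the clear is not lost. A sketch of the helper as defined in the scheduler headers (its exact location varies by kernel version):

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling must be visibly clear before re-reading need_resched:
	 * a remote waker that still sees the polling bit will skip the
	 * resched IPI and rely on us noticing TIF_NEED_RESCHED here.
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}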
Code example #10
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle).  This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll ||
			    tick_check_broadcast_expired() ||
			    __get_cpu_var(idle_force_poll))
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to reschedule if need_resched is set while
		 * polling is set.  That means that clearing polling
		 * needs to be visible before rescheduling.
		 */
		smp_mb__after_atomic();

		schedule_preempt_disabled();
		if (cpu_is_offline(smp_processor_id()))
			arch_cpu_idle_dead();
	}
}
Code example #11
File: process.c Project: fgeraci/cs518-sched
/* This is being executed in task 0 'user space'. */
int cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while(1) {
		if(current->need_resched) {
			schedule();
			check_pgt_cache();
		}
		barrier(); /* or else gcc optimizes... */
	}
}
Code example #12
File: process.c Project: dduval/kernel-rhel3
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	init_idle();

	while (1) {
		while (!current->need_resched) {
		}
		schedule();
		check_pgt_cache();
	}
}
Code example #13
File: memory.c Project: davidbau/davej
/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way.
 */
void clear_page_tables(struct mm_struct *mm, unsigned long first, int nr)
{
	pgd_t * page_dir = mm->pgd;

	page_dir += first;
	do {
		free_one_pgd(page_dir);
		page_dir++;
	} while (--nr);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
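check_pgt_cache(), which every snippet on this page ends up calling, trims the cached page-table pages back under a high-water mark. A simplified sketch in the style of the old 2.4 x86 implementation; the quicklist helpers and water marks here are arch- and version-specific:

/*
 * Simplified sketch: free cached page-table pages until the cache is
 * back below its low-water mark.  pgtable_cache_size and the quicklist
 * helpers follow the old 2.4 x86 naming.
 */
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist) {
				free_pgd_slow(get_pgd_fast());
				freed++;
			}
			if (pte_quicklist) {
				pte_free_slow(pte_alloc_one_fast(NULL, 0));
				freed++;
			}
		} while (pgtable_cache_size > low);
	}
	return freed;
}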
Code example #14
File: process.c Project: dduval/kernel-rhel3
ATTRIB_NORET void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	init_idle();

	while (1) {
		while (!current->need_resched)
			if (cpu_wait)
				(*cpu_wait)();
		schedule();
		check_pgt_cache();
	}
}
Code example #15
File: process.c Project: 33d/linux-2.6.21-hh20
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			barrier();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		check_pgt_cache();
	}
}
Code example #16
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	current->nice = 20;
	current->counter = -100;
	init_idle();

	while (1) {
		while (!current->need_resched)
			if (cpu_wait)
				(*cpu_wait)();
		schedule();
		check_pgt_cache();
	}
}
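Code examples #14 and #16 idle through an optional cpu_wait function pointer, which MIPS kernels point at a routine suited to the CPU model (see the R4[36]00 comment in code example #6). A sketch of such a routine, reusing the wait-instruction sequence shown in example #6:

/*
 * Sketch of a MIPS cpu_wait implementation: execute the `wait`
 * instruction so the core sleeps until the next interrupt.  The
 * .set directives mirror the inline asm in code example #6.
 */
static void r4k_wait(void)
{
	__asm__(".set\tmips3\n\t"
		"wait\n\t"
		".set\tmips0");
}

void (*cpu_wait)(void) = NULL;	/* set at boot if the CPU implements wait */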
Code example #17
File: idle.c Project: Menpiko/SnaPKernel-N6P
/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll ||
			    tick_check_broadcast_expired() ||
			    __get_cpu_var(idle_force_poll)) {
				cpu_idle_poll();
			} else {
				if (!current_clr_polling_and_test()) {
					stop_critical_timings();
					rcu_idle_enter();
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				__current_set_polling();
			}
			arch_cpu_idle_exit();
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
		if (cpu_is_offline(smp_processor_id()))
			arch_cpu_idle_dead();

	}
}
Code example #18
File: process_32.c Project: OESF/linux-linaro-natty
/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		if (ARCH_SUN4C) {
			static int count = HZ;
			static unsigned long last_jiffies;
			static unsigned long last_faults;
			static unsigned long fps;
			unsigned long now;
			unsigned long faults;

			extern unsigned long sun4c_kernel_faults;
			extern void sun4c_grow_kernel_ring(void);

			local_irq_disable();
			now = jiffies;
			count -= (now - last_jiffies);
			last_jiffies = now;
			if (count < 0) {
				count += HZ;
				faults = sun4c_kernel_faults;
				fps = (fps + (faults - last_faults)) >> 1;
				last_faults = faults;
#if 0
				printk("kernel faults / second = %ld\n", fps);
#endif
				if (fps >= SUN4C_FAULT_HIGH) {
					sun4c_grow_kernel_ring();
				}
			}
			local_irq_enable();
		}

		if (pm_idle) {
			while (!need_resched())
				(*pm_idle)();
		} else {
			while (!need_resched())
				cpu_relax();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		check_pgt_cache();
	}
}
Code example #19
File: process.c Project: nhanh0/hah
/*
 * the idle loop on a Sparc... ;)
 */
int cpu_idle(void)
{
	int ret = -EPERM;

	if (current->pid != 0)
		goto out;

	/* endless idle loop with no priority at all */
	current->nice = 20;
	current->counter = -100;
	init_idle();

	for (;;) {
		if (ARCH_SUN4C_SUN4) {
			static int count = HZ;
			static unsigned long last_jiffies = 0;
			static unsigned long last_faults = 0;
			static unsigned long fps = 0;
			unsigned long now;
			unsigned long faults;
			unsigned long flags;

			extern unsigned long sun4c_kernel_faults;
			extern void sun4c_grow_kernel_ring(void);

			save_and_cli(flags);
			now = jiffies;
			count -= (now - last_jiffies);
			last_jiffies = now;
			if (count < 0) {
				count += HZ;
				faults = sun4c_kernel_faults;
				fps = (fps + (faults - last_faults)) >> 1;
				last_faults = faults;
#if 0
				printk("kernel faults / second = %d\n", fps);
#endif
				if (fps >= SUN4C_FAULT_HIGH) {
					sun4c_grow_kernel_ring();
				}
			}
			restore_flags(flags);
		}
		check_pgt_cache();
		schedule();
	}
out:
	return ret;
}
Code example #20
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Code example #21
File: process.c Project: longqzh/chronnOS
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

		if (!idle)
			idle = default_idle;

		tick_nohz_stop_sched_tick(1);
		while (!need_resched())
			idle();
		tick_nohz_restart_sched_tick();

		schedule_preempt_disabled();
		check_pgt_cache();
	}
}
Code example #22
int cpu_idle(void *unused)
{
	/* endless idle loop with no priority at all */
	current->priority = 0;
	current->counter = -100;

	while(1) {
		if (current_cpu_data.hlt_works_ok && !hlt_counter &&
				 !current->need_resched)
			__asm__("hlt");
		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (current->need_resched) {
			schedule();
			check_pgt_cache();
		}
	}
}
Code example #23
File: idle.c Project: AiWinters/linux
/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				sh_idle();
			/*
			 * Sanity check to ensure that sh_idle() returns
			 * with IRQs enabled
			 */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
Code example #24
File: idle.c Project: dwander/eas-backports
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle).  This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
			/*
			 * We need to test and propagate the TIF_NEED_RESCHED
			 * bit here because we might not have send the
			 * reschedule IPI to idle tasks.
			 */
			if (tif_need_resched())
				set_preempt_need_resched();
		}
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set.  That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}
Code example #25
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table(GFP_KERNEL);
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset_k(0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * most of the page table entries are zeroed
	 * when the table is created.
	 */
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	check_pgt_cache();
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}
Code example #26
File: idle.c Project: andy-shev/linux
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle).  This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		quiet_vmstat();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id())) {
				rcu_cpu_notify(NULL, CPU_DYING_IDLE,
					       (void *)(long)smp_processor_id());
				smp_mb(); /* all activity before dead. */
				this_cpu_write(cpu_dead_idle, true);
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending and reschedule
		 * if need_resched is set while polling is set.  That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}