Example #1
/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void __noreturn cpu_idle(void)
{
	int cpu;

	/* CPU is going idle. */
	cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
			extern void smtc_idle_loop_hook(void);

			smtc_idle_loop_hook();
#endif
			if (cpu_wait)
				(*cpu_wait)();
		}
#ifdef CONFIG_HOTPLUG_CPU
		if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
		    (system_state == SYSTEM_RUNNING ||
		     system_state == SYSTEM_BOOTING))
			play_dead();
#endif
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #2
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
    int cpu = smp_processor_id();

    current_thread_info()->status |= TS_POLLING;

    /* endless idle loop with no priority at all */
    while (1) {
        tick_nohz_stop_sched_tick();
        while (!need_resched()) {
            void (*idle)(void);

            if (__get_cpu_var(cpu_idle_state))
                __get_cpu_var(cpu_idle_state) = 0;

            check_pgt_cache();
            rmb();
            idle = pm_idle;

            if (!idle)
                idle = default_idle;

            if (cpu_is_offline(cpu))
                play_dead();

            __get_cpu_var(irq_stat).idle_timestamp = jiffies;
            idle();
        }
        tick_nohz_restart_sched_tick();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
Example #3
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #4
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
	}
}
Example #5
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			clear_thread_flag(TIF_POLLING_NRFLAG);

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (!need_resched() && powersave != NULL)
				powersave();
			start_critical_timings();
			local_irq_enable();
			set_thread_flag(TIF_POLLING_NRFLAG);
		}

		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #6
static noinline void __init_refok rest_init(void)
{
	int pid;

	rcu_scheduler_starting();
	/*
	 * We need to spawn init first so that it obtains pid 1, however
	 * the init task will end up wanting to create kthreads, which, if
	 * we schedule it before we create kthreadd, will OOPS.
	 */
	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
	numa_default_policy();
	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
	rcu_read_lock();
	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
	rcu_read_unlock();
	complete(&kthreadd_done);

	/*
	 * The boot idle thread must execute schedule()
	 * at least once to get things moving:
	 */
	init_idle_bootup_task(current);
	preempt_enable_no_resched();
	schedule();

	/* Call into cpu_idle with preempt disabled */
	preempt_disable();
	cpu_idle();
}
Example #7
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			xen_idle();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #8
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If we're a non-boot CPU, nothing set the stack canary up
	 * for us, so do it here; we never return from this function.
	 */
	boot_init_stack_canary();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #9
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
	int cpu;
	cpumask_t mask = { CPU_BITS_NONE };

	if (!erratum_a15_798181())
		return;

	preempt_disable();
	dummy_flush_tlb_a15_erratum();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are running
		 * the same mm (and ASID) as the one being invalidated. There
		 * is no need for locking around the current_mm check since
		 * the switch_mm() function has a dmb() for this erratum in
		 * case a context switch happens on another CPU after the
		 * condition below.
		 */
		if (mm == per_cpu(current_mm, cpu))
			cpumask_set_cpu(cpu, &mask);
	}
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
	preempt_enable_no_resched();
}
Example #10
static void iseries_shared_idle(void)
{
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();

		if (hvlpevent_is_pending())
			process_iSeries_events();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #11
static void iseries_dedicated_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		tick_nohz_stop_sched_tick(1);
		if (!need_resched()) {
			while (!need_resched()) {
				ppc64_runlatch_off();
				HMT_low();

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #12
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
Example #13
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
Example #14
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
    account_system_vtime(current);
    /* With CONFIG_PREEMPT, IRQ_EXIT_OFFSET is HARDIRQ_OFFSET - 1, so this
       leaves one preempt count level held while softirqs are processed. */
    sub_preempt_count(IRQ_EXIT_OFFSET);
    if (!in_interrupt() && local_softirq_pending())
        invoke_softirq();
    /* Drop the remaining level; no resched check from interrupt context. */
    preempt_enable_no_resched();
}
Example #15
static void restore_wp(const unsigned long cr0)
{
    /* Write back the saved CR0 value and re-enable preemption. */
    write_cr0(cr0);
    preempt_enable_no_resched();
}
Example #16
static inline void play_dead(void)
{
	idle_task_exit();
	local_irq_disable();
	cpu_clear(smp_processor_id(), cpu_initialized);
	preempt_enable_no_resched();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}
Example #17
static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
Example #18
void raw_local_irq_disable(void)
{
	struct vcpu_info *_vcpu;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	_vcpu->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
Example #19
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		idle();	/* default idle */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #20
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
    while (1) {
        while (!need_resched())
            idle();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
Example #21
/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
int raw_irqs_disabled(void)
{
	struct vcpu_info *_vcpu;
	int disabled;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	disabled = (_vcpu->evtchn_upcall_mask != 0);
	preempt_enable_no_resched();
	return disabled;
}
Example #22
static void broadcast_tlb_a15_erratum(void)
{
	if (!erratum_a15_798181())
		return;

	preempt_disable();
	dummy_flush_tlb_a15_erratum();
	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
			       NULL, 1);
	preempt_enable_no_resched();
}
Example #23
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable_no_resched();
	local_bh_enable();
	return 0;
}
Example #24
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *)regs->pc;
	int ret = 0;

	pr_debug("kprobe_handler: kprobe_running=%p\n",
		 kprobe_running());

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();

	/* Check that we're not recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				printk("FIXME: kprobe hit while single-stepping!\n");
				goto no_kprobe;
			}

			printk("FIXME: kprobe hit while handling another kprobe\n");
			goto no_kprobe;
		} else {
			p = kprobe_running();
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		goto no_kprobe;

	kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
Example #25
/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
ATTRIB_NORET void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			if (cpu_wait)
				(*cpu_wait)();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #26
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			(*idle) ();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #27
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		/* TODO: Enter sleep mode */
		while (!need_resched())
			cpu_relax();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #28
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void __noreturn cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			barrier();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #29
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);

			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		if (cpu_should_die())
			cpu_die();
		schedule();
		preempt_disable();
	}
}
Example #30
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
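Nearly all of the cpu_idle() examples above share one skeleton. A minimal sketch of that common pattern (wait_for_interrupt() is a placeholder for the arch-specific low-power wait, not a real kernel symbol):

void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			wait_for_interrupt();	/* e.g. a wait/hlt instruction */
		/*
		 * Re-enable preemption without the implicit resched
		 * check: we call schedule() explicitly right after.
		 */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}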