Example #1
static int default_idle(void)
{
	long oldval;
	unsigned int cpu = smp_processor_id();

	while (1) {
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			while (!need_resched() && !cpu_is_offline(cpu)) {
				barrier();
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		schedule();
		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
Example #2
static int pseries_dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    __get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}

			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}
Example #3
void dual_boost(unsigned int boost_on)
{
	if (boost_on)
	{	
		if (is_dual_locked != 0)
			return;

#ifndef DUALBOOST_DEFERED_QUEUE
		cpu_hotplug_driver_lock();
		if (cpu_is_offline(NON_BOOT_CPU))
		{
			ssize_t ret;
			struct sys_device *cpu_sys_dev;
		
			ret = cpu_up(NON_BOOT_CPU); // it takes 60ms
			if (!ret)
			{
				cpu_sys_dev = get_cpu_sysdev(NON_BOOT_CPU);
				if (cpu_sys_dev)
				{
					kobject_uevent(&cpu_sys_dev->kobj, KOBJ_ONLINE);
					stall_mpdecision = 1;
				}
			}
		}
		cpu_hotplug_driver_unlock();
#else	
		if (cpu_is_offline(NON_BOOT_CPU))
			schedule_work_on(BOOT_CPU, &dvfs_hotplug_work);
#endif
		is_dual_locked = 1;
	}
	else
	{
		if (stall_mpdecision == 1)
		{
			struct sys_device *cpu_sys_dev;

#ifdef DUALBOOST_DEFERED_QUEUE
			flush_work(&dvfs_hotplug_work);
#endif
			cpu_hotplug_driver_lock();	
			cpu_sys_dev = get_cpu_sysdev(NON_BOOT_CPU);
			if (cpu_sys_dev)
			{
				kobject_uevent(&cpu_sys_dev->kobj, KOBJ_ONLINE);
				stall_mpdecision = 0;
			}
			cpu_hotplug_driver_unlock();
		}
		
		is_dual_locked = 0;
	}
}
Example #4
static void pseries_dedicated_idle_sleep(void)
{ 
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned long in_purr, out_purr;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->cpuctls_task_attrs = 1;
	in_purr = mfspr(SPRN_PURR);

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently.  If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->cpuctls_task_attrs = 0;
	out_purr = mfspr(SPRN_PURR);
	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}
Example #5
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	
	boot_init_stack_canary();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #6
static int __devinit ram_console_probe_oneplus(struct platform_device *pdev)
{
    INIT_WORK(&optimize_data.work, optimize_ramconsole_oneplus_func);
    optimize_data.pdev = pdev;
    schedule_work_on(cpu_is_offline(2) ? 0 : 2, &optimize_data.work);
    return 0;
}
Example #7
static void do_tasklet_work(unsigned int cpu, struct list_head *list)
{
    struct tasklet *t;

    if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
        return;

    t = list_entry(list->next, struct tasklet, list);
    list_del_init(&t->list);

    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
    t->scheduled_on = -1;
    t->is_running = 1;

    spin_unlock_irq(&tasklet_lock);
    sync_local_execstate();
    t->func(t->data);
    spin_lock_irq(&tasklet_lock);

    t->is_running = 0;

    if ( t->scheduled_on >= 0 )
    {
        BUG_ON(t->is_dead || !list_empty(&t->list));
        tasklet_enqueue(t);
    }
}
Example #8
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		snapshot_timebase();

	secondary_cpu_time_init();

	ipi_call_lock();
	notify_cpu_starting(cpu);
	cpu_set(cpu, cpu_online_map);
	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpu_set(cpu, per_cpu(cpu_core_map, base + i));
		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			cpu_set(i, per_cpu(cpu_core_map, cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();
	return 0;
}
Example #9
static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
{
	if (!rdp->beenonline)
		return;
	seq_printf(m, "%d,%s,%lu,%lu,%d,%lu,%d",
		   rdp->cpu,
		   cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"",
		   rdp->completed, rdp->gpnum,
		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
		   rdp->qs_pending);
#ifdef CONFIG_NO_HZ
	seq_printf(m, ",%d,%d,%d,%lu",
		   rdp->dynticks->dynticks,
		   rdp->dynticks->dynticks_nesting,
		   rdp->dynticks->dynticks_nmi,
		   rdp->dynticks_fqs);
#endif /* #ifdef CONFIG_NO_HZ */
	seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
	seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen,
		   ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
			rdp->nxttail[RCU_NEXT_TAIL]],
		   ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
			rdp->nxttail[RCU_NEXT_READY_TAIL]],
		   ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
			rdp->nxttail[RCU_WAIT_TAIL]],
		   ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]],
		   per_cpu(rcu_cpu_has_work, rdp->cpu),
		   convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
					  rdp->cpu)),
		   rdp->blimit);
	seq_printf(m, ",%lu,%lu,%lu\n",
		   rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
Example #10
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
    int cpu = smp_processor_id();

    current_thread_info()->status |= TS_POLLING;

    /* endless idle loop with no priority at all */
    while (1) {
        tick_nohz_stop_sched_tick();
        while (!need_resched()) {
            void (*idle)(void);

            if (__get_cpu_var(cpu_idle_state))
                __get_cpu_var(cpu_idle_state) = 0;

            check_pgt_cache();
            rmb();
            idle = pm_idle;

            if (!idle)
                idle = default_idle;

            if (cpu_is_offline(cpu))
                play_dead();

            __get_cpu_var(irq_stat).idle_timestamp = jiffies;
            idle();
        }
        tick_nohz_restart_sched_tick();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
Example #11
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {

			check_pgt_cache();
			rmb();

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #12
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;


	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			xen_idle();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
Example #13
static inline void cpus_online_all(void)
{
	unsigned int cpu;
	switch (endurance_level) {
	case 1:
		if (resume_cpu_num > (NR_CPUS / 2) - 1 || resume_cpu_num == 1)
			resume_cpu_num = (NR_CPUS / 2) - 1;
		break;
	case 2:
		if (NR_CPUS >= 4 && resume_cpu_num > (NR_CPUS / 4) - 1)
			resume_cpu_num = (NR_CPUS / 4) - 1;
		break;
	case 0:
		resume_cpu_num = NR_CPUS - 1;
		break;
	default:
		break;
	}

	if (DEBUG)
		pr_info("%s: resume_cpu_num = %d\n", THUNDERPLUG, resume_cpu_num);

	for (cpu = 1; cpu <= resume_cpu_num; cpu++) {
		if (cpu_is_offline(cpu))
			cpu_up(cpu);
	}

	pr_info("%s: all cpus were onlined\n", THUNDERPLUG);
}
Example #14
/** Initialize performance sampling
 * Call this during probe initialization to set up performance event sampling
 * for all online cpus.  Returns non-zero on error.
 *
 * @param stp Handle for the event to be registered.
 */
static long _stp_perf_init (struct stap_perf_probe *stp)
{
	int cpu;

	/* allocate space for the event descriptor for each cpu */
	stp->events = _stp_alloc_percpu (sizeof(struct perf_event*));
	if (stp->events == NULL) {
		return -ENOMEM;
	}

	/* initialize event on each processor */
	stp_for_each_cpu(cpu) {
		struct perf_event **event = per_cpu_ptr (stp->events, cpu);
		if (cpu_is_offline(cpu)) {
			*event = NULL;
			continue;
		}
		*event = perf_event_create_kernel_counter(&stp->attr,
							  cpu, -1,
							  stp->callback);

		if (IS_ERR(*event)) {
			long rc = PTR_ERR(*event);
			*event = NULL;
			_stp_perf_del(stp);
			return rc;
		}
	}
	return 0;
}
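On the error path the routine above calls _stp_perf_del(stp), whose body is not shown. A hedged sketch of what such a teardown might look like, given the per-cpu layout used here, appears below; perf_event_release_kernel() is the standard kernel call for dropping a kernel counter, while _stp_free_percpu() is assumed to mirror _stp_alloc_percpu() and is not shown in the source.

/*
 * Hypothetical counterpart to _stp_perf_init(): release every counter
 * that was successfully created, then free the per-cpu array.  Sketch
 * based on the init routine above, not the actual SystemTap runtime.
 */
static void _stp_perf_del (struct stap_perf_probe *stp)
{
	int cpu;

	if (stp->events == NULL)
		return;

	stp_for_each_cpu(cpu) {
		struct perf_event **event = per_cpu_ptr (stp->events, cpu);
		if (*event)
			perf_event_release_kernel(*event);
	}
	_stp_free_percpu (stp->events);	/* assumed allocator counterpart */
	stp->events = NULL;
}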
Example #15
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(smp_processor_id())) {
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired())
			cpu_idle_poll();
		else
			cpuidle_idle_call();
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_preempt_disabled();
}
Example #16
static inline void cpus_online_all(void)
{
	unsigned int cpu;
	switch (endurance_level) {
	case 1:
		if (resume_cpu_num > 3 || resume_cpu_num == 1)
			resume_cpu_num = 3;
		break;
	case 2:
		if (resume_cpu_num > 1)
			resume_cpu_num = 1;
		break;
	case 0:
		if (resume_cpu_num < 7)
			resume_cpu_num = 7;
		break;
	default:
		break;
	}

	for (cpu = 1; cpu <= resume_cpu_num; cpu++) {
		if (cpu_is_offline(cpu))
			cpu_up(cpu);
	}

	pr_info("%s: all cpus were onlined\n", THUNDERPLUG);
}
Example #17
static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long new_itm;

    if (unlikely(cpu_is_offline(smp_processor_id()))) {
        return IRQ_HANDLED;
    }

    platform_timer_interrupt(irq, dev_id, regs);

    new_itm = local_cpu_data->itm_next;

    if (!time_after(ia64_get_itc(), new_itm))
        printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
               ia64_get_itc(), new_itm);

    profile_tick(CPU_PROFILING, regs);

    while (1) {
        update_process_times(user_mode(regs));

        new_itm += local_cpu_data->itm_delta;

        if (smp_processor_id() == TIME_KEEPER_ID) {
            /*
             * Here we are in the timer irq handler. We have irqs locally
             * disabled, but we don't know if the timer_bh is running on
             * another CPU. We need to avoid an SMP race by acquiring the
             * xtime_lock.
             */
            write_seqlock(&xtime_lock);
            do_timer(regs);
            local_cpu_data->itm_next = new_itm;
            write_sequnlock(&xtime_lock);
        } else
            local_cpu_data->itm_next = new_itm;

        if (time_after(new_itm, ia64_get_itc()))
            break;
    }

    do {
        /*
         * If we're too close to the next clock tick for
         * comfort, we increase the safety margin by
         * intentionally dropping the next tick(s).  We do NOT
         * update itm.next because that would force us to call
         * do_timer() which in turn would let our clock run
         * too fast (with the potentially devastating effect
         * of losing monotonicity of time).
         */
        while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
            new_itm += local_cpu_data->itm_delta;
        ia64_set_itm(new_itm);
        /* double check, in case we got hit by a (slow) PMI: */
    } while (time_after_eq(ia64_get_itc(), new_itm));
    return IRQ_HANDLED;
}
Example #18
static void __ref tplug_boost_work_fn(struct work_struct *work)
{
	int cpu;
	for (cpu = 1; cpu < 4; cpu++) {
		if (cpu_is_offline(cpu))
			cpu_up(cpu);
	}
}
Example #19
/* trigger timers on remote cpu */
void smp_send_pull_timers(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR);
}
Example #20
static void print_cpus_all(void)
{
    unsigned int cpu;

    for (cpu = 0; cpu < core_limit; cpu++) {
        pr_info("%s: [%d]: %d\n", V4TKPLUG, cpu, cpu_is_offline(cpu)?0:1);
    }
}
Example #21
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}
Example #22
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}
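Examples #19, #21 and #22 all follow the same shape: warn and bail out if the target CPU is offline, otherwise fire the IPI. The check is only a sanity net, since without hotplug locking the CPU could still go offline between the test and the send. A minimal, hypothetical wrapper for that pattern might read as follows; the "send" callback stands in for whatever vector-specific routine a caller would use.

#include <linux/cpumask.h>
#include <linux/kernel.h>

/*
 * Sketch of the guard used by the IPI senders above.  The wrapper and
 * its callback parameter are illustrative, not part of any snippet.
 */
static void send_ipi_if_online(int cpu, void (*send)(int cpu))
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ONCE(1, "IPI requested for offline CPU#%d\n", cpu);
		return;
	}
	send(cpu);
}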
Example #23
static int pseries_shared_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		while (!need_resched() && !cpu_is_offline(cpu)) {
			local_irq_disable();
			ppc64_runlatch_off();

			/*
			 * Yield the processor to the hypervisor.  We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another
			 * processor. When returning here, external interrupts
			 * are enabled.
			 *
			 * Check need_resched() again with interrupts disabled
			 * to avoid a race.
			 */
			if (!need_resched())
				cede_processor();
			else
				local_irq_enable();

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
Example #24
static void dvfs_hotplug_callback(struct work_struct *unused)
{
	cpu_hotplug_driver_lock();
	if (cpu_is_offline(NON_BOOT_CPU))
	{	
		cpu_up(NON_BOOT_CPU); // it takes 60ms
	}
	cpu_hotplug_driver_unlock();
}
Example #25
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (cpu_is_offline(smp_processor_id())) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id)
			xtime_update(1);

		local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}
Example #26
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	if (cpu_is_offline(rdp->cpu) &&
	    ULONG_CMP_LT(rdp->rsp->gp_start + 2, jiffies)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
		rdp->offline_fqs++;
		return 1;
	}
	return 0;
}
Example #27
/* Bring online each possible CPU up to max_online cores */
static inline void up_all(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_is_offline(cpu) && num_online_cpus() < max_online)
			cpu_up(cpu);

	down_timer = 0;
}
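Examples #13, #16, #18 and #27 share one idiom: walk the CPU numbers and call cpu_up() on every CPU that cpu_is_offline() reports as down. cpu_up() can fail, which those snippets ignore; below is a hedged sketch that keeps the same idea but checks the return value. The helper name and the want_online limit are illustrative only.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

/*
 * Illustrative helper: bring offline CPUs online until "want_online"
 * CPUs are running.  Mirrors up_all()/cpus_online_all() above, with
 * basic error reporting added.
 */
static void bring_cpus_online(unsigned int want_online)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		if (num_online_cpus() >= want_online)
			break;
		if (cpu_is_offline(cpu)) {
			int ret = cpu_up(cpu);	/* may fail, e.g. with -EBUSY */
			if (ret)
				pr_info("cpu_up(%u) failed: %d\n", cpu, ret);
		}
	}
}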
Example #28
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle).  This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll ||
			    tick_check_broadcast_expired() ||
			    __get_cpu_var(idle_force_poll))
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to reschedule if need_resched is set while
		 * polling is set.  That means that clearing polling
		 * needs to be visible before rescheduling.
		 */
		smp_mb__after_atomic();

		schedule_preempt_disabled();
		if (cpu_is_offline(smp_processor_id()))
			arch_cpu_idle_dead();
	}
}
Example #29
/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
				cpu_idle_poll();
			} else {
				if (!current_clr_polling_and_test()) {
					stop_critical_timings();
					rcu_idle_enter();
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				__current_set_polling();
			}
			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
Example #30
bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty_relaxed(this_list))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}