Example #1
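/*
 * Runs in the CPU-hotplug teardown path after @scpu has gone offline:
 * every hrtimer still queued on the dead CPU is moved over to the CPU
 * executing this function.
 */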
static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
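	/* old_base: the dead CPU's hrtimer base, new_base: this CPU's base. */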
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

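	/* Move every queued timer from each of the dead CPU's clock bases. */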
	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}
Example #2
static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
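	/*
	 * old_base belongs to the dead CPU; get_cpu_var() returns this CPU's
	 * base and disables preemption until put_cpu_var() below.
	 */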
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);

	local_irq_disable();
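	/*
	 * The third argument orders the two locks consistently by CPU
	 * number, so concurrent lockers cannot deadlock against each other.
	 */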
	double_spin_lock(&new_base->lock, &old_base->lock,
			 smp_processor_id() < cpu);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	double_spin_unlock(&new_base->lock, &old_base->lock,
			   smp_processor_id() < cpu);
	local_irq_enable();
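	/* Drops the preemption count taken by get_cpu_var() above. */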
	put_cpu_var(hrtimer_bases);
}
Example #3
static void migrate_hrtimers(int scpu)
{
    struct hrtimer_cpu_base *old_base, *new_base;
    int i;

    BUG_ON(cpu_online(scpu));
    tick_cancel_sched_timer(scpu);

    local_irq_disable();
    old_base = &per_cpu(hrtimer_bases, scpu);
    new_base = &__get_cpu_var(hrtimer_bases);
    raw_spin_lock(&new_base->lock);
    raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

    for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
        migrate_hrtimer_list(&old_base->clock_base[i],
                             &new_base->clock_base[i]);
    }

    raw_spin_unlock(&old_base->lock);
    raw_spin_unlock(&new_base->lock);

    /* Check if we got expired work to do */
    __hrtimer_peek_ahead_timers();
    local_irq_enable();
}
Example #4
static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i, raise = 0;
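	/* Set when a migrated timer still needs the hrtimer softirq to run. */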

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		if (migrate_hrtimer_list(&old_base->clock_base[i],
					 &new_base->clock_base[i], cpu))
			raise = 1;
	}

	if (migrate_hrtimer_pending(old_base, new_base))
		raise = 1;

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(hrtimer_bases);

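	/* Have the softirq on this CPU handle the callbacks we pulled over. */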
	if (raise)
		hrtimer_raise_softirq();
}