/*
 * Slow path for acquiring a spinlock with a saved interrupt state:
 * busy-wait with the caller's interrupt state restored, and disable
 * interrupts only around each actual acquisition attempt.  On success
 * the function returns with interrupts disabled (no final restore).
 *
 * lp->owner_cpu stores the bitwise complement of the owning CPU id
 * (so 0 means "unlocked"); note the matching ~ applied both when
 * computing `cpu` and when handing the owner to the smp_* helpers.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	/* Restore the caller's interrupt state while we busy-wait. */
	local_irq_restore(flags);
	while (1) {
		owner = lp->owner_cpu;
		/*
		 * Only burn spin_retry iterations when the lock is free or
		 * the owning virtual CPU is currently scheduled; otherwise
		 * spinning cannot succeed until the owner runs again.
		 */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				/* Lock looks free: attempt CAS with irqs off. */
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return; /* acquired; irqs stay disabled */
				/* Lost the race: re-enable irqs, keep spinning. */
				local_irq_restore(flags);
			}
			/*
			 * On LPAR, skip the yield below and restart the spin
			 * loop.  NOTE(review): presumably because yielding to
			 * a hypervisor is not useful there - confirm.
			 */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Re-read the owner and donate our timeslice to it, if any. */
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		/* One more acquisition attempt before the next round. */
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
/*
 * Entry point for a secondary CPU during SMP bring-up: with interrupts
 * off, park until launch_lock is released (presumably by the boot CPU
 * once bring-up data is ready - TODO confirm against the caller), then
 * install the stack and global pointers published in secondary_sp /
 * secondary_gp and jump straight into smp_bootstrap.
 */
static void __init prom_smp_bootstrap(void)
{
	local_irq_disable();
	/* Busy-wait until launch_lock is dropped; empty loop body. */
	while (arch_spin_is_locked(&launch_lock));
	/*
	 * Set $sp/$gp and jump.  A plain `j` keeps no return address, so
	 * control never comes back to this function.
	 */
	__asm__ __volatile__(
		" move $sp, %0 \n"
		" move $gp, %1 \n"
		" j smp_bootstrap \n"
		: : "r" (secondary_sp), "r" (secondary_gp));
}
/*
 * Attempt to take the spinlock without sleeping, giving up after a
 * bounded number of tries.
 *
 * Returns 1 when the lock was acquired, 0 when all spin_retry
 * attempts failed.  The value stored in owner_cpu is the bitwise
 * complement of this CPU's id, so 0 always means "unlocked".
 */
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int attempts_left = spin_retry;

	while (attempts_left-- > 0) {
		/* Avoid the atomic operation while the lock is visibly held. */
		if (arch_spin_is_locked(lp))
			continue;
		/* Lock looks free: try to claim it atomically. */
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
/*
 * Spin until the lock is no longer held by anyone, backing off a
 * little longer on every iteration via delay_backoff().  Does not
 * acquire the lock itself.
 */
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 backoff_round = 0;

	for (; arch_spin_is_locked(lock); backoff_round++)
		delay_backoff(backoff_round);
}