static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;

        for (i = 0; i < loops; i++) {
                if (arch_spin_trylock(&lock->raw_lock))
                        return;
                __delay(1);
        }
        /* lockup suspected: */
        spin_bug(lock, "lockup suspected");
#ifdef CONFIG_SMP
        trigger_all_cpu_backtrace();
#endif

        /*
         * The trylock above was causing a livelock. Give the lower level arch
         * specific lock code a chance to acquire the lock. We have already
         * printed a warning/backtrace at this point. The non-debug arch
         * specific code might actually succeed in acquiring the lock. If it is
         * not successful, the end-result is the same - there is no forward
         * progress.
         */
        arch_spin_lock(&lock->raw_lock);
}
void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
        debug_spin_lock_after(lock);
}
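A minimal usage sketch for the debug path above: with CONFIG_DEBUG_SPINLOCK enabled, a plain raw_spin_lock() call is expected to funnel into do_raw_spin_lock(); the lock name demo_lock and the caller demo_critical_section() are hypothetical names used only for illustration.

#include <linux/spinlock.h>

/* Hypothetical lock and caller, only to show how the debug path is reached. */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
        raw_spin_lock(&demo_lock);      /* with CONFIG_DEBUG_SPINLOCK this reaches do_raw_spin_lock() */
        /* ... short critical section ... */
        raw_spin_unlock(&demo_lock);
}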
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
                                        "%s/%d, %ps\n",
                                raw_smp_processor_id(), current->comm,
                                task_pid_nr(current), lock);
                        printk(KERN_EMERG "%s: loops_per_jiffy=%lu, HZ=%d, loops=%llu\n",
                                __func__, loops_per_jiffy, HZ, loops);
                        /* Added by HTC */
                        spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
#endif
                }
        }
}
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
//                      printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
//                              "%s/%d, %p\n",
//                              raw_smp_processor_id(), current->comm,
                        ;
                        dump_stack();
#ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
#endif
                }
        }
}
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * LOOP_HZ;
        int print_once = 1;
        char aee_str[40];
        unsigned long long t1;

        t1 = sched_clock();
        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                printk("spin time: %llu ns(start:%llu ns, lpj:%lu, HZ:%d)",
                        sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ);
                if (print_once) {
                        print_once = 0;
                        spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
#endif
                        debug_show_all_locks();
                        sprintf(aee_str, "Spinlock lockup:%s\n", current->comm);
                        aee_kernel_exception(aee_str, "spinlock debugger\n");
                }
        }
}
unsigned __kprobes long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        /*
         * Notify the hw-branch tracer so it may disable tracing and add the
         * last trace to the trace buffer - the earlier this happens, the more
         * useful the trace.
         */
        trace_hw_branch_oops();

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;
        char aee_str[40];

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                task_pid_nr(current), lock);
                        sprintf(aee_str, "Spinlock lockup:%s\n", current->comm);
                        aee_kernel_exception(aee_str, "spinlock debugger\n");
                        dump_stack();
#ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
#endif
                }
        }
}
void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
#if 0   /* Temporarily comment out for testing hrtimer spinlock issue */
        if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
#else
        arch_spin_lock(&lock->raw_lock);
#endif
        debug_spin_lock_after(lock);
}
int __ipipe_spin_trylock_irq(ipipe_spinlock_t *lock)
{
        unsigned long flags;

        flags = hard_local_irq_save();
        if (!arch_spin_trylock(&lock->arch_lock)) {
                hard_local_irq_restore(flags);
                return 0;
        }
        __set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);

        return 1;
}
int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}
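A hedged usage sketch for the trylock path: raw_spin_trylock() is the usual entry point that, with spinlock debugging enabled, ends up in do_raw_spin_trylock() above. The names demo_lock and demo_try_fast_path() are hypothetical.

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/* Hypothetical caller: take the lock opportunistically, never spin. */
static bool demo_try_fast_path(void)
{
        if (!raw_spin_trylock(&demo_lock))
                return false;           /* contended: caller picks a fallback */
        /* ... short critical section ... */
        raw_spin_unlock(&demo_lock);
        return true;
}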
int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock, unsigned long *x)
{
        unsigned long flags;
        int s;

        flags = hard_local_irq_save();
        if (!arch_spin_trylock(&lock->arch_lock)) {
                hard_local_irq_restore(flags);
                return 0;
        }
        s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
        *x = arch_mangle_irq_bits(s, flags);

        return 1;
}
unsigned __kprobes long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
#endif
                }
        }
}
unsigned __kprobes long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        ;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}
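A hedged sketch of how oops_begin() is typically paired: the saved flags are handed back to a matching oops_end() once the oops text has been printed. The die() body below is an approximation for illustration, not verbatim kernel code.

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();     /* takes die_lock, bumps die_nest_count */
        int sig = SIGSEGV;

        if (__die(str, regs, err))
                sig = 0;                        /* a notifier handled it: no signal */

        oops_end(flags, regs, sig);             /* drops die_lock, restores irqs, may panic */
}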
static void __spin_lock_debug(raw_spinlock_t *lock)
{
#ifdef CONFIG_MTK_MUTATION
        u64 i;
        u64 loops = loops_per_jiffy * LOOP_HZ;
        int print_once = 1;
        char aee_str[50];
        unsigned long long t1, t2;

        t1 = sched_clock();
        t2 = t1;
        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                if (sched_clock() - t2 < WARNING_TIME)
                        continue;
                t2 = sched_clock();
                if (oops_in_progress != 0)
                        continue; /* in the exception path, printk itself may trip a spinlock error */
                /* lockup suspected: */
                printk("spin time: %llu ns(start:%llu ns, lpj:%lu, LPHZ:%d), value: 0x%08x\n",
                        sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ,
                        lock->raw_lock.slock);
                if (print_once) {
                        print_once = 0;
                        spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
#endif
                        debug_show_all_locks();
                        snprintf(aee_str, 50, "Spinlock lockup:%s\n", current->comm);
                        aee_kernel_warning_api(__FILE__, __LINE__,
                                DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE,
                                aee_str, "spinlock debugger\n");
#ifdef CONFIG_PANIC_ON_DEBUG_SPINLOCK
                        panic("Please check this spin_lock bug warning! if it is okay, disable CONFIG_PANIC_ON_DEBUG_SPINLOCK and ignore this warning!\n");
#endif
                }
        }
#else /* CONFIG_MTK_MUTATION */
        u64 i;
        u64 loops = loops_per_jiffy * HZ;

        for (i = 0; i < loops; i++) {
                if (arch_spin_trylock(&lock->raw_lock))
                        return;
                __delay(1);
        }
        /* lockup suspected: */
        spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
        trigger_all_cpu_backtrace();
#endif

        /*
         * The trylock above was causing a livelock. Give the lower level arch
         * specific lock code a chance to acquire the lock. We have already
         * printed a warning/backtrace at this point. The non-debug arch
         * specific code might actually succeed in acquiring the lock. If it is
         * not successful, the end-result is the same - there is no forward
         * progress.
         */
        arch_spin_lock(&lock->raw_lock);
#endif /* CONFIG_MTK_MUTATION */
}
DLLEXPORT bool ke_spin_lock_try(struct ke_spinlock *lock)
{
        /* Return true if the lock was acquired, false if it is contended. */
        return arch_spin_trylock((arch_spinlock_t *)lock);
}