/*
 * Acquire a usimple_lock.
 *
 * Spins until the lock's hardware interlock is obtained; never fails.
 * If USLOCK_DEBUG is configured, detects apparent deadlock (too many
 * spin iterations) and drops into the debugger.
 *
 * MACH_RT: Returns with preemption disabled.  Note
 * that the hw_lock routines are responsible for
 * maintaining preemption state.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	pc_t		pc;	/* caller's pc, for debug/trace records */
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;	/* log the miss only once */
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;	/* spin iterations, for deadlock detection */
#endif	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	while (!hw_lock_try(&l->interlock)) {
		/* Record the miss (and its start time) only on first failure. */
		ETAPCALL(if (no_miss_info++ == 0)
			start_wait_time = etap_simplelock_miss(l));
		while (hw_lock_held(&l->interlock)) {
			/*
			 * Spin watching the lock value in cache,
			 * without consuming external bus cycles.
			 * On most SMP architectures, the atomic
			 * instruction(s) used by hw_lock_try
			 * cost much, much more than an ordinary
			 * memory read.
			 */
#if	USLOCK_DEBUG
			if (count++ > max_lock_loops
#if	MACH_KDB && NCPUS > 1
			    && l != &kdb_lock	/* kdb_lock may legitimately spin long */
#endif	/* MACH_KDB && NCPUS > 1 */
			    ) {
				/*
				 * Bail rather than deadlock-report on the
				 * printf lock: reporting itself printfs.
				 */
				if (l == &printf_lock) {
					return;
				}
				mp_disable_preemption();
#if MACH_KDB
				db_printf("cpu %d looping on simple_lock(%x)"
					  "(=%x)",
					  cpu_number(), l,
					  *hw_lock_addr(l->interlock));
				db_printf(" called by %x\n", pc);
#endif
				Debugger("simple lock deadlock detection");
				/* Debugger returned; reset and keep spinning. */
				count = 0;
				mp_enable_preemption();
			}
#endif	/* USLOCK_DEBUG */
		}
	}
	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
/*
 * Conditionally acquire a simple_lock without emitting trace records.
 *
 * Returns non-zero on success, zero if the lock was already held.
 * Debug pre/post hooks still run when USLOCK_DEBUG is configured.
 */
int
simple_lock_try_no_trace(
	simple_lock_t	l)
{
	pc_t		pc;	/* caller's pc, for debug records */
	unsigned int	success;

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	/* Extra parens: assignment in condition is intentional. */
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return success;
}
/*
 * Conditionally acquire a usimple_lock.
 *
 * Returns non-zero on success, zero if the lock was already held.
 *
 * MACH_RT: On success, returns with preemption disabled.
 * On failure, returns with preemption in the same state
 * as when first invoked.  Note that the hw_lock routines
 * are responsible for maintaining preemption state.
 *
 * XXX No stats are gathered on a miss; I preserved this
 * behavior from the original assembly-language code, but
 * doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
	pc_t		pc;	/* caller's pc, for debug/trace records */
	unsigned int	success;
	etap_time_t	zero_time;	/* no wait occurred on a successful try */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	/* Extra parens: assignment in condition is intentional. */
	if ((success = hw_lock_try(&l->interlock))) {
		USLDBG(usld_lock_try_post(l, pc));
		ETAP_TIME_CLEAR(zero_time);
		ETAPCALL(etap_simplelock_hold(l, pc, zero_time));
	}
	return success;
}
/*
 * Conditionally acquire a usimple_lock.
 *
 * On success, returns with preemption disabled.
 * On failure, returns with preemption in the same state
 * as when first invoked.  Note that the hw_lock routines
 * are responsible for maintaining preemption state.
 *
 * XXX No stats are gathered on a miss; I preserved this
 * behavior from the original assembly-language code, but
 * doesn't it make sense to log misses?  XXX
 */
unsigned int
usimple_lock_try(
	usimple_lock_t	l)
{
#ifndef	MACHINE_SIMPLE_LOCK
	unsigned int	acquired;
	DECL_PC(pc);

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_try_pre(l, pc));
	/* One shot at the interlock; do not spin. */
	acquired = hw_lock_try(&l->interlock);
	if (acquired) {
		USLDBG(usld_lock_try_post(l, pc));
	}
	return acquired;
#else
	/* Machine-specific implementation handles everything. */
	return(simple_lock_try((simple_lock_t)l));
#endif
}
/*
 * Acquire a simple_lock without emitting trace records.
 *
 * Spins until the lock is obtained; never fails.  Debug pre/post
 * hooks still run when USLOCK_DEBUG is configured.
 */
void
simple_lock_no_trace(
	simple_lock_t	l)
{
	pc_t	pc;	/* caller's pc, for debug records */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
	for (;;) {
		if (hw_lock_try(&l->interlock))
			break;
		/*
		 * Spin watching the lock value in cache,
		 * without consuming external bus cycles.
		 * On most SMP architectures, the atomic
		 * instruction(s) used by hw_lock_try
		 * cost much, much more than an ordinary
		 * memory read.
		 */
		while (hw_lock_held(&l->interlock))
			continue;
	}
	USLDBG(usld_lock_post(l, pc));
}