/*
 * Enqueue a timer call entry onto the given queue, ordered by deadline,
 * and update the queue's cached earliest soft deadline and entry counts.
 * Both the entry lock and the queue lock must be held by the caller.
 * Returns the queue the entry was previously on (NULL if none).
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t		*prev_queue = MPQUEUE(CE(entry)->queue);
	timer_call_t		head_call;

	/* Caller must hold the entry's lock. */
	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense: */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	/* An already-queued entry may only be re-queued onto the same queue. */
	if (prev_queue != NULL && prev_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", prev_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	/* For efficiency, track the earliest soft deadline on the queue, so that
	 * fuzzy decisions can be made without lock acquisitions.
	 */
	head_call = (timer_call_t)queue_first(&queue->head);
	queue->earliest_soft_deadline = head_call->soft_deadline;

	/* Re-queuing onto the same queue nets out to no count change. */
	if (prev_queue != NULL)
		prev_queue->count--;
	queue->count++;

	return (prev_queue);
}
/*
 * Acquire a usimple_lock.
 *
 * MACH_RT: Returns with preemption disabled.  Note
 * that the hw_lock routines are responsible for
 * maintaining preemption state.
 *
 * Spins (test-and-set, then passive read-spin) until the interlock
 * is acquired.  Optional ETAP tracing records miss/hold times and
 * optional USLOCK_DEBUG detects apparent deadlocks after
 * max_lock_loops iterations.
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	int i;			/* NOTE(review): appears unused in this configuration — possibly referenced under other #ifs; confirm before removing */
	unsigned int timeouttb;	/* Used to convert time to timebase ticks */
				/* NOTE(review): also appears unused here — TODO confirm */
	pc_t		pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;	/* timestamp of first miss, for hold accounting */
	int		no_miss_info = 0;	/* record miss info only on the first failed try */
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int		count = 0;		/* inner spin iterations, for deadlock detection */
#endif	/* USLOCK_DEBUG */

	/* Capture the caller's PC for debug/trace bookkeeping. */
	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

	while (!hw_lock_try(&l->interlock)) {
		/* First miss only: stamp the start of the wait for ETAP. */
		ETAPCALL(if (no_miss_info++ == 0)
			start_wait_time = etap_simplelock_miss(l));
		while (hw_lock_held(&l->interlock)) {
			/*
			 * Spin watching the lock value in cache,
			 * without consuming external bus cycles.
			 * On most SMP architectures, the atomic
			 * instruction(s) used by hw_lock_try
			 * cost much, much more than an ordinary
			 * memory read.
			 */
#if	USLOCK_DEBUG
			if (count++ > max_lock_loops
#if	MACH_KDB && NCPUS > 1
			    && l != &kdb_lock	/* debugger's own lock is exempt */
#endif	/* MACH_KDB && NCPUS > 1 */
			    ) {
				/*
				 * Give up silently on the printf lock: reporting
				 * the deadlock would itself need that lock.
				 */
				if (l == &printf_lock) {
					return;
				}
				mp_disable_preemption();
#if MACH_KDB
				db_printf("cpu %d looping on simple_lock(%x)"
					"(=%x)", cpu_number(), l,
					*hw_lock_addr(l->interlock));
				db_printf(" called by %x\n", pc);
#endif
				Debugger("simple lock deadlock detection");
				/* Resume spinning after the debugger returns. */
				count = 0;
				mp_enable_preemption();
			}
#endif	/* USLOCK_DEBUG */
		}
	}
	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}
/*
 * Remove a timer call entry from whatever queue it is on.
 * Both the entry lock and the containing queue's lock must be
 * held by the caller.  Returns the queue the entry was on.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t		*containing_queue = MPQUEUE(CE(entry)->queue);

	/* Caller must hold the entry's lock. */
	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_dequeue() "
			"entry %p is not locked\n", entry);
	/*
	 * XXX The queue lock is actually a mutex in spin mode
	 *	but there's no way to test for it being held
	 *	so we pretend it's a spinlock!
	 */
	if (!hw_lock_held((hw_lock_t)&containing_queue->lock_data))
		panic("_call_entry_dequeue() "
			"queue %p is not locked\n", containing_queue);

	call_entry_dequeue(CE(entry));

	return (containing_queue);
}
/*
 * Enqueue a timer call entry onto the given queue, ordered by deadline.
 * Both the entry lock and the queue lock must be held by the caller.
 * Returns the queue the entry was previously on (NULL if none).
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t		*prev_queue = MPQUEUE(CE(entry)->queue);

	/* Caller must hold the entry's lock. */
	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense: */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	/* An already-queued entry may only be re-queued onto the same queue. */
	if (prev_queue != NULL && prev_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", prev_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	return (prev_queue);
}
/*
 * Acquire a simple lock without ETAP tracing: try the interlock,
 * and on failure spin on a plain read until it appears free before
 * retrying the atomic acquire.
 */
void
simple_lock_no_trace(
	simple_lock_t		l)
{
	pc_t			caller_pc;

	OBTAIN_PC(caller_pc, l);
	USLDBG(usld_lock_pre(l, caller_pc));
	for (;;) {
		if (hw_lock_try(&l->interlock))
			break;
		/*
		 * Spin watching the lock value in cache,
		 * without consuming external bus cycles.
		 * On most SMP architectures, the atomic
		 * instruction(s) used by hw_lock_try
		 * cost much, much more than an ordinary
		 * memory read.
		 */
		while (hw_lock_held(&l->interlock))
			continue;
	}
	USLDBG(usld_lock_post(l, caller_pc));
}