/*
 * As long as db_cpu is neither -1 nor cpu_number(), we know that the
 * debugger is active on another cpu.
 */
void
lock_db(void)
{
    int my_cpu = cpu_number();

    for (;;) {
#if CONSOLE_ON_MASTER
        if (my_cpu == master_cpu) {
            db_console();
        }
#endif /* CONSOLE_ON_MASTER */
        /* Cheap unlocked pre-check: spin while another cpu owns the debugger. */
        if (db_cpu != -1 && db_cpu != my_cpu)
            continue;

#if CONSOLE_ON_MASTER
        if (my_cpu == master_cpu) {
            if (!simple_lock_try(&db_lock))
                continue;
        } else {
            simple_lock(&db_lock);
        }
#else /* CONSOLE_ON_MASTER */
        simple_lock(&db_lock);
#endif /* CONSOLE_ON_MASTER */

        /* Re-check under the lock; another cpu may have won the race. */
        if (db_cpu == -1 || db_cpu == my_cpu)
            break;
        simple_unlock(&db_lock);
    }
}
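The acquire-then-revalidate shape of lock_db() generalizes: check the condition cheaply without the lock, take the lock, then re-check before committing. Below is a minimal user-space sketch of that shape using C11 atomics; owner_cpu, db_lock_sketch, and the ownership store are hypothetical stand-ins (in the kernel, db_cpu is set elsewhere in the debugger path).

#include <stdatomic.h>

static atomic_int  owner_cpu = -1;                 /* db_cpu analogue; -1 = free */
static atomic_flag db_lock_sketch = ATOMIC_FLAG_INIT;

static void
lock_db_sketch(int my_cpu)
{
    for (;;) {
        /* Cheap unlocked pre-check: busy-wait while another cpu owns it. */
        int cur = atomic_load(&owner_cpu);
        if (cur != -1 && cur != my_cpu)
            continue;

        while (atomic_flag_test_and_set(&db_lock_sketch))
            ;                                      /* simple_lock() analogue */

        /* Re-validate under the lock; the owner may have changed meanwhile. */
        cur = atomic_load(&owner_cpu);
        if (cur == -1 || cur == my_cpu) {
            atomic_store(&owner_cpu, my_cpu);      /* claim; the kernel sets db_cpu elsewhere */
            return;                                /* exits with the lock held */
        }
        atomic_flag_clear(&db_lock_sketch);        /* lost the race; retry */
    }
}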
void
timer_queue_shutdown(
    mpqueue_head_t  *queue)
{
    timer_call_t    call;
    mpqueue_head_t  *new_queue;
    spl_t           s;

    DBG("timer_queue_shutdown(%p)\n", queue);

    s = splclock();

    /* Note comma operator in while expression re-locking each iteration */
    while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));
        if (!simple_lock_try(&call->lock)) {
            /*
             * case (2b) lock order inversion, dequeue and skip
             * Don't change the call_entry queue back-pointer
             * but set the async_dequeue field.
             */
            timer_queue_shutdown_lock_skips++;
            timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                call,
                call->async_dequeue,
                CE(call)->queue,
                0x2b, 0);
#endif
            timer_queue_unlock(queue);
            continue;
        }

        /* remove entry from old queue */
        timer_call_entry_dequeue(call);
        timer_queue_unlock(queue);

        /* and queue it on new */
        new_queue = timer_queue_assign(CE(call)->deadline);
        timer_queue_lock_spin(new_queue);
        timer_call_entry_enqueue_deadline(
            call, new_queue, CE(call)->deadline);
        timer_queue_unlock(new_queue);

        simple_unlock(&call->lock);
    }

    timer_queue_unlock(queue);
    splx(s);
}
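The comma operator in the while condition is easy to misread, so here is the same drain idiom reduced to a user-space pthread sketch: the lock is (re)taken immediately before every emptiness test, and the loop therefore exits with the lock still held, which is why one final unlock follows it. All names here are illustrative stand-ins.

#include <pthread.h>
#include <stddef.h>

struct node  { struct node *next; };
struct queue { pthread_mutex_t lock; struct node *head; };

static void
drain(struct queue *q)
{
    /* Comma operator: re-take the lock before each emptiness test. */
    while (pthread_mutex_lock(&q->lock), q->head != NULL) {
        struct node *n = q->head;
        q->head = n->next;              /* unlink under the lock */
        pthread_mutex_unlock(&q->lock);
        /* ... process n without holding the queue lock ... */
    }
    pthread_mutex_unlock(&q->lock);     /* the loop exits with the lock held */
}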
void
simple_lock(
    long    *l)
{
    int nloops = 0;

    if (simple_lock_try(l))
        return;

    if (use_Simple_lock)
        Simple_lock(l);
    else
        while (!simple_lock_try(l)) {
            vm_offset_t phys = kvtophys((vm_offset_t)l);

            /* flush Dcache every now and then */
            if ((nloops & 0x1f) == 0)
                alphacache_Dflush(phys);
            if (++nloops > 100000) {
                gimmeabreak();
                nloops = 0;
            }
        }
}
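This fallback path combines three ideas: spin on a try-lock, perform periodic cache maintenance so a stale line cannot hide the unlock, and trap to the debugger if the spin runs absurdly long. A standalone sketch of the same shape follows; the maintenance and break hooks are placeholders, not the Alpha routines.

#include <stdatomic.h>

static void
maintenance(void *addr)  { (void)addr; /* stands in for alphacache_Dflush() */ }

static void
watchdog_break(void)     { /* stands in for gimmeabreak() */ }

static void
spin_lock_with_watchdog(atomic_flag *l)
{
    int nloops = 0;

    while (atomic_flag_test_and_set(l)) {
        if ((nloops & 0x1f) == 0)       /* every 32 failed tries */
            maintenance(l);
        if (++nloops > 100000) {        /* suspected deadlock */
            watchdog_break();
            nloops = 0;
        }
    }
}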
void
kprintf(const char *fmt, ...)
{
    va_list   listp;
    boolean_t state;

    if (!disable_serial_output) {
        boolean_t early = FALSE;
        if (rdmsr64(MSR_IA32_GS_BASE) == 0) {
            early = TRUE;
        }
        /* If PE_kputc has not yet been initialized, don't
         * take any locks, just dump to serial */
        if (!PE_kputc || early) {
            va_start(listp, fmt);
            _doprnt(fmt, &listp, pal_serial_putc, 16);
            va_end(listp);
            return;
        }

        /*
         * Spin to get kprintf lock but re-enable interrupts while
         * failing.
         * This allows interrupts to be handled while waiting but
         * interrupts are disabled once we have the lock.
         */
        state = ml_set_interrupts_enabled(FALSE);

        pal_preemption_assert();

        while (!simple_lock_try(&kprintf_lock)) {
            ml_set_interrupts_enabled(state);
            ml_set_interrupts_enabled(FALSE);
        }

        if (cpu_number() != cpu_last_locked) {
            MP_DEBUG_KPRINTF("[cpu%d...]\n", cpu_number());
            cpu_last_locked = cpu_number();
        }

        va_start(listp, fmt);
        _doprnt(fmt, &listp, PE_kputc, 16);
        va_end(listp);

        simple_unlock(&kprintf_lock);
        ml_set_interrupts_enabled(state);
    }
}
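The spin above deliberately reopens the interrupt window on every failed try, so a long wait cannot hold off interrupt delivery. Here is a user-space analogue using signal masking in place of interrupt disabling; the names and the signals-for-interrupts mapping are assumptions for illustration only.

#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>

static atomic_flag print_lock_sketch = ATOMIC_FLAG_INIT;

/* On return the lock is held and signals are blocked; the caller
 * unlocks and then restores *saved, mirroring kprintf()'s epilogue. */
static void
lock_with_signal_window(sigset_t *saved)
{
    sigset_t all;
    sigfillset(&all);

    pthread_sigmask(SIG_BLOCK, &all, saved);        /* "disable interrupts" */
    while (atomic_flag_test_and_set(&print_lock_sketch)) {
        pthread_sigmask(SIG_SETMASK, saved, NULL);  /* reopen the window */
        pthread_sigmask(SIG_BLOCK, &all, NULL);     /* close it and retry */
    }
}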
/*
 * Conditionally acquire a usimple_lock.
 *
 * On success, returns with preemption disabled.
 * On failure, returns with preemption in the same state
 * as when first invoked. Note that the hw_lock routines
 * are responsible for maintaining preemption state.
 *
 * XXX No stats are gathered on a miss; I preserved this
 * behavior from the original assembly-language code, but
 * doesn't it make sense to log misses? XXX
 */
unsigned int
usimple_lock_try(
    usimple_lock_t l)
{
#ifndef MACHINE_SIMPLE_LOCK
    unsigned int success;
    DECL_PC(pc);

    OBTAIN_PC(pc, l);
    USLDBG(usld_lock_try_pre(l, pc));
    if ((success = hw_lock_try(&l->interlock))) {
        USLDBG(usld_lock_try_post(l, pc));
    }
    return success;
#else
    return(simple_lock_try((simple_lock_t)l));
#endif
}
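A hypothetical caller showing what the contract above buys: on a hit the lock is held with preemption disabled, and on a miss nothing has changed, so there is no cleanup path. stats_lock and the function body are illustrative names, not kernel symbols.

static void
update_stats_if_uncontended(void)
{
    if (usimple_lock_try(&stats_lock)) {
        /* ... modify the protected state ... */
        usimple_unlock(&stats_lock);    /* releases lock, restores preemption */
    }
    /* miss: preemption state untouched; simply skip this round */
}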
uint64_t
timer_queue_expire_with_options(
    mpqueue_head_t  *queue,
    uint64_t        deadline,
    boolean_t       rescan)
{
    timer_call_t    call = NULL;
    uint32_t        tc_iterations = 0;
    DBG("timer_queue_expire(%p,)\n", queue);

    uint64_t cur_deadline = deadline;
    timer_queue_lock_spin(queue);

    while (!queue_empty(&queue->head)) {
        /* Upon processing one or more timer calls, refresh the
         * deadline to account for time elapsed in the callout
         */
        if (++tc_iterations > 1)
            cur_deadline = mach_absolute_time();

        if (call == NULL)
            call = TIMER_CALL(queue_first(&queue->head));

        if (call->soft_deadline <= cur_deadline) {
            timer_call_func_t   func;
            timer_call_param_t  param0, param1;

            TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline,
                call->soft_deadline, 0, 0, 0);
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
                call,
                call->soft_deadline,
                CE(call)->deadline,
                CE(call)->entry_time, 0);

            /* Bit 0 of the "soft" deadline indicates that
             * this particular timer call is rate-limited
             * and hence shouldn't be processed before its
             * hard deadline.
             */
            if ((call->soft_deadline & 0x1) &&
                (CE(call)->deadline > cur_deadline)) {
                if (rescan == FALSE)
                    break;
            }

            if (!simple_lock_try(&call->lock)) {
                /* case (2b) lock inversion, dequeue and skip */
                timer_queue_expire_lock_skips++;
                timer_call_entry_dequeue_async(call);
                call = NULL;
                continue;
            }

            timer_call_entry_dequeue(call);

            func = CE(call)->func;
            param0 = CE(call)->param0;
            param1 = CE(call)->param1;

            simple_unlock(&call->lock);
            timer_queue_unlock(queue);

            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_CALLOUT | DBG_FUNC_START,
                call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

#if CONFIG_DTRACE
            DTRACE_TMR7(callout__start, timer_call_func_t, func,
                timer_call_param_t, param0, unsigned, call->flags,
                0, (call->ttd >> 32),
                (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
            /* Maintain time-to-deadline in per-processor data
             * structure for thread wakeup deadline statistics.
             */
            uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
            *ttdp = call->ttd;
            (*func)(param0, param1);
            *ttdp = 0;
#if CONFIG_DTRACE
            DTRACE_TMR4(callout__end, timer_call_func_t, func,
                param0, param1, call);
#endif

            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_CALLOUT | DBG_FUNC_END,
                call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
            call = NULL;
            timer_queue_lock_spin(queue);
        } else {
            if (__probable(rescan == FALSE)) {
                /* ... excerpt truncated here; the remainder of the
                 * rescan handling and the function epilogue are not
                 * included in this listing ... */
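The core locking discipline of the expire loop: dequeue an entry under the queue lock, then drop the lock before invoking the callout so the handler can re-arm timers (or take the queue lock itself) without deadlocking. A minimal pthread sketch of just that discipline, with stand-in types.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

struct tcall  { struct tcall *next; uint64_t deadline; void (*func)(void *); void *arg; };
struct tqueue { pthread_mutex_t lock; struct tcall *head; };

static void
expire_sketch(struct tqueue *q, uint64_t now)
{
    pthread_mutex_lock(&q->lock);
    while (q->head != NULL && q->head->deadline <= now) {
        struct tcall *c = q->head;
        q->head = c->next;              /* dequeue under the lock */
        pthread_mutex_unlock(&q->lock);
        c->func(c->arg);                /* callout runs with no locks held */
        pthread_mutex_lock(&q->lock);   /* retake before the next test */
    }
    pthread_mutex_unlock(&q->lock);
}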
void
timer_queue_shutdown(
    mpqueue_head_t  *queue)
{
    timer_call_t    call;
    mpqueue_head_t  *new_queue;
    spl_t           s;

    DBG("timer_queue_shutdown(%p)\n", queue);

    s = splclock();

    /* Note comma operator in while expression re-locking each iteration */
    while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));

        if (!simple_lock_try(&call->lock)) {
            /*
             * case (2b) lock order inversion, dequeue and skip
             * Don't change the call_entry queue back-pointer
             * but set the async_dequeue field.
             */
            timer_queue_shutdown_lock_skips++;
            timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
            TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
                DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
                VM_KERNEL_UNSLIDE_OR_PERM(call),
                call->async_dequeue,
                VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue),
                0x2b, 0);
#endif
            timer_queue_unlock(queue);
            continue;
        }

        boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0);

        /* remove entry from old queue */
        timer_call_entry_dequeue(call);
        timer_queue_unlock(queue);

        if (call_local == FALSE) {
            /* and queue it on new, discarding LOCAL timers */
            new_queue = timer_queue_assign(TCE(call)->deadline);
            timer_queue_lock_spin(new_queue);
            timer_call_entry_enqueue_deadline(
                call, new_queue, TCE(call)->deadline);
            timer_queue_unlock(new_queue);
        } else {
            timer_queue_shutdown_discarded++;
        }

        /* The only lingering LOCAL timer should be this thread's
         * quantum expiration timer.
         */
        assert((call_local == FALSE) ||
            (TCE(call)->func == thread_quantum_expire));

        simple_unlock(&call->lock);
    }

    timer_queue_unlock(queue);
    splx(s);
}
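The "case (2b)" escape appears in the shutdown, expire, and migrate loops alike: with the queue lock held, a failed try on the per-entry lock means the entry's owner may be spinning for our queue lock, so blocking would invert the lock order. Instead the entry is unlinked, its queue back-pointer is left stale, and an async_dequeue flag is raised for the owner to reconcile later. A sketch of that move, with hypothetical stand-ins for the timer_call fields.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct tqueue;                          /* opaque here */

struct tentry {
    struct tentry   *prev, *next;       /* circular, doubly linked */
    atomic_flag      lock;
    struct tqueue   *queue;             /* back-pointer */
    atomic_bool      async_dequeue;
};

/* Caller holds the queue lock. Returns true on a full dequeue, false
 * when the entry was only unlinked asynchronously. */
static bool
detach_entry(struct tentry *e)
{
    if (atomic_flag_test_and_set(&e->lock)) {
        /* Entry lock held elsewhere: unlink, but leave e->queue stale
         * and flag the async dequeue for the owner to notice. */
        e->prev->next = e->next;
        e->next->prev = e->prev;
        atomic_store(&e->async_dequeue, true);
        return false;
    }
    e->prev->next = e->next;            /* full dequeue under both locks */
    e->next->prev = e->prev;
    e->queue = NULL;                    /* back-pointer cleared this time */
    atomic_flag_clear(&e->lock);
    return true;
}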
void
_doprnt(
    const char  *fmt,
    va_list     argp,
    /* character output routine */
    void        (*putc)(char, vm_offset_t),
    int         radix,          /* default radix - for '%r' */
    vm_offset_t putc_arg)
{
    int             length;
    int             prec;
    boolean_t       ladjust;
    char            padc;
    long            n;
    unsigned long   u;
    int             plus_sign;
    int             sign_char;
    boolean_t       altfmt, truncate;
    int             base;
    char            c;

    printf_init();

#if 0
    /* Make sure that we get *some* printout, no matter what */
    simple_lock(&_doprnt_lock);
#else
    {
        int i = 0;
        while (i < 1*1024*1024) {
            if (simple_lock_try(&_doprnt_lock))
                break;
            i++;
        }
    }
#endif

    while ((c = *fmt) != '\0') {
        if (c != '%') {
            (*putc)(c, putc_arg);
            fmt++;
            continue;
        }

        fmt++;

        length = 0;
        prec = -1;
        ladjust = FALSE;
        padc = ' ';
        plus_sign = 0;
        sign_char = 0;
        altfmt = FALSE;

        while (TRUE) {
            c = *fmt;
            if (c == '#') {
                altfmt = TRUE;
            } else if (c == '-') {
                ladjust = TRUE;
            } else if (c == '+') {
                plus_sign = '+';
            } else if (c == ' ') {
                if (plus_sign == 0)
                    plus_sign = ' ';
            } else
                break;
            fmt++;
        }

        if (c == '0') {
            padc = '0';
            c = *++fmt;
        }

        if (isdigit(c)) {
            while (isdigit(c)) {
                length = 10 * length + Ctod(c);
                c = *++fmt;
            }
        } else if (c == '*') {
            length = va_arg(argp, int);
            c = *++fmt;
            if (length < 0) {
                ladjust = !ladjust;
                length = -length;
            }
        }

        if (c == '.') {
            c = *++fmt;
            if (isdigit(c)) {
                prec = 0;
                while (isdigit(c)) {
                    prec = 10 * prec + Ctod(c);
                    c = *++fmt;
                }
            } else if (c == '*') {
                prec = va_arg(argp, int);
                c = *++fmt;
            }
            /* ... excerpt truncated here; the conversion and output
             * handling follow in the full source ... */
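The #else branch bounds the spin: after roughly a million failed tries it prints anyway, preferring possibly interleaved output to a silent hang in a diagnostic path. The same idea in isolation; the lock name and the bound are illustrative.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag doprnt_lock_sketch = ATOMIC_FLAG_INIT;

/* Returns true if the lock was acquired; false means the bound was hit
 * and the caller should proceed unlocked rather than deadlock. */
static bool
try_lock_bounded(void)
{
    for (int i = 0; i < 1 * 1024 * 1024; i++)
        if (!atomic_flag_test_and_set(&doprnt_lock_sketch))
            return true;
    return false;
}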
/*
 * timer_queue_migrate() is called by etimer_queue_migrate()
 * to move timer requests from the local processor (queue_from)
 * to a target processor's (queue_to).
 */
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
    timer_call_t    call;
    timer_call_t    head_to;
    int             timers_migrated = 0;

    DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

    assert(!ml_get_interrupts_enabled());
    assert(queue_from != queue_to);

    if (serverperfmode) {
        /*
         * if we're running a high end server
         * avoid migrations... they add latency
         * and don't save us power under typical
         * server workloads
         */
        return -4;
    }

    /*
     * Take both local (from) and target (to) timer queue locks while
     * moving the timers from the local queue to the target processor.
     * We assume that the target is always the boot processor.
     * But only move if all of the following is true:
     *  - the target queue is non-empty
     *  - the local queue is non-empty
     *  - the local queue's first deadline is later than the target's
     *  - the local queue contains no non-migrateable "local" call
     *    so that we need not have the target resync.
     */

    timer_call_lock_spin(queue_to);

    head_to = TIMER_CALL(queue_first(&queue_to->head));
    if (queue_empty(&queue_to->head)) {
        timers_migrated = -1;
        goto abort1;
    }

    timer_call_lock_spin(queue_from);

    if (queue_empty(&queue_from->head)) {
        timers_migrated = -2;
        goto abort2;
    }

    call = TIMER_CALL(queue_first(&queue_from->head));
    if (CE(call)->deadline < CE(head_to)->deadline) {
        timers_migrated = 0;
        goto abort2;
    }

    /* perform scan for non-migratable timers */
    do {
        if (call->flags & TIMER_CALL_LOCAL) {
            timers_migrated = -3;
            goto abort2;
        }
        call = TIMER_CALL(queue_next(qe(call)));
    } while (!queue_end(&queue_from->head, qe(call)));

    /* migration loop itself -- both queues are locked */
    while (!queue_empty(&queue_from->head)) {
        call = TIMER_CALL(queue_first(&queue_from->head));
        if (!simple_lock_try(&call->lock)) {
            /* case (2b) lock order inversion, dequeue only */
            timer_queue_migrate_lock_skips++;
            (void) remque(qe(call));
            call->async_dequeue = TRUE;
            continue;
        }
        timer_call_entry_dequeue(call);
        timer_call_entry_enqueue_deadline(
            call, queue_to, CE(call)->deadline);
        timers_migrated++;
        simple_unlock(&call->lock);
    }

abort2:
    timer_call_unlock(queue_from);
abort1:
    timer_call_unlock(queue_to);

    return timers_migrated;
}
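Deadlock avoidance here rests on a fixed lock order: the target queue is always locked before the source, and each early exit unwinds only the locks actually held (abort2 releases both, abort1 just the target). A reduced pthread sketch of that shape, with stand-in types and counters.

#include <pthread.h>

struct q { pthread_mutex_t lock; int n; };

static int
migrate_sketch(struct q *from, struct q *to)
{
    int moved = 0;

    pthread_mutex_lock(&to->lock);                  /* target first ... */
    if (to->n == 0) { moved = -1; goto abort1; }

    pthread_mutex_lock(&from->lock);                /* ... then source */
    if (from->n == 0) { moved = -2; goto abort2; }

    while (from->n > 0) {                           /* move with both locks held */
        from->n--;
        to->n++;
        moved++;
    }
abort2:
    pthread_mutex_unlock(&from->lock);
abort1:
    pthread_mutex_unlock(&to->lock);
    return moved;
}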
uint64_t
timer_queue_expire(
    mpqueue_head_t  *queue,
    uint64_t        deadline)
{
    timer_call_t call;

    DBG("timer_queue_expire(%p,)\n", queue);

    timer_call_lock_spin(queue);

    while (!queue_empty(&queue->head)) {
        call = TIMER_CALL(queue_first(&queue->head));

        if (call->soft_deadline <= deadline) {
            timer_call_func_t   func;
            timer_call_param_t  param0, param1;

            if (!simple_lock_try(&call->lock)) {
                /* case (2b) lock inversion, dequeue and skip */
                timer_queue_expire_lock_skips++;
                (void) remque(qe(call));
                call->async_dequeue = TRUE;
                continue;
            }

            timer_call_entry_dequeue(call);

            func = CE(call)->func;
            param0 = CE(call)->param0;
            param1 = CE(call)->param1;

            simple_unlock(&call->lock);
            timer_call_unlock(queue);

            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                DECR_TIMER_CALLOUT | DBG_FUNC_START,
                VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
            DTRACE_TMR3(callout__start, timer_call_func_t, func,
                timer_call_param_t, param0,
                timer_call_param_t, param1);
#endif

            (*func)(param0, param1);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG)
            DTRACE_TMR3(callout__end, timer_call_func_t, func,
                timer_call_param_t, param0,
                timer_call_param_t, param1);
#endif

            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                DECR_TIMER_CALLOUT | DBG_FUNC_END,
                VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

            timer_call_lock_spin(queue);
        } else
            break;
    }

    if (!queue_empty(&queue->head))
        deadline = CE(call)->deadline;
    else
        deadline = UINT64_MAX;

    timer_call_unlock(queue);

    return (deadline);
}
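The return-value convention at the tail matters to callers: they re-arm the hardware timer with the head entry's deadline, or with UINT64_MAX as the "nothing pending" sentinel. Reduced to a sketch with a stand-in type.

#include <stdint.h>
#include <stddef.h>

struct entry { uint64_t deadline; };

static uint64_t
next_hw_deadline(const struct entry *head)  /* queue head, or NULL if empty */
{
    return (head != NULL) ? head->deadline : UINT64_MAX;
}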