int
kthread_create_cpu(void (*func)(void *), void *arg, struct thread **tdp,
                   int cpu, const char *fmt, ...)
{
        thread_t td;
        __va_list ap;

        td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, cpu, TDF_VERBOSE);
        if (tdp)
                *tdp = td;
        cpu_set_thread_handler(td, kthread_exit, func, arg);

        /*
         * Set up arg0 for 'ps' etc
         */
        __va_start(ap, fmt);
        kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
        __va_end(ap);

        td->td_ucred = crhold(proc0.p_ucred);

        /*
         * Schedule the thread to run
         */
        lwkt_schedule(td);
        return 0;
}
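/*
 * Usage sketch (not part of the original source): start one named worker
 * per cpu with kthread_create_cpu(), so 'ps' shows a distinct thread per
 * cpu.  The worker function, the "excpu"/"exwk%d" names and the wakeup
 * interval are hypothetical; only the kthread_create_cpu() signature
 * above is taken from the source.
 */
static void
example_percpu_worker(void *arg)
{
        int cpu = (int)(intptr_t)arg;

        for (;;) {
                /* ... per-cpu work for 'cpu' would go here ... */
                tsleep(&cpu, 0, "excpu", hz);   /* poll roughly once a second */
        }
}

static void
example_start_percpu_workers(void)
{
        struct thread *td;
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                kthread_create_cpu(example_percpu_worker,
                                   (void *)(intptr_t)cpu, &td, cpu,
                                   "exwk%d", cpu);
        }
}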
void
ath_hal_printf(struct ath_hal *ah, const char *fmt, ...)
{
        __va_list ap;

        __va_start(ap, fmt);
        ath_hal_vprintf(ah, fmt, ap);
        __va_end(ap);
}
void
kprintf0(const char *fmt, ...)
{
        __va_list ap;

        __va_start(ap, fmt);
        kvcprintf(fmt, PCHAR_, NULL, 10, ap);
        __va_end(ap);
}
void
mpt_prtc(struct mpt_softc *mpt, const char *fmt, ...)
{
        __va_list ap;

        __va_start(ap, fmt);
        kvprintf(fmt, ap);
        __va_end(ap);
}
/*
 * Printing
 *
 * NOTE: We bypass subr_prf's cons_spin here by using our own putchar
 * function.
 */
void
db_printf(const char *fmt, ...)
{
        __va_list listp;

        __va_start(listp, fmt);
        kvcprintf(fmt, db_putchar, NULL, db_radix, listp);
        __va_end(listp);
        /* DELAY(100000); */
}
void
mpt_prt(struct mpt_softc *mpt, const char *fmt, ...)
{
        __va_list ap;

        kprintf("%s: ", device_get_nameunit(mpt->dev));
        __va_start(ap, fmt);
        kvprintf(fmt, ap);
        __va_end(ap);
}
void
hkprintf(const char *ctl, ...)
{
        __va_list va;

        if (hammer_debug_debug) {
                __va_start(va, ctl);
                kvprintf(ctl, va);
                __va_end(va);
        }
}
/*
 * Format the given arguments and append the resulting string to an sbuf.
 */
int
sbuf_printf(struct sbuf *s, const char *fmt, ...)
{
        __va_list ap;
        int result;

        __va_start(ap, fmt);
        result = sbuf_vprintf(s, fmt, ap);
        __va_end(ap);
        return (result);
}
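/*
 * Usage sketch (not part of the original file): build a small report with
 * sbuf_printf() and print the finished buffer.  It assumes the usual sbuf
 * lifecycle of sbuf_new() / sbuf_finish() / sbuf_data() / sbuf_delete();
 * the unit/error report itself is hypothetical.
 */
static void
example_sbuf_report(int unit, int errors)
{
        struct sbuf *sb;

        sb = sbuf_new(NULL, NULL, 128, SBUF_AUTOEXTEND);
        if (sb == NULL)
                return;
        sbuf_printf(sb, "unit %d: ", unit);
        sbuf_printf(sb, "%d error%s", errors, errors == 1 ? "" : "s");
        sbuf_finish(sb);
        kprintf("%s\n", sbuf_data(sb));
        sbuf_delete(sb);
}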
static int
fct2(int dummy, ...)
{
        va_list argp;
        int ret = dummy;

        __va_start(argp, dummy);
        ret += fct1(argp, SZ_ARGS);
        __va_end(argp);
        return ret;
}
void
DO_HALDEBUG(struct ath_hal *ah, u_int mask, const char *fmt, ...)
{
        if ((mask == HAL_DEBUG_UNMASKABLE) ||
            (ah != NULL && ah->ah_config.ah_debug & mask) ||
            (ath_hal_debug & mask)) {
                __va_list ap;

                __va_start(ap, fmt);
                ath_hal_vprintf(ah, fmt, ap);
                __va_end(ap);
        }
}
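/*
 * Usage sketch (illustrative only): gate a verbose message on one of the
 * HAL debug masks so it is printed only when that mask is enabled.  The
 * call site, message text and frequency argument are hypothetical; only
 * the DO_HALDEBUG() signature above is taken from the source.
 */
static void
example_reset_debug(struct ath_hal *ah, u_int freq)
{
        DO_HALDEBUG(ah, HAL_DEBUG_RESET, "%s: chip reset, freq %u MHz\n",
            __func__, freq);
}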
static int
disk_debug(int level, char *fmt, ...)
{
        __va_list ap;

        __va_start(ap, fmt);
        if (level <= disk_debug_enable)
                kvprintf(fmt, ap);
        __va_end(ap);
        return 0;
}
void
db_iprintf(const char *fmt, ...)
{
        int i;
        __va_list listp;

        for (i = db_indent; i >= 8; i -= 8)
                db_printf("\t");
        while (--i >= 0)
                db_printf(" ");
        __va_start(listp, fmt);
        kvcprintf(fmt, db_putchar, NULL, db_radix, listp);
        __va_end(listp);
}
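/*
 * Usage sketch (illustrative): ddb "show" routines typically bump the
 * global db_indent around nested output so db_iprintf() lines children up
 * under their parent.  The object/child layout printed here is
 * hypothetical.
 */
static void
example_db_show_entry(void *obj, int nchildren)
{
        int i;

        db_iprintf("object %p, %d children\n", obj, nchildren);
        db_indent += 4;
        for (i = 0; i < nchildren; i++)
                db_iprintf("child #%d\n", i);
        db_indent -= 4;
}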
static void
rndtest_report(struct rndtest_state *rsp, int failure, const char *fmt, ...)
{
        char buf[80];
        __va_list ap;

        if (rndtest_verbose == 0)
                return;
        if (!failure && rndtest_verbose == 1)   /* don't report successes */
                return;
        __va_start(ap, fmt);
        kvsnprintf(buf, sizeof (buf), fmt, ap);
        __va_end(ap);
        device_printf(rsp->rs_parent, "rndtest: %s\n", buf);
}
/*
 * Same as kthread_create() but you can specify a custom stack size.
 */
int
kthread_create_stk(void (*func)(void *), void *arg, struct thread **tdp,
                   int stksize, const char *fmt, ...)
{
        thread_t td;
        __va_list ap;

        td = lwkt_alloc_thread(NULL, stksize, -1, TDF_VERBOSE);
        if (tdp)
                *tdp = td;
        cpu_set_thread_handler(td, kthread_exit, func, arg);
        __va_start(ap, fmt);
        kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
        __va_end(ap);
        lwkt_schedule(td);
        return 0;
}
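/*
 * Usage sketch (not from the original source): create a worker with a
 * larger-than-default stack, e.g. for a call chain too deep for
 * LWKT_THREAD_STACK.  The 64KB size, worker function and "deepstk" name
 * are hypothetical.  When the worker returns, the kthread_exit() handler
 * installed by kthread_create_stk() above runs.
 */
static void
example_deepstack_worker(void *arg)
{
        /* ... work that needs the extra stack ... */
}

static int
example_deepstack_start(void)
{
        struct thread *td;

        return kthread_create_stk(example_deepstack_worker, NULL, &td,
                                  65536, "deepstk");
}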
/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * XXX only the SMB protocol uses this, we should convert this mess to a
 * pure thread when possible.
 */
int
smb_kthread_create(void (*func)(void *), void *arg,
                   struct proc **newpp, int flags, const char *fmt, ...)
{
        int error;
        __va_list ap;
        struct proc *p2;
        struct lwp *lp2;

        error = fork1(&lwp0, RFMEM | RFFDG | RFPROC | flags, &p2);
        if (error)
                return error;

        /* save a global descriptor, if desired */
        if (newpp != NULL)
                *newpp = p2;

        /* this is a non-swapped system process */
        p2->p_flags |= P_SYSTEM;
        p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
        lp2 = ONLY_LWP_IN_PROC(p2);

        /* set up arg0 for 'ps', et al */
        __va_start(ap, fmt);
        kvsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
        __va_end(ap);

        lp2->lwp_thread->td_ucred = crhold(proc0.p_ucred);

        /* call the process's main()... */
        cpu_set_fork_handler(lp2,
                             (void (*)(void *, struct trapframe *))func, arg);
        start_forked_proc(&lwp0, p2);

        return 0;
}
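/*
 * Usage sketch (illustrative, not the actual smbfs call site): start a
 * kernel-only helper process and remember its struct proc.  The helper
 * function, "smbhlp%d" name and unit number are hypothetical; only the
 * smb_kthread_create() signature above is taken from the source.
 */
static void
example_smb_helper_main(void *arg)
{
        /* ... service loop ... */
}

static int
example_smb_helper_start(int unit)
{
        struct proc *p;

        return smb_kthread_create(example_smb_helper_main, NULL, &p, 0,
                                  "smbhlp%d", unit);
}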
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
        int bootopt, newpanic;
        globaldata_t gd = mycpu;
        thread_t td = gd->gd_curthread;
        __va_list ap;
        static char buf[256];

#ifdef SMP
        /*
         * If a panic occurs on multiple cpus before the first is able to
         * halt the other cpus, only one cpu is allowed to take the panic.
         * Attempt to be verbose about this situation but if the kprintf()
         * itself panics don't let us overrun the kernel stack.
         *
         * Be very nasty about descheduling our thread at the lowest
         * level possible in an attempt to freeze the thread without
         * inducing further panics.
         *
         * Bumping gd_trap_nesting_level will also bypass assertions in
         * lwkt_switch() and allow us to switch away even if we are a
         * FAST interrupt or IPI.
         *
         * The setting of panic_cpu_gd also determines how kprintf()
         * spin-locks itself.  DDB can set panic_cpu_gd as well.
         */
        for (;;) {
                globaldata_t xgd = panic_cpu_gd;

                /*
                 * Someone else got the panic cpu
                 */
                if (xgd && xgd != gd) {
                        crit_enter();
                        ++mycpu->gd_trap_nesting_level;
                        if (mycpu->gd_trap_nesting_level < 25) {
                                kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
                                        mycpu->gd_cpuid, td);
                        }
                        td->td_release = NULL;  /* be a grinch */
                        for (;;) {
                                lwkt_deschedule_self(td);
                                lwkt_switch();
                        }
                        /* NOT REACHED */
                        /* --mycpu->gd_trap_nesting_level */
                        /* crit_exit() */
                }

                /*
                 * Reentrant panic
                 */
                if (xgd && xgd == gd)
                        break;

                /*
                 * We got it
                 */
                if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
                        break;
        }
#else
        panic_cpu_gd = gd;
#endif

        /*
         * Try to get the system into a working state.  Save information
         * we are about to destroy.
         */
        kvcreinitspin();
        if (panicstr == NULL) {
                bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
                panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
        }
        lwkt_relalltokens(td);
        td->td_toks_stop = &td->td_toks_base;

        /*
         * Setup
         */
        bootopt = RB_AUTOBOOT | RB_DUMP;
        if (sync_on_panic == 0)
                bootopt |= RB_NOSYNC;
        newpanic = 0;
        if (panicstr) {
                bootopt |= RB_NOSYNC;
        } else {
                panicstr = fmt;
                newpanic = 1;
        }

        /*
         * Format the panic string.
         */
        __va_start(ap, fmt);
        kvsnprintf(buf, sizeof(buf), fmt, ap);
        if (panicstr == fmt)
                panicstr = buf;
        __va_end(ap);
        kprintf("panic: %s\n", buf);
#ifdef SMP
        /* two separate prints in case of an unmapped page and trap */
        kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
        led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
        wdog_disable();
#endif

        /*
         * Enter the debugger or fall through & dump.  Entering the
         * debugger will stop cpus.  If not entering the debugger stop
         * cpus here.
         */
#if defined(DDB)
        if (newpanic && trace_on_panic)
                print_backtrace(-1);
        if (debugger_on_panic)
                Debugger("panic");
        else
#endif
#ifdef SMP
        if (newpanic)
                stop_cpus(mycpu->gd_other_cpus);
#else
        ;
#endif
        boot(bootopt);
}
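/*
 * Usage sketch (illustrative): the common caller pattern is a consistency
 * check that panics with enough context to identify the failing object.
 * The structure, magic value and message below are hypothetical.
 */
#define EXAMPLE_MAGIC   0x5ab0cafe

struct example_softc {
        uint32_t        sc_magic;
};

static void
example_check(struct example_softc *sc)
{
        if (sc->sc_magic != EXAMPLE_MAGIC)
                panic("example: corrupt softc %p (magic %08x != %08x)",
                      sc, sc->sc_magic, EXAMPLE_MAGIC);
}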
/*
 * Append a non-NUL character to an sbuf.  This prototype signature is
 * suitable for use with kvcprintf(9).
 */
static void
sbuf_putc_func(int c, void *arg)
{
        if (c != '\0')
                sbuf_put_byte(arg, c);
}

int
sbuf_vprintf(struct sbuf *s, const char *fmt, __va_list ap)
{
        assert_sbuf_integrity(s);
        assert_sbuf_state(s, 0);

        KASSERT(fmt != NULL,
            ("%s called with a NULL format string", __func__));

        (void)kvcprintf(fmt, sbuf_putc_func, s, 10, ap);
        if (s->s_error != 0)
                return (-1);
        return (0);
}
#else /* !_KERNEL */
int
sbuf_vprintf(struct sbuf *s, const char *fmt, __va_list ap)
{
        __va_list ap_copy;
        int error, len;

        assert_sbuf_integrity(s);
        assert_sbuf_state(s, 0);

        KASSERT(fmt != NULL,
            ("%s called with a NULL format string", __func__));

        if (s->s_error != 0)
                return (-1);

        /*
         * For the moment, there is no way to get vsnprintf(3) to hand
         * back a character at a time, to push everything into
         * sbuf_putc_func() as was done for the kernel.
         *
         * In userspace, while drains are useful, there's generally
         * not a problem attempting to malloc(3) on out of space.  So
         * expand a userland sbuf if there is not enough room for the
         * data produced by sbuf_[v]printf(3).
         */
        error = 0;
        do {
                va_copy(ap_copy, ap);
                len = vsnprintf(&s->s_buf[s->s_len], SBUF_FREESPACE(s) + 1,
                    fmt, ap_copy);
                __va_end(ap_copy);

                if (SBUF_FREESPACE(s) >= len)
                        break;
                /* Cannot print with the current available space. */
                if (s->s_drain_func != NULL && s->s_len > 0)
                        error = sbuf_drain(s);
                else
                        error = sbuf_extend(s, len - SBUF_FREESPACE(s));
        } while (error == 0);

        /*
         * s->s_len is the length of the string, without the terminating nul.
         * When updating s->s_len, we must subtract 1 from the length that
         * we passed into vsnprintf() because that length includes the
         * terminating nul.
         *
         * vsnprintf() returns the amount that would have been copied,
         * given sufficient space, so don't over-increment s_len.
         */
        if (SBUF_FREESPACE(s) < len)
                len = SBUF_FREESPACE(s);
        s->s_len += len;
        if (SBUF_ISSECTION(s))
                s->s_sect_len += len;
        if (!SBUF_HASROOM(s) && !SBUF_CANEXTEND(s))
                s->s_error = ENOMEM;

        KASSERT(s->s_len < s->s_size,
            ("wrote past end of sbuf (%d >= %d)", s->s_len, s->s_size));

        if (s->s_error != 0)
                return (-1);
        return (0);
}
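/*
 * Usage sketch (not part of the original file): a prefixed, printf-style
 * helper built directly on sbuf_vprintf(), following the same
 * __va_start()/__va_end() pattern as sbuf_printf() earlier in this
 * section.  The helper name is hypothetical; sbuf_cat() is the standard
 * sbuf string-append routine.
 */
static int
example_sbuf_printf_prefixed(struct sbuf *s, const char *prefix,
                             const char *fmt, ...)
{
        __va_list ap;
        int result;

        sbuf_cat(s, prefix);
        __va_start(ap, fmt);
        result = sbuf_vprintf(s, fmt, ap);
        __va_end(ap);
        return (result);
}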