/*
 * Compiler test case: exercises the MSVC-style __va_start intrinsic in its
 * fully macro-expanded form (what va_start()/va_arg() expand to on an ABI
 * with 4-byte stack slots).  Returns the first variadic argument read as an
 * int.  The expansion is intentionally kept verbatim — do not "clean up"
 * the arithmetic, it is the object under test.
 * NOTE(review): uses reinterpret_cast, so this translation unit is C++,
 * not C, despite the file's .c-style metadata.
 */
int test___va_start(int i, ...) { va_list ap; __va_start(&ap, ( &reinterpret_cast<const char &>(i) ), ( (sizeof(i) + 4 - 1) & ~(4 - 1) ), ( &reinterpret_cast<const char &>(i) )); return (*(int *)((ap += ( (sizeof(int) + 4 - 1) & ~(4 - 1) ) + ( ((va_list)0 - (ap)) & (__alignof(int) - 1) )) - ( (sizeof(int) + 4 - 1) & ~(4 - 1) ))); }
int kthread_create_cpu(void (*func)(void *), void *arg, struct thread **tdp, int cpu, const char *fmt, ...) { thread_t td; __va_list ap; td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, cpu, TDF_VERBOSE); if (tdp) *tdp = td; cpu_set_thread_handler(td, kthread_exit, func, arg); /* * Set up arg0 for 'ps' etc */ __va_start(ap, fmt); kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap); __va_end(ap); td->td_ucred = crhold(proc0.p_ucred); /* * Schedule the thread to run */ lwkt_schedule(td); return 0; }
/*
 * printf-style HAL console output; forwards the varargs to
 * ath_hal_vprintf().
 */
void
ath_hal_printf(struct ath_hal *ah, const char *fmt, ...)
{
	__va_list args;

	__va_start(args, fmt);
	ath_hal_vprintf(ah, fmt, args);
	__va_end(args);
}
/*
 * Bare-bones kernel printf: formats via kvcprintf() with the PCHAR_
 * output routine, no stream argument, radix 10.
 */
void
kprintf0(const char *fmt, ...)
{
	__va_list args;

	__va_start(args, fmt);
	kvcprintf(fmt, PCHAR_, NULL, 10, args);
	__va_end(args);
}
/*
 * Continuation print for the mpt driver: raw kvprintf() with no
 * device-name prefix (see mpt_prt() for the prefixed variant).
 */
void
mpt_prtc(struct mpt_softc *mpt, const char *fmt, ...)
{
	__va_list args;

	__va_start(args, fmt);
	kvprintf(fmt, args);
	__va_end(args);
}
/*
 * Debugger printf.
 *
 * NOTE: We bypass subr_prf's cons_spin here by using our own putchar
 * function (db_putchar) and printing in the debugger's current radix.
 */
void
db_printf(const char *fmt, ...)
{
	__va_list args;

	__va_start(args, fmt);
	kvcprintf(fmt, db_putchar, NULL, db_radix, args);
	__va_end(args);
	/* DELAY(100000);*/
}
/*
 * Prefixed print for the mpt driver: emits "<nameunit>: " first, then
 * the formatted message.
 */
void
mpt_prt(struct mpt_softc *mpt, const char *fmt, ...)
{
	__va_list args;

	kprintf("%s: ", device_get_nameunit(mpt->dev));
	__va_start(args, fmt);
	kvprintf(fmt, args);
	__va_end(args);
}
/*
 * Format the given arguments and append the resulting string to an sbuf.
 * Returns the result of sbuf_vprintf().
 */
int
sbuf_printf(struct sbuf *s, const char *fmt, ...)
{
	__va_list args;
	int rv;

	__va_start(args, fmt);
	rv = sbuf_vprintf(s, fmt, args);
	__va_end(args);
	return (rv);
}
/*
 * Varargs test helper: starts with 'dummy' and accumulates whatever
 * fct1() computes from the SZ_ARGS variadic arguments.
 */
static int
fct2(int dummy, ...)
{
	va_list args;
	int total = dummy;

	__va_start(args, dummy);
	total += fct1(args, SZ_ARGS);
	__va_end(args);
	return total;
}
/*
 * HAMMER debug printf: output only when hammer_debug_debug is enabled,
 * otherwise a no-op (the varargs are never started).
 */
void
hkprintf(const char *ctl, ...)
{
	__va_list args;

	if (hammer_debug_debug) {
		__va_start(args, ctl);
		kvprintf(ctl, args);
		__va_end(args);
	}
}
/*
 * Leveled debug print for the disk layer: the message is emitted only
 * when 'level' is at or below disk_debug_enable.  Always returns 0.
 */
static int
disk_debug(int level, char *fmt, ...)
{
	__va_list args;

	__va_start(args, fmt);
	if (level <= disk_debug_enable)
		kvprintf(fmt, args);
	__va_end(args);
	return 0;
}
/* * ppb_MS_init_msq() * * Initialize a microsequence - see macros in ppb_msq.h * */ int ppb_MS_init_msq(struct ppb_microseq *msq, int nbparam, ...) { int i; int param, ins, arg, type; __va_list p_list; __va_start(p_list, nbparam); for (i=0; i<nbparam; i++) { /* retrieve the parameter descriptor */ param = __va_arg(p_list, int); ins = MS_INS(param); arg = MS_ARG(param); type = MS_TYP(param); /* check the instruction position */ if (arg >= PPB_MS_MAXARGS) panic("%s: parameter out of range (0x%x)!", __func__, param); #if 0 kprintf("%s: param = %d, ins = %d, arg = %d, type = %d\n", __func__, param, ins, arg, type); #endif /* properly cast the parameter */ switch (type) { case MS_TYP_INT: msq[ins].arg[arg].i = __va_arg(p_list, int); break; case MS_TYP_CHA: msq[ins].arg[arg].i = (int)__va_arg(p_list, int); break; case MS_TYP_PTR: msq[ins].arg[arg].p = __va_arg(p_list, void *); break; case MS_TYP_FUN: msq[ins].arg[arg].f = __va_arg(p_list, void *); break; default: panic("%s: unknown parameter (0x%x)!", __func__, param); } } return (0); }
/*
 * Masked HAL debug print.  The message is emitted when the mask is
 * unmaskable, is enabled in the per-device debug mask (ah may be NULL),
 * or is enabled in the global ath_hal_debug mask; otherwise nothing
 * happens and the varargs are never started.
 */
void
DO_HALDEBUG(struct ath_hal *ah, u_int mask, const char *fmt, ...)
{
	if ((mask == HAL_DEBUG_UNMASKABLE) ||
	    (ah != NULL && ah->ah_config.ah_debug & mask) ||
	    (ath_hal_debug & mask)) {
		__va_list args;

		__va_start(args, fmt);
		ath_hal_vprintf(ah, fmt, args);
		__va_end(args);
	}
}
/*
 * Indented debugger printf: emits db_indent columns of leading
 * whitespace (tabs for each full 8 columns, then single spaces) before
 * formatting the message like db_printf().
 */
void
db_iprintf(const char *fmt, ...)
{
	__va_list args;
	int col;

	for (col = db_indent; col >= 8; col -= 8)
		db_printf("\t");
	while (--col >= 0)
		db_printf(" ");
	__va_start(args, fmt);
	kvcprintf(fmt, db_putchar, NULL, db_radix, args);
	__va_end(args);
}
/*
 * Report an rndtest result on the parent device.  Output is gated by
 * rndtest_verbose: 0 suppresses everything, 1 reports failures only,
 * higher values report successes too.  The message is formatted into a
 * bounded local buffer (kvsnprintf truncates).
 */
static void
rndtest_report(struct rndtest_state *rsp, int failure, const char *fmt, ...)
{
	char msg[80];
	__va_list args;

	if (rndtest_verbose == 0)
		return;
	if (!failure && rndtest_verbose == 1)	/* don't report successes */
		return;
	__va_start(args, fmt);
	kvsnprintf(msg, sizeof(msg), fmt, args);
	__va_end(args);
	device_printf(rsp->rs_parent, "rndtest: %s\n", msg);
}
/* * Same as kthread_create() but you can specify a custom stack size. */ int kthread_create_stk(void (*func)(void *), void *arg, struct thread **tdp, int stksize, const char *fmt, ...) { thread_t td; __va_list ap; td = lwkt_alloc_thread(NULL, stksize, -1, TDF_VERBOSE); if (tdp) *tdp = td; cpu_set_thread_handler(td, kthread_exit, func, arg); __va_start(ap, fmt); kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap); __va_end(ap); lwkt_schedule(td); return 0; }
/* * Create a kernel process/thread/whatever. It shares it's address space * with proc0 - ie: kernel only. * * XXX only the SMB protocol uses this, we should convert this mess to a * pure thread when possible. */ int smb_kthread_create(void (*func)(void *), void *arg, struct proc **newpp, int flags, const char *fmt, ...) { int error; __va_list ap; struct proc *p2; struct lwp *lp2; error = fork1(&lwp0, RFMEM | RFFDG | RFPROC | flags, &p2); if (error) return error; /* save a global descriptor, if desired */ if (newpp != NULL) *newpp = p2; /* this is a non-swapped system process */ p2->p_flags |= P_SYSTEM; p2->p_sigacts->ps_flag |= PS_NOCLDWAIT; lp2 = ONLY_LWP_IN_PROC(p2); /* set up arg0 for 'ps', et al */ __va_start(ap, fmt); kvsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap); __va_end(ap); lp2->lwp_thread->td_ucred = crhold(proc0.p_ucred); /* call the processes' main()... */ cpu_set_fork_handler(lp2, (void (*)(void *, struct trapframe *))func, arg); start_forked_proc(&lwp0, p2); return 0; }
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	int bootopt, newpanic;
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	__va_list ap;
	/* static: panic must not depend on a (possibly corrupt) stack */
	static char buf[256];

#ifdef SMP
	/*
	 * If a panic occurs on multiple cpus before the first is able to
	 * halt the other cpus, only one cpu is allowed to take the panic.
	 * Attempt to be verbose about this situation but if the kprintf()
	 * itself panics don't let us overrun the kernel stack.
	 *
	 * Be very nasty about descheduling our thread at the lowest
	 * level possible in an attempt to freeze the thread without
	 * inducing further panics.
	 *
	 * Bumping gd_trap_nesting_level will also bypass assertions in
	 * lwkt_switch() and allow us to switch away even if we are a
	 * FAST interrupt or IPI.
	 *
	 * The setting of panic_cpu_gd also determines how kprintf()
	 * spin-locks itself.  DDB can set panic_cpu_gd as well.
	 */
	for (;;) {
		globaldata_t xgd = panic_cpu_gd;

		/*
		 * Someone else got the panic cpu
		 */
		if (xgd && xgd != gd) {
			crit_enter();
			++mycpu->gd_trap_nesting_level;
			if (mycpu->gd_trap_nesting_level < 25) {
				kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
					mycpu->gd_cpuid, td);
			}
			td->td_release = NULL;	/* be a grinch */
			/* park this cpu forever; the panic cpu reboots */
			for (;;) {
				lwkt_deschedule_self(td);
				lwkt_switch();
			}
			/* NOT REACHED */
			/* --mycpu->gd_trap_nesting_level */
			/* crit_exit() */
		}

		/*
		 * Reentrant panic
		 */
		if (xgd && xgd == gd)
			break;

		/*
		 * We got it
		 */
		if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
			break;
	}
#else
	panic_cpu_gd = gd;
#endif
	/*
	 * Try to get the system into a working state.  Save information
	 * we are about to destroy.
	 */
	kvcreinitspin();
	if (panicstr == NULL) {
		/* first panic: snapshot held tokens for post-mortem */
		bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
		panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
	}
	lwkt_relalltokens(td);
	td->td_toks_stop = &td->td_toks_base;

	/*
	 * Setup
	 */
	bootopt = RB_AUTOBOOT | RB_DUMP;
	if (sync_on_panic == 0)
		bootopt |= RB_NOSYNC;
	newpanic = 0;

	if (panicstr) {
		/* recursive panic: skip the disk sync */
		bootopt |= RB_NOSYNC;
	} else {
		panicstr = fmt;
		newpanic = 1;
	}

	/*
	 * Format the panic string.
	 */
	__va_start(ap, fmt);
	kvsnprintf(buf, sizeof(buf), fmt, ap);
	if (panicstr == fmt)
		panicstr = buf;
	__va_end(ap);
	kprintf("panic: %s\n", buf);
#ifdef SMP
	/* two separate prints in case of an unmapped page and trap */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
	led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
	wdog_disable();
#endif

	/*
	 * Enter the debugger or fall through & dump.  Entering the
	 * debugger will stop cpus.  If not entering the debugger stop
	 * cpus here.
	 */
#if defined(DDB)
	if (newpanic && trace_on_panic)
		print_backtrace(-1);
	if (debugger_on_panic)
		Debugger("panic");
	else
#endif
#ifdef SMP
	if (newpanic)
		stop_cpus(mycpu->gd_other_cpus);
#else
	;
#endif
	boot(bootopt);
}
/*
 * Compiler test case: verifies that the __va_start intrinsic accepts a
 * cv-qualified last named parameter — the expansion strips const/volatile
 * via const_cast around reinterpret_cast before taking the address.  The
 * outer ((void)(...)) discards the intrinsic's result.  As with
 * test___va_start above, the expansion text is the object under test and
 * must be kept verbatim; this is C++, not C.
 */
void test___va_start_ignore_const(const char *format, ...) { va_list args; ((void)(__va_start(&args, (&const_cast<char &>(reinterpret_cast<const volatile char &>(format))), ((sizeof(format) + 4 - 1) & ~(4 - 1)), (&const_cast<char &>(reinterpret_cast<const volatile char &>(format)))))); }