/*
 * panicsys() is the common kernel panic path.  It captures the panicking
 * thread's state, quiesces the other CPUs, prints the panic message and
 * stack trace, optionally drops into the kernel debugger, syncs the
 * filesystems, takes a crash dump, and finally reboots (or spins forever
 * if no panic trigger fired).
 *
 * format/alist: the panic message (printf-style) and its arguments.
 * rp:           register state at the time of panic (may describe a trap).
 * on_panic_stack: nonzero when we are the first/primary panic invocation
 *               running on the dedicated panic stack; a nested or
 *               concurrent panic takes the abbreviated else-branches below.
 *
 * NOTE(review): assumes curthread/CPU are valid at entry — confirm against
 * the panic() wrapper that builds the panic stack.
 */
void
panicsys(const char *format, va_list alist, struct regs *rp, int on_panic_stack)
{
	int s = spl8();		/* block all interrupts immediately */
	kthread_t *t = curthread;
	cpu_t *cp = CPU;

	caddr_t intr_stack = NULL;
	uint_t intr_actv;

	/*
	 * Snapshot the scheduling state we are about to clobber so it can
	 * be preserved in the panic_* globals below for post-mortem use.
	 */
	ushort_t schedflag = t->t_schedflag;
	cpu_t *bound_cpu = t->t_bound_cpu;
	char preempt = t->t_preempt;

	/*
	 * Record the current register window in t_pcb (copied into
	 * panic_regs below), then pin this thread: flag it as panicking,
	 * keep its stack from being swapped, bind it to this CPU, and
	 * disable preemption so nothing can move or reschedule us.
	 */
	(void) setjmp(&t->t_pcb);
	t->t_flag |= T_PANIC;

	t->t_schedflag |= TS_DONT_SWAP;
	t->t_bound_cpu = cp;
	t->t_preempt++;

	/*
	 * Switch lbolt to event driven mode.
	 */
	lbolt_hybrid = lbolt_event_driven;

	panic_enter_hw(s);

	/*
	 * If we're on the interrupt stack and an interrupt thread is available
	 * in this CPU's pool, preserve the interrupt stack by detaching an
	 * interrupt thread and making its stack the intr_stack.
	 */
	if (CPU_ON_INTR(cp) && cp->cpu_intr_thread != NULL) {
		kthread_t *it = cp->cpu_intr_thread;

		intr_stack = cp->cpu_intr_stack;
		intr_actv = cp->cpu_intr_actv;

		cp->cpu_intr_stack = thread_stk_init(it->t_stk);
		cp->cpu_intr_thread = it->t_link;

		/*
		 * Clear only the high level bits of cpu_intr_actv.
		 * We want to indicate that high-level interrupts are
		 * not active without destroying the low-level interrupt
		 * information stored there.
		 */
		cp->cpu_intr_actv &= ((1 << (LOCK_LEVEL + 1)) - 1);
	}

	/*
	 * Record one-time panic information and quiesce the other CPUs.
	 * Then print out the panic message and stack trace.
	 */
	if (on_panic_stack) {
		/* panicbuf is the preserved buffer dumps/debuggers read */
		panic_data_t *pdp = (panic_data_t *)panicbuf;

		pdp->pd_version = PANICBUFVERS;
		pdp->pd_msgoff = sizeof (panic_data_t) - sizeof (panic_nv_t);

		/* Prefer the trap state if this panic came from a trap */
		if (t->t_panic_trap != NULL)
			panic_savetrap(pdp, t->t_panic_trap);
		else
			panic_saveregs(pdp, rp);

		(void) vsnprintf(&panicbuf[pdp->pd_msgoff],
		    PANICBUFSIZE - pdp->pd_msgoff, format, alist);

		/*
		 * Call into the platform code to stop the other CPUs.
		 * We currently have all interrupts blocked, and expect that
		 * the platform code will lower ipl only as far as needed to
		 * perform cross-calls, and will acquire as *few* locks as is
		 * possible -- panicstr is not set so we can still deadlock.
		 */
		panic_stopcpus(cp, t, s);

		/*
		 * Publish the one-time panic state.  Setting panicstr is
		 * the global "we are panicking" flag that lets later code
		 * bypass locks.
		 */
		panicstr = (char *)format;
		va_copy(panicargs, alist);
		panic_lbolt = LBOLT_NO_ACCOUNT;
		panic_lbolt64 = LBOLT_NO_ACCOUNT64;
		panic_hrestime = hrestime;
		panic_hrtime = gethrtime_waitfree();
		panic_thread = t;
		panic_regs = t->t_pcb;
		panic_reg = rp;
		panic_cpu = *cp;
		panic_ipl = spltoipl(s);
		panic_schedflag = schedflag;
		panic_bound_cpu = bound_cpu;
		panic_preempt = preempt;

		/* Report the preserved interrupt stack, if we detached one */
		if (intr_stack != NULL) {
			panic_cpu.cpu_intr_stack = intr_stack;
			panic_cpu.cpu_intr_actv = intr_actv;
		}

		/*
		 * Lower ipl to 10 to keep clock() from running, but allow
		 * keyboard interrupts to enter the debugger.  These callbacks
		 * are executed with panicstr set so they can bypass locks.
		 */
		splx(ipltospl(CLOCK_LEVEL));
		panic_quiesce_hw(pdp);
		(void) FTRACE_STOP();
		(void) callb_execute_class(CB_CL_PANIC, NULL);

		/* Flush any pending interrupt-level log messages */
		if (log_intrq != NULL)
			log_flushq(log_intrq);

		/*
		 * If log_consq has been initialized and syslogd has started,
		 * print any messages in log_consq that haven't been consumed.
		 */
		if (log_consq != NULL && log_consq != log_backlogq)
			log_printq(log_consq);

		fm_banner();

#if defined(__x86)
		/*
		 * A hypervisor panic originates outside of Solaris, so we
		 * don't want to prepend the panic message with misleading
		 * pointers from within Solaris.
		 */
		if (!IN_XPV_PANIC())
#endif
			printf("\n\rpanic[cpu%d]/thread=%p: ", cp->cpu_id,
			    (void *)t);
		vprintf(format, alist);
		printf("\n\n");

		if (t->t_panic_trap != NULL) {
			panic_showtrap(t->t_panic_trap);
			printf("\n");
		}

		traceregs(rp);
		printf("\n");

		/*
		 * Enter the debugger pre-dump if one is configured and this
		 * panic was not forced.
		 */
		if (((boothowto & RB_DEBUG) || obpdebug) &&
		    !nopanicdebug && !panic_forced) {
			if (dumpvp != NULL) {
				debug_enter("panic: entering debugger "
				    "(continue to save dump)");
			} else {
				debug_enter("panic: entering debugger "
				    "(no dump device, continue to reboot)");
			}
		}
	} else if (panic_dump != 0 || panic_sync != 0 || panicstr != NULL) {
		/*
		 * Nested/secondary panic after sync or dump already began:
		 * just print the message and fall through to sync/dump.
		 */
		printf("\n\rpanic[cpu%d]/thread=%p: ", cp->cpu_id, (void *)t);
		vprintf(format, alist);
		printf("\n");
	} else
		goto spin;

	/*
	 * Prior to performing sync or dump, we make sure that do_polled_io is
	 * set, but we'll leave ipl at 10; deadman(), a CY_HIGH_LEVEL cyclic,
	 * will re-enter panic if we are not making progress with sync or dump.
	 */

	/*
	 * Sync the filesystems.  Reset t_cred if not set because much of
	 * the filesystem code depends on CRED() being valid.
	 */
	if (!in_sync && panic_trigger(&panic_sync)) {
		if (t->t_cred == NULL)
			t->t_cred = kcred;
		splx(ipltospl(CLOCK_LEVEL));
		do_polled_io = 1;
		vfs_syncall();
	}

	/*
	 * Take the crash dump.  If the dump trigger is already set, try to
	 * enter the debugger again before rebooting the system.
	 */
	if (panic_trigger(&panic_dump)) {
		panic_dump_hw(s);
		splx(ipltospl(CLOCK_LEVEL));
		errorq_panic();
		do_polled_io = 1;
		dumpsys();
	} else if (((boothowto & RB_DEBUG) || obpdebug) && !nopanicdebug) {
		debug_enter("panic: entering debugger (continue to reboot)");
	} else
		printf("dump aborted: please record the above information!\n");

	if (halt_on_panic)
		mdboot(A_REBOOT, AD_HALT, NULL, B_FALSE);
	else
		mdboot(A_REBOOT, panic_bootfcn, panic_bootstr, B_FALSE);
spin:
	/*
	 * Restore ipl to at most CLOCK_LEVEL so we don't end up spinning
	 * and unable to jump into the debugger.
	 */
	splx(MIN(s, ipltospl(CLOCK_LEVEL)));

	for (;;)
		;
}
/*
 * mdboot() performs the machine-dependent portion of a reboot, halt, or
 * poweroff: it quiesces the system (other CPUs, device tree, drivers),
 * optionally validates a preloaded fast-reboot kernel, and finally hands
 * off to the platform shutdown/PROM code.  Does not return.
 *
 * cmd:       shutdown command (e.g. A_REBOOT); passed through to
 *            (*psm_shutdownf)().
 * fcn:       shutdown function (AD_BOOT, AD_HALT, AD_POWEROFF,
 *            AD_FASTREBOOT); may be overridden below.
 * mdep:      machine-dependent argument (unused here; see /*ARGSUSED*​/).
 * invoke_cb: when B_TRUE, run the CB_CL_MDBOOT callback class before
 *            freezing devices.
 *
 * May be called on the panic path (panicstr set), in which case the
 * locking/CPU-pausing steps are skipped since CPUs are already stopped.
 */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *mdep, boolean_t invoke_cb)
{
	processorid_t bootcpuid = 0;
	/* One-shot latches: quiesce/reset must not re-run if they panic */
	static int is_first_quiesce = 1;
	static int is_first_reset = 1;
	int reset_status = 0;
	static char fallback_str[] = "Falling back to regular reboot.\n";

	/* Fast reboot requires a valid preloaded kernel image */
	if (fcn == AD_FASTREBOOT && !newkernel.fi_valid)
		fcn = AD_BOOT;

	/*
	 * Outside of panic, bind ourselves to the boot CPU for fast reboot
	 * (the new kernel is entered from there), or just to the current
	 * CPU otherwise, with preemption disabled.
	 */
	if (!panicstr) {
		kpreempt_disable();
		if (fcn == AD_FASTREBOOT) {
			mutex_enter(&cpu_lock);
			if (CPU_ACTIVE(cpu_get(bootcpuid))) {
				affinity_set(bootcpuid);
			}
			mutex_exit(&cpu_lock);
		} else {
			affinity_set(CPU_CURRENT);
		}
	}

	/* Administrative override of the shutdown method, if configured */
	if (force_shutdown_method != AD_UNKNOWN)
		fcn = force_shutdown_method;

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * Print the reboot message now, before pausing other cpus.
	 * There is a race condition in the printing support that
	 * can deadlock multiprocessor machines.
	 */
	if (!(fcn == AD_HALT || fcn == AD_POWEROFF))
		prom_printf("rebooting...\n");

	/* A hypervisor panic resets immediately; nothing below is safe */
	if (IN_XPV_PANIC())
		reset();

	/*
	 * We can't bring up the console from above lock level, so do it now
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	page_retire_mdboot();

#if defined(__xpv)
	/*
	 * XXPV	Should probably think some more about how we deal
	 * with panicing before it's really safe to panic.
	 * On hypervisors, we reboot very quickly..  Perhaps panic
	 * should only attempt to recover by rebooting if,
	 * say, we were able to mount the root filesystem,
	 * or if we successfully launched init(1m).
	 */
	if (panicstr && proc_init == NULL)
		(void) HYPERVISOR_shutdown(SHUTDOWN_poweroff);
#endif

	/*
	 * stop other cpus and raise our priority.  since there is only
	 * one active cpu after this, and our priority will be too high
	 * for us to be preempted, we're essentially single threaded
	 * from here on out.
	 */
	(void) spl6();
	if (!panicstr) {
		mutex_enter(&cpu_lock);
		pause_cpus(NULL, NULL);
		mutex_exit(&cpu_lock);
	}

	/*
	 * If the system is panicking, the preloaded kernel is valid, and
	 * fastreboot_onpanic has been set, and the system has been up for
	 * longer than fastreboot_onpanic_uptime (default to 10 minutes),
	 * choose Fast Reboot.
	 */
	if (fcn == AD_BOOT && panicstr && newkernel.fi_valid &&
	    fastreboot_onpanic &&
	    (panic_lbolt - lbolt_at_boot) > fastreboot_onpanic_uptime) {
		fcn = AD_FASTREBOOT;
	}

	/*
	 * Try to quiesce devices.
	 */
	if (is_first_quiesce) {
		/*
		 * Clear is_first_quiesce before calling quiesce_devices()
		 * so that if quiesce_devices() causes panics, it will not
		 * be invoked again.
		 */
		is_first_quiesce = 0;

		quiesce_active = 1;
		quiesce_devices(ddi_root_node(), &reset_status);
		if (reset_status == -1) {
			/* Some driver could not quiesce */
			if (fcn == AD_FASTREBOOT && !force_fastreboot) {
				prom_printf("Driver(s) not capable of fast "
				    "reboot.\n");
				prom_printf(fallback_str);
				fastreboot_capable = 0;
				fcn = AD_BOOT;
			} else if (fcn != AD_FASTREBOOT)
				fastreboot_capable = 0;
		}
		quiesce_active = 0;
	}

	/*
	 * Try to reset devices.  reset_leaves() should only be called
	 * a) when there are no other threads that could be accessing devices,
	 *    and
	 * b) on a system that's not capable of fast reboot (fastreboot_capable
	 *    being 0), or on a system where quiesce_devices() failed to
	 *    complete (quiesce_active being 1).
	 */
	if (is_first_reset && (!fastreboot_capable || quiesce_active)) {
		/*
		 * Clear is_first_reset before calling reset_devices()
		 * so that if reset_devices() causes panics, it will not
		 * be invoked again.
		 */
		is_first_reset = 0;
		reset_leaves();
	}

	/* Verify newkernel checksum */
	if (fastreboot_capable && fcn == AD_FASTREBOOT &&
	    fastboot_cksum_verify(&newkernel) != 0) {
		fastreboot_capable = 0;
		prom_printf("Fast reboot: checksum failed for the new "
		    "kernel.\n");
		prom_printf(fallback_str);
	}

	(void) spl8();

	if (fastreboot_capable && fcn == AD_FASTREBOOT) {
		/*
		 * psm_shutdown is called within fast_reboot()
		 */
		fast_reboot();
	} else {
		(*psm_shutdownf)(cmd, fcn);

		if (fcn == AD_HALT || fcn == AD_POWEROFF)
			halt((char *)NULL);
		else
			prom_reboot("");
	}
	/*NOTREACHED*/
}
/*
 * suspend_start() suspends this guest domain via the hypervisor and, on
 * resume, re-synchronizes time sources and CPU state.  It quiesces the
 * system (watchdog, other CPUs, cyclics, interrupts), records %tick/%stick
 * and the TOD, calls hv_guest_suspend(), and then unwinds in reverse order,
 * applying new tick/stick offsets and refreshing the machine description.
 *
 * error_reason/max_reason_len: error-message buffer from the caller
 * (unused here; see /*ARGSUSED*​/).
 *
 * Returns 0 on success; on failure returns the nonzero hypervisor status
 * from hv_guest_suspend() after fully restoring the pre-suspend state.
 * NOTE(review): rv is uint64_t narrowed to the int return — presumably HV
 * status codes are small; confirm against the sun4v HV API.
 *
 * Caller context: must be safe to take cpu_lock and pause all other CPUs.
 */
/*ARGSUSED*/
int
suspend_start(char *error_reason, size_t max_reason_len)
{
	uint64_t source_tick;
	uint64_t source_stick;
	uint64_t rv;
	timestruc_t source_tod;
	int spl;

	ASSERT(suspend_supported());
	DBG("suspend: %s", __func__);

	sfmmu_ctxdoms_lock();

	mutex_enter(&cpu_lock);

	/* Suspend the watchdog */
	watchdog_suspend();

	/* Record the TOD */
	mutex_enter(&tod_lock);
	source_tod = tod_get();
	mutex_exit(&tod_lock);

	/* Pause all other CPUs */
	pause_cpus(NULL);
	DBG_PROM("suspend: CPUs paused\n");

	/* Suspend cyclics */
	cyclic_suspend();
	DBG_PROM("suspend: cyclics suspended\n");

	/* Disable interrupts */
	spl = spl8();
	DBG_PROM("suspend: spl8()\n");

	/* Snapshot the time registers just before the suspend */
	source_tick = gettick_counter();
	source_stick = gettick();
	DBG_PROM("suspend: source_tick: 0x%lx\n", source_tick);
	DBG_PROM("suspend: source_stick: 0x%lx\n", source_stick);

	/*
	 * Call into the HV to initiate the suspend.  hv_guest_suspend()
	 * returns after the guest has been resumed or if the suspend
	 * operation failed or was cancelled.  After a successful suspend,
	 * the %tick and %stick registers may have changed by an amount
	 * that is not proportional to the amount of time that has passed.
	 * They may have jumped forwards or backwards.  Some variation is
	 * allowed and accounted for using suspend_tick_stick_max_delta,
	 * but otherwise this jump must be uniform across all CPUs and we
	 * operate under the assumption that it is (maintaining two global
	 * offset variables--one for %tick and one for %stick.)
	 */
	DBG_PROM("suspend: suspending... \n");
	rv = hv_guest_suspend();
	if (rv != 0) {
		/* Suspend failed/cancelled: unwind in reverse order */
		splx(spl);
		cyclic_resume();
		start_cpus();
		watchdog_resume();
		mutex_exit(&cpu_lock);
		sfmmu_ctxdoms_unlock();
		DBG("suspend: failed, rv: %ld\n", rv);
		return (rv);
	}

	suspend_count++;

	/* Update the global tick and stick offsets and the preserved TOD */
	set_tick_offsets(source_tick, source_stick, &source_tod);

	/* Ensure new offsets are globally visible before resuming CPUs */
	membar_sync();

	/* Enable interrupts */
	splx(spl);

	/* Set the {%tick,%stick}.NPT bits on all CPUs */
	if (enable_user_tick_stick_emulation) {
		xc_all((xcfunc_t *)enable_tick_stick_npt, NULL, NULL);
		xt_sync(cpu_ready_set);
		ASSERT(gettick_npt() != 0);
		ASSERT(getstick_npt() != 0);
	}

	/* If emulation is enabled, but not currently active, enable it */
	if (enable_user_tick_stick_emulation && !tick_stick_emulation_active) {
		tick_stick_emulation_active = B_TRUE;
	}

	sfmmu_ctxdoms_remove();

	/* Resume cyclics, unpause CPUs */
	cyclic_resume();
	start_cpus();

	/* Set the TOD */
	mutex_enter(&tod_lock);
	tod_set(source_tod);
	mutex_exit(&tod_lock);

	/* Re-enable the watchdog */
	watchdog_resume();

	mutex_exit(&cpu_lock);

	/* Download the latest MD */
	if ((rv = mach_descrip_update()) != 0)
		cmn_err(CE_PANIC, "suspend: mach_descrip_update failed: %ld",
		    rv);

	sfmmu_ctxdoms_update();
	sfmmu_ctxdoms_unlock();

	/* Get new MD, update CPU mappings/relationships */
	if (suspend_update_cpu_mappings)
		update_cpu_mappings();

	DBG("suspend: target tick: 0x%lx", gettick_counter());
	DBG("suspend: target stick: 0x%llx", gettick());
	DBG("suspend: user %%tick/%%stick emulation is %d",
	    tick_stick_emulation_active);
	DBG("suspend: finished");

	return (0);
}