void
cpu_reset(void)
{
#ifdef SMP
    if (smp_active_mask == 1) {
        cpu_reset_real();
        /* NOTREACHED */
    } else {
        cpumask_t map;
        int cnt;

        kprintf("cpu_reset called on cpu#%d\n", mycpu->gd_cpuid);

        map = mycpu->gd_other_cpus & ~stopped_cpus & smp_active_mask;

        if (map != 0) {
            kprintf("cpu_reset: Stopping other CPUs\n");
            stop_cpus(map);     /* Stop all other CPUs */
        }

        if (mycpu->gd_cpuid == 0) {
            DELAY(1000000);
            cpu_reset_real();
            /* NOTREACHED */
        } else {
            /* We are not BSP (CPU #0) */
            cpu_reset_proxyid = mycpu->gd_cpuid;
            cpustop_restartfunc = cpu_reset_proxy;
            kprintf("cpu_reset: Restarting BSP\n");
            started_cpus = (1 << 0);    /* Restart CPU #0 */

            cnt = 0;
            while (cpu_reset_proxy_active == 0 && cnt < 10000000)
                cnt++;  /* Wait for BSP to announce restart */
            if (cpu_reset_proxy_active == 0)
                kprintf("cpu_reset: Failed to restart BSP\n");
            __asm __volatile("cli" : : : "memory");
            cpu_reset_proxy_active = 2;
            cnt = 0;
            while (cpu_reset_proxy_active == 2 && cnt < 10000000)
                cnt++;  /* Do nothing */
            if (cpu_reset_proxy_active == 2) {
                kprintf("cpu_reset: BSP did not grab mp lock\n");
                cpu_reset_real();   /* XXX: Bogus ? */
            }
            cpu_reset_proxy_active = 4;
            __asm __volatile("sti" : : : "memory");
            while (1)
                ;
            /* NOTREACHED */
        }
    }
#else
    cpu_reset_real();
#endif
}
static void
cpu_reset_proxy(void)
{
    cpu_reset_proxy_active = 1;
    while (cpu_reset_proxy_active == 1)
        ;   /* Wait for other cpu to disable interrupts */
    kprintf("cpu_reset_proxy: Grabbed mp lock for BSP\n");
    cpu_reset_proxy_active = 3;
    while (cpu_reset_proxy_active == 3)
        ;   /* Wait for other cpu to enable interrupts */
    stop_cpus(CPUMASK(cpu_reset_proxyid));
    kprintf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
    DELAY(1000000);
    cpu_reset_real();
}
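/*
 * A minimal userland sketch of the four-state handshake that cpu_reset()
 * and cpu_reset_proxy() perform above, for illustration only: two POSIX
 * threads stand in for the AP and the restarted BSP, and a C11 atomic
 * replaces cpu_reset_proxy_active.  All names below are hypothetical
 * stand-ins, not kernel APIs.  State 0->1: the proxy announces it is
 * running; 1->2: the AP acknowledges (where the kernel would execute
 * "cli"); 2->3: the proxy reports it has taken over; 3->4: the AP
 * releases it to perform the real reset.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int proxy_active;     /* stands in for cpu_reset_proxy_active */

static void *
proxy_thread(void *arg)             /* plays the role of cpu_reset_proxy() */
{
    (void)arg;
    atomic_store(&proxy_active, 1);
    while (atomic_load(&proxy_active) == 1)
        ;                           /* wait for the "AP" to acknowledge */
    printf("proxy: acknowledged, taking over\n");
    atomic_store(&proxy_active, 3);
    while (atomic_load(&proxy_active) == 3)
        ;                           /* wait for the final release */
    printf("proxy: would stop the AP and reset here\n");
    return (NULL);
}

int
main(void)                          /* plays the role of the panicking AP */
{
    pthread_t tid;

    pthread_create(&tid, NULL, proxy_thread, NULL);
    while (atomic_load(&proxy_active) == 0)
        ;                           /* wait for the proxy to announce itself */
    atomic_store(&proxy_active, 2); /* the kernel would disable interrupts */
    while (atomic_load(&proxy_active) == 2)
        ;
    atomic_store(&proxy_active, 4); /* release the proxy */
    pthread_join(tid, NULL);
    return (0);
}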
static void
cpu_reset_proxy()
{
    cpuset_t tcrp;

    cpu_reset_proxy_active = 1;
    while (cpu_reset_proxy_active == 1)
        ia32_pause();   /* Wait for other cpu to see that we've started */

    CPU_SETOF(cpu_reset_proxyid, &tcrp);
    stop_cpus(tcrp);
    printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
    DELAY(1000000);
    cpu_reset_real();
}
void
cpu_reset()
{
#ifdef SMP
    cpuset_t map;
    u_int cnt;

    if (smp_started) {
        map = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &map);
        CPU_NAND(&map, &stopped_cpus);
        if (!CPU_EMPTY(&map)) {
            printf("cpu_reset: Stopping other CPUs\n");
            stop_cpus(map);
        }

        if (PCPU_GET(cpuid) != 0) {
            cpu_reset_proxyid = PCPU_GET(cpuid);
            cpustop_restartfunc = cpu_reset_proxy;
            cpu_reset_proxy_active = 0;
            printf("cpu_reset: Restarting BSP\n");

            /* Restart CPU #0. */
            CPU_SETOF(0, &started_cpus);
            wmb();

            cnt = 0;
            while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
                ia32_pause();
                cnt++;  /* Wait for BSP to announce restart */
            }
            if (cpu_reset_proxy_active == 0)
                printf("cpu_reset: Failed to restart BSP\n");
            enable_intr();
            cpu_reset_proxy_active = 2;

            while (1)
                ia32_pause();
            /* NOTREACHED */
        }

        DELAY(1000000);
    }
#endif
    cpu_reset_real();
    /* NOTREACHED */
}
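/*
 * A sketch of the publication pattern behind the wmb() above, with C11
 * atomics standing in for the kernel primitives (all names here are
 * hypothetical userland stand-ins, not kernel APIs).  The plain stores to
 * the proxy id and restart function must be globally visible before the
 * store that wakes the BSP; the release fence provides that ordering, and
 * the reader pairs it with an acquire fence before touching the plain
 * variables.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int proxyid;                 /* stands in for cpu_reset_proxyid */
static atomic_int started;          /* stands in for started_cpus */

static void *
bsp_thread(void *arg)
{
    (void)arg;
    while (atomic_load_explicit(&started, memory_order_relaxed) == 0)
        ;                           /* spin until woken */
    atomic_thread_fence(memory_order_acquire);
    printf("bsp: saw proxyid %d\n", proxyid);   /* plain read, now safe */
    return (NULL);
}

int
main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, bsp_thread, NULL);
    proxyid = 3;                    /* ordinary store ... */
    atomic_thread_fence(memory_order_release); /* ... ordered, as wmb() is */
    atomic_store_explicit(&started, 1, memory_order_relaxed);
    pthread_join(tid, NULL);
    return (0);
}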
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
    int bootopt, newpanic;
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    __va_list ap;
    static char buf[256];

#ifdef SMP
    /*
     * If a panic occurs on multiple cpus before the first is able to
     * halt the other cpus, only one cpu is allowed to take the panic.
     * Attempt to be verbose about this situation but if the kprintf()
     * itself panics don't let us overrun the kernel stack.
     *
     * Be very nasty about descheduling our thread at the lowest
     * level possible in an attempt to freeze the thread without
     * inducing further panics.
     *
     * Bumping gd_trap_nesting_level will also bypass assertions in
     * lwkt_switch() and allow us to switch away even if we are a
     * FAST interrupt or IPI.
     *
     * The setting of panic_cpu_gd also determines how kprintf()
     * spin-locks itself.  DDB can set panic_cpu_gd as well.
     */
    for (;;) {
        globaldata_t xgd = panic_cpu_gd;

        /*
         * Someone else got the panic cpu
         */
        if (xgd && xgd != gd) {
            crit_enter();
            ++mycpu->gd_trap_nesting_level;
            if (mycpu->gd_trap_nesting_level < 25) {
                kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
                    mycpu->gd_cpuid, td);
            }
            td->td_release = NULL;  /* be a grinch */
            for (;;) {
                lwkt_deschedule_self(td);
                lwkt_switch();
            }
            /* NOT REACHED */
            /* --mycpu->gd_trap_nesting_level */
            /* crit_exit() */
        }

        /*
         * Reentrant panic
         */
        if (xgd && xgd == gd)
            break;

        /*
         * We got it
         */
        if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
            break;
    }
#else
    panic_cpu_gd = gd;
#endif

    /*
     * Try to get the system into a working state.  Save information
     * we are about to destroy.
     */
    kvcreinitspin();
    if (panicstr == NULL) {
        bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
        panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
    }
    lwkt_relalltokens(td);
    td->td_toks_stop = &td->td_toks_base;

    /*
     * Setup
     */
    bootopt = RB_AUTOBOOT | RB_DUMP;
    if (sync_on_panic == 0)
        bootopt |= RB_NOSYNC;
    newpanic = 0;
    if (panicstr) {
        bootopt |= RB_NOSYNC;
    } else {
        panicstr = fmt;
        newpanic = 1;
    }

    /*
     * Format the panic string.
     */
    __va_start(ap, fmt);
    kvsnprintf(buf, sizeof(buf), fmt, ap);
    if (panicstr == fmt)
        panicstr = buf;
    __va_end(ap);
    kprintf("panic: %s\n", buf);
#ifdef SMP
    /* two separate prints in case of an unmapped page and trap */
    kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
    led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
    wdog_disable();
#endif

    /*
     * Enter the debugger or fall through & dump.  Entering the
     * debugger will stop cpus.  If not entering the debugger stop
     * cpus here.
     */
#if defined(DDB)
    if (newpanic && trace_on_panic)
        print_backtrace(-1);
    if (debugger_on_panic)
        Debugger("panic");
    else
#endif
#ifdef SMP
    if (newpanic)
        stop_cpus(mycpu->gd_other_cpus);
#else
    ;
#endif
    boot(bootopt);
}
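/*
 * A userland sketch of the "first panicker wins" arbitration that panic()
 * performs above, assuming C11 atomics in place of atomic_cmpset_ptr()
 * (the names below are hypothetical, not the kernel's).  Every CPU races
 * to swing a shared pointer from NULL to itself; exactly one
 * compare-and-swap succeeds, losers discover someone else owns the panic,
 * and a repeated attempt by the winner is recognized as a reentrant panic.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct cpu { int id; };

static _Atomic(struct cpu *) panic_cpu;     /* stands in for panic_cpu_gd */

enum panic_owner { PANIC_WON, PANIC_REENTRANT, PANIC_LOST };

static enum panic_owner
take_panic(struct cpu *self)
{
    for (;;) {
        struct cpu *owner = atomic_load(&panic_cpu);
        struct cpu *expect = NULL;

        if (owner == self)
            return (PANIC_REENTRANT);   /* we already own the panic */
        if (owner != NULL)
            return (PANIC_LOST);        /* secondary panic: freeze */
        if (atomic_compare_exchange_strong(&panic_cpu, &expect, self))
            return (PANIC_WON);         /* we got it */
    }
}

int
main(void)
{
    struct cpu a = { 0 }, b = { 1 };

    printf("cpu0: %d\n", take_panic(&a));   /* PANIC_WON */
    printf("cpu1: %d\n", take_panic(&b));   /* PANIC_LOST */
    printf("cpu0: %d\n", take_panic(&a));   /* PANIC_REENTRANT */
    return (0);
}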
/*
 * kdb_trap - field a TRACE or BPT trap
 */
int
kdb_trap(int type, int code, struct x86_64_saved_state *regs)
{
    volatile int ddb_mode = !(boothowto & RB_GDB);

    /*
     * XXX try to do nothing if the console is in graphics mode.
     * Handle trace traps (and hardware breakpoints...) by ignoring
     * them except for forgetting about them.  Return 0 for other
     * traps to say that we haven't done anything.  The trap handler
     * will usually panic.  We should handle breakpoint traps for
     * our breakpoints by disarming our breakpoints and fixing up
     * %eip.
     */
    if (cons_unavail && ddb_mode) {
        if (type == T_TRCTRAP) {
            regs->tf_rflags &= ~PSL_T;
            return (1);
        }
        return (0);
    }

    switch (type) {
    case T_BPTFLT:      /* breakpoint */
    case T_TRCTRAP:     /* debug exception */
        break;
    default:
        /*
         * XXX this is almost useless now.  In most cases,
         * trap_fatal() has already printed a much more verbose
         * message.  However, it is dangerous to print things in
         * trap_fatal() - kprintf() might be reentered and trap.
         * The debugger should be given control first.
         */
        if (ddb_mode)
            db_printf("kernel: type %d trap, code=%x\n", type, code);
        if (db_nofault) {
            jmp_buf *no_fault = db_nofault;
            db_nofault = NULL;
            longjmp(*no_fault, 1);
        }
    }

    /*
     * This handles unexpected traps in ddb commands, including calls to
     * non-ddb functions.  db_nofault only applies to memory accesses by
     * internal ddb commands.
     */
    if (db_global_jmpbuf_valid)
        longjmp(db_global_jmpbuf, 1);

    /*
     * XXX We really should switch to a local stack here.
     */
    ddb_regs = *regs;

    crit_enter();
    db_printf("\nCPU%d stopping CPUs: 0x%016jx\n",
        mycpu->gd_cpuid,
        (uintmax_t)CPUMASK_LOWMASK(mycpu->gd_other_cpus));

    /* We stop all CPUs except ourselves (obviously) */
    stop_cpus(mycpu->gd_other_cpus);
    db_printf(" stopped\n");

    setjmp(db_global_jmpbuf);
    db_global_jmpbuf_valid = TRUE;
    db_active++;
    vcons_set_mode(1);
    if (ddb_mode) {
        cndbctl(TRUE);
        db_trap(type, code);
        cndbctl(FALSE);
    } else
        gdb_handle_exception(&ddb_regs, type, code);
    db_active--;
    vcons_set_mode(0);
    db_global_jmpbuf_valid = FALSE;

    db_printf("\nCPU%d restarting CPUs: 0x%016jx\n",
        mycpu->gd_cpuid,
        (uintmax_t)CPUMASK_LOWMASK(stopped_cpus));

    /* Restart all the CPUs we previously stopped */
    if (CPUMASK_CMPMASKNEQ(stopped_cpus, mycpu->gd_other_cpus)) {
        db_printf("whoa, other_cpus: 0x%016jx, "
                  "stopped_cpus: 0x%016jx\n",
            (uintmax_t)CPUMASK_LOWMASK(mycpu->gd_other_cpus),
            (uintmax_t)CPUMASK_LOWMASK(stopped_cpus));
        panic("stop_cpus() failed");
    }
    restart_cpus(stopped_cpus);
    db_printf(" restarted\n");
    crit_exit();

    regs->tf_rip    = ddb_regs.tf_rip;
    regs->tf_rflags = ddb_regs.tf_rflags;
    regs->tf_rax    = ddb_regs.tf_rax;
    regs->tf_rcx    = ddb_regs.tf_rcx;
    regs->tf_rdx    = ddb_regs.tf_rdx;
    regs->tf_rbx    = ddb_regs.tf_rbx;
    regs->tf_rsp    = ddb_regs.tf_rsp;
    regs->tf_ss     = ddb_regs.tf_ss & 0xffff;
    regs->tf_rbp    = ddb_regs.tf_rbp;
    regs->tf_rsi    = ddb_regs.tf_rsi;
    regs->tf_rdi    = ddb_regs.tf_rdi;
    regs->tf_r8     = ddb_regs.tf_r8;
    regs->tf_r9     = ddb_regs.tf_r9;
    regs->tf_r10    = ddb_regs.tf_r10;
    regs->tf_r11    = ddb_regs.tf_r11;
    regs->tf_r12    = ddb_regs.tf_r12;
    regs->tf_r13    = ddb_regs.tf_r13;
    regs->tf_r14    = ddb_regs.tf_r14;
    regs->tf_r15    = ddb_regs.tf_r15;
    /* regs->tf_es = ddb_regs.tf_es & 0xffff; */
    /* regs->tf_fs = ddb_regs.tf_fs & 0xffff; */
    /* regs->tf_gs = ddb_regs.tf_gs & 0xffff; */
    regs->tf_cs     = ddb_regs.tf_cs & 0xffff;
    /* regs->tf_ds = ddb_regs.tf_ds & 0xffff; */

    return (1);
}
/*
 * kdb_trap - field a TRACE or BPT trap
 */
int
kdb_trap(int type, int code, struct i386_saved_state *regs)
{
    volatile int ddb_mode = !(boothowto & RB_GDB);

    /*
     * XXX try to do nothing if the console is in graphics mode.
     * Handle trace traps (and hardware breakpoints...) by ignoring
     * them except for forgetting about them.  Return 0 for other
     * traps to say that we haven't done anything.  The trap handler
     * will usually panic.  We should handle breakpoint traps for
     * our breakpoints by disarming our breakpoints and fixing up
     * %eip.
     */
    if (cons_unavail && ddb_mode) {
        if (type == T_TRCTRAP) {
            regs->tf_eflags &= ~PSL_T;
            return (1);
        }
        return (0);
    }

    switch (type) {
    case T_BPTFLT:      /* breakpoint */
    case T_TRCTRAP:     /* debug exception */
        break;
    default:
        /*
         * XXX this is almost useless now.  In most cases,
         * trap_fatal() has already printed a much more verbose
         * message.  However, it is dangerous to print things in
         * trap_fatal() - kprintf() might be reentered and trap.
         * The debugger should be given control first.
         */
        if (ddb_mode)
            db_printf("kernel: type %d trap, code=%x\n", type, code);
        if (db_nofault) {
            jmp_buf *no_fault = db_nofault;
            db_nofault = NULL;
            longjmp(*no_fault, 1);
        }
    }

    /*
     * This handles unexpected traps in ddb commands, including calls to
     * non-ddb functions.  db_nofault only applies to memory accesses by
     * internal ddb commands.
     */
    if (db_global_jmpbuf_valid)
        longjmp(db_global_jmpbuf, 1);

    /*
     * XXX We really should switch to a local stack here.
     */
    ddb_regs = *regs;

    /*
     * If in kernel mode, esp and ss are not saved, so dummy them up.
     */
    if (ISPL(regs->tf_cs) == 0) {
        ddb_regs.tf_esp = (int)&regs->tf_esp;
        ddb_regs.tf_ss = rss();
    }

    crit_enter();
#ifdef SMP
    db_printf("\nCPU%d stopping CPUs: 0x%08x\n",
        mycpu->gd_cpuid, mycpu->gd_other_cpus);

    /* We stop all CPUs except ourselves (obviously) */
    stop_cpus(mycpu->gd_other_cpus);
    db_printf(" stopped\n");
#endif /* SMP */

    setjmp(db_global_jmpbuf);
    db_global_jmpbuf_valid = TRUE;
    db_active++;
    if (ddb_mode) {
        cndbctl(TRUE);
        db_trap(type, code);
        cndbctl(FALSE);
    } else
        gdb_handle_exception(&ddb_regs, type, code);
    db_active--;
    db_global_jmpbuf_valid = FALSE;

#ifdef SMP
    db_printf("\nCPU%d restarting CPUs: 0x%08x\n",
        mycpu->gd_cpuid, stopped_cpus);

    /* Restart all the CPUs we previously stopped */
    if (stopped_cpus != mycpu->gd_other_cpus) {
        db_printf("whoa, other_cpus: 0x%08x, stopped_cpus: 0x%08x\n",
            mycpu->gd_other_cpus, stopped_cpus);
        panic("stop_cpus() failed");
    }
    restart_cpus(stopped_cpus);
    db_printf(" restarted\n");
#endif /* SMP */

    crit_exit();

    regs->tf_eip    = ddb_regs.tf_eip;
    regs->tf_eflags = ddb_regs.tf_eflags;
    regs->tf_eax    = ddb_regs.tf_eax;
    regs->tf_ecx    = ddb_regs.tf_ecx;
    regs->tf_edx    = ddb_regs.tf_edx;
    regs->tf_ebx    = ddb_regs.tf_ebx;

    /*
     * If in user mode, the saved ESP and SS were valid, restore them.
     */
    if (ISPL(regs->tf_cs)) {
        regs->tf_esp = ddb_regs.tf_esp;
        regs->tf_ss  = ddb_regs.tf_ss & 0xffff;
    }

    regs->tf_ebp    = ddb_regs.tf_ebp;
    regs->tf_esi    = ddb_regs.tf_esi;
    regs->tf_edi    = ddb_regs.tf_edi;
    regs->tf_es     = ddb_regs.tf_es & 0xffff;
    regs->tf_fs     = ddb_regs.tf_fs & 0xffff;
    regs->tf_gs     = ddb_regs.tf_gs & 0xffff;
    regs->tf_cs     = ddb_regs.tf_cs & 0xffff;
    regs->tf_ds     = ddb_regs.tf_ds & 0xffff;

    return (1);
}
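/*
 * A minimal userland sketch of the db_global_jmpbuf recovery pattern both
 * kdb_trap() variants use above (the names below are hypothetical
 * stand-ins, not ddb's).  The debugger arms a global jmp_buf before
 * running a command; the handler for any unexpected trap checks the flag
 * and longjmps back to the prompt, just as kdb_trap() does, instead of
 * letting the fault cascade into a panic.
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf global_jmpbuf;       /* stands in for db_global_jmpbuf */
static int jmpbuf_valid;            /* stands in for db_global_jmpbuf_valid */

static void
unexpected_trap(void)               /* plays the role of a re-entered trap handler */
{
    if (jmpbuf_valid)
        longjmp(global_jmpbuf, 1);
    /* no recovery point armed: a real kernel would panic here */
}

static void
risky_command(void)
{
    printf("running command...\n");
    unexpected_trap();              /* the command faults mid-way */
    printf("never reached\n");
}

int
main(void)
{
    if (setjmp(global_jmpbuf) != 0) {
        printf("command aborted, back at the prompt\n");
        jmpbuf_valid = 0;
        return (0);
    }
    jmpbuf_valid = 1;
    risky_command();
    return (0);
}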
/* Full PV mode suspension. */
static void
xctrl_suspend()
{
    int i, j, k, fpp, suspend_cancelled;
    unsigned long max_pfn, start_info_mfn;

    EVENTHANDLER_INVOKE(power_suspend);

#ifdef SMP
    struct thread *td;
    cpuset_t map;
    u_int cpuid;

    /*
     * Bind us to CPU 0 and stop any other VCPUs.
     */
    td = curthread;
    thread_lock(td);
    sched_bind(td, 0);
    thread_unlock(td);
    cpuid = PCPU_GET(cpuid);
    KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));

    map = all_cpus;
    CPU_CLR(cpuid, &map);
    CPU_NAND(&map, &stopped_cpus);
    if (!CPU_EMPTY(&map))
        stop_cpus(map);
#endif

    /*
     * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
     * drivers need this.
     */
    mtx_lock(&Giant);
    if (DEVICE_SUSPEND(root_bus) != 0) {
        mtx_unlock(&Giant);
        printf("%s: device_suspend failed\n", __func__);
#ifdef SMP
        if (!CPU_EMPTY(&map))
            restart_cpus(map);
#endif
        return;
    }
    mtx_unlock(&Giant);

    local_irq_disable();

    xencons_suspend();
    gnttab_suspend();
    intr_suspend();

    max_pfn = HYPERVISOR_shared_info->arch.max_pfn;

    void *shared_info = HYPERVISOR_shared_info;
    HYPERVISOR_shared_info = NULL;
    pmap_kremove((vm_offset_t) shared_info);
    PT_UPDATES_FLUSH();

    xen_start_info->store_mfn = MFNTOPFN(xen_start_info->store_mfn);
    xen_start_info->console.domU.mfn =
        MFNTOPFN(xen_start_info->console.domU.mfn);

    /*
     * We'll stop somewhere inside this hypercall.  When it returns,
     * we'll start resuming after the restore.
     */
    start_info_mfn = VTOMFN(xen_start_info);
    pmap_suspend();
    suspend_cancelled = HYPERVISOR_suspend(start_info_mfn);
    pmap_resume();

    pmap_kenter_ma((vm_offset_t) shared_info, xen_start_info->shared_info);
    HYPERVISOR_shared_info = shared_info;

    HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
        VTOMFN(xen_pfn_to_mfn_frame_list_list);

    fpp = PAGE_SIZE / sizeof(unsigned long);
    for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
        if ((j % fpp) == 0) {
            k++;
            xen_pfn_to_mfn_frame_list_list[k] =
                VTOMFN(xen_pfn_to_mfn_frame_list[k]);
            j = 0;
        }
        xen_pfn_to_mfn_frame_list[k][j] =
            VTOMFN(&xen_phys_machine[i]);
    }
    HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

    gnttab_resume();
    intr_resume(suspend_cancelled != 0);
    local_irq_enable();
    xencons_resume();

#ifdef CONFIG_SMP
    for_each_cpu(i)
        vcpu_prepare(i);
#endif

    /*
     * Only resume xenbus /after/ we've prepared our VCPUs; otherwise
     * the VCPU hotplug callback can race with our vcpu_prepare
     */
    mtx_lock(&Giant);
    DEVICE_RESUME(root_bus);
    mtx_unlock(&Giant);

#ifdef SMP
    thread_lock(curthread);
    sched_unbind(curthread);
    thread_unlock(curthread);
    if (!CPU_EMPTY(&map))
        restart_cpus(map);
#endif

    EVENTHANDLER_INVOKE(power_resume);
}
/*
 * ddb_trap - field a kernel trap
 */
int
kdb_trap(int vector, struct trapframe *regs)
{
    int ddb_mode = !(boothowto & RB_GDB);
    register_t s;

    /*
     * Don't bother checking for usermode, since a benign entry
     * by the kernel (call to Debugger() or a breakpoint) has
     * already checked for usermode.  If neither of those
     * conditions exist, something Bad has happened.
     */
    if (vector != IA64_VEC_BREAK && vector != IA64_VEC_SINGLE_STEP_TRAP) {
#if 0
        if (ddb_mode) {
            db_printf("ddbprinttrap from 0x%lx\n",  /* XXX */
                regs->tf_regs[FRAME_PC]);
            ddbprinttrap(a0, a1, a2, entry);
            /*
             * Tell caller "We did NOT handle the trap."
             * Caller should panic, or whatever.
             */
            return (0);
        }
#endif
        if (db_nofault) {
            jmp_buf *no_fault = db_nofault;
            db_nofault = 0;
            longjmp(*no_fault, 1);
        }
    }

    /*
     * XXX Should switch to DDB's own stack, here.
     */
    s = intr_disable();

#ifdef SMP
#ifdef CPUSTOP_ON_DDBBREAK
#if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
    db_printf("CPU%d stopping CPUs: 0x%08x...", PCPU_GET(cpuid),
        PCPU_GET(other_cpus));
#endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */

    /* We stop all CPUs except ourselves (obviously) */
    stop_cpus(PCPU_GET(other_cpus));

#if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
    db_printf(" stopped.\n");
#endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */
#endif /* CPUSTOP_ON_DDBBREAK */
#endif /* SMP */

    ddb_regs = *regs;

    /*
     * XXX pretend that registers outside the current frame don't exist.
     */
    db_eregs = db_regs + DB_MISC_REGS + 8 + 32 +
        (ddb_regs.tf_cr_ifs & 0x7f);

    __asm __volatile("flushrs");    /* so we can look at them */

    db_active++;

    if (ddb_mode) {
        cndbctl(TRUE);          /* DDB active, unblank video */
        db_trap(vector, 0);     /* Where the work happens */
        cndbctl(FALSE);         /* DDB inactive */
    } else
        gdb_handle_exception(&ddb_regs, vector);

    db_active--;

#ifdef SMP
#ifdef CPUSTOP_ON_DDBBREAK
#if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
    db_printf("CPU%d restarting CPUs: 0x%08x...", PCPU_GET(cpuid),
        stopped_cpus);
#endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */

    /* Restart all the CPUs we previously stopped */
    if (stopped_cpus != PCPU_GET(other_cpus) && smp_started != 0) {
        db_printf("whoa, other_cpus: 0x%08x, stopped_cpus: 0x%08x\n",
            PCPU_GET(other_cpus), stopped_cpus);
        panic("stop_cpus() failed");
    }
    restart_cpus(stopped_cpus);

#if defined(VERBOSE_CPUSTOP_ON_DDBBREAK)
    db_printf(" restarted.\n");
#endif /* VERBOSE_CPUSTOP_ON_DDBBREAK */
#endif /* CPUSTOP_ON_DDBBREAK */
#endif /* SMP */

    *regs = ddb_regs;
    intr_restore(s);

    /*
     * Tell caller "We HAVE handled the trap."
     */
    return (1);
}
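/*
 * A tiny sketch of the cpu-mask bookkeeping all of the handlers above
 * share, using a plain 64-bit mask as a stand-in for cpumask_t/cpuset_t
 * (hypothetical userland code, not kernel APIs).  "Everyone but me, minus
 * anyone already stopped" is the set each handler stops, and the equality
 * test at restart time is the same sanity check kdb_trap() panics on.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t all_cpus = 0xf;            /* four CPUs */
    uint64_t stopped_cpus = 0x1;        /* CPU 0 already stopped */
    int self = 2;

    uint64_t other_cpus = all_cpus & ~((uint64_t)1 << self);
    uint64_t map = other_cpus & ~stopped_cpus;  /* what we must stop */

    printf("stopping: 0x%016" PRIx64 "\n", map);
    stopped_cpus |= map;                /* stop_cpus() would do this */
    if (stopped_cpus != other_cpus)     /* the kdb_trap() sanity check */
        printf("whoa, mismatch\n");
    else
        printf("all others stopped\n");
    return (0);
}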