/*
 * Perform a kernel dump to the configured dump device.
 *
 * Returns 0 on success, EBUSY if a dump is already in progress, or
 * ENXIO when no dump device has been configured.  Under DDB, a pending
 * textdump takes the place of the full core dump.
 */
int
doadump(boolean_t textdump)
{
	int err = 0;
	boolean_t do_coredump = TRUE;

	if (dumping)
		return (EBUSY);
	if (dumper.dumper == NULL)
		return (ENXIO);

	/* Record our own context so the dumping thread shows up in the dump. */
	savectx(&dumppcb);
	dumptid = curthread->td_tid;
	dumping++;

#ifdef DDB
	if (textdump && textdump_pending) {
		/* A textdump was requested; it replaces the core dump. */
		do_coredump = FALSE;
		textdump_dumpsys(&dumper);
	}
#endif

	if (do_coredump)
		err = dumpsys(&dumper);

	dumping--;
	return (err);
}
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

#if NNPX > 0
	/* Flush the parent's live FPU state into its pcb before we copy it. */
	npxsave_proc(p1, 1);
#endif

	p2->p_md.md_flags = p1->p_md.md_flags;

	/* Copy pcb from proc p1 to p2. */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		/* Only the current process or proc0 may be forked from. */
		panic("cpu_fork: curproc");
#endif
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Preset these so that gdt_compact() doesn't get confused if called
	 * during the allocations below.
	 *
	 * Note: pcb_ldt_sel is handled in the pmap_activate() call when
	 * we run the new process.
	 */
	p2->p_md.md_tss_sel = GSEL(GNULL_SEL, SEL_KPL);

	/* Fix up the TSS: kernel stack grows down from the top of the uarea. */
	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	pcb->pcb_tss.tss_esp0 = (int)p2->p_addr + USPACE - 16;
	p2->p_md.md_tss_sel = tss_alloc(pcb);

	/*
	 * Copy the trapframe, and arrange for the child to return directly
	 * through rei().  The trapframe sits at the top of the kernel stack.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_tss.tss_esp0 - 1;
	*tf = *p1->p_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_esp = (u_int)stack + stacksize;

	/*
	 * Build a switchframe below the trapframe so the first cpu_switch()
	 * into the child resumes in proc_trampoline(), which calls func(arg).
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_ppl = 0;
	sf->sf_esi = (int)func;
	sf->sf_ebx = (int)arg;
	sf->sf_eip = (int)proc_trampoline;
	pcb->pcb_esp = (int)sf;
}
/*
 * Machine-dependent reboot: sync disks, optionally dump, run shutdown
 * hooks, then halt/power-down/reset via the SIF BIOS.  howto carries
 * the RB_* flags; bootstr is unused here.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	static int waittime = -1;	/* ensures we only sync disks once */

	/* Take a snapshot before clobbering any registers. */
	if (curproc)
		savectx((struct user *)curpcb);

	if (cold) {
		/* Too early in boot to sync or dump; just halt. */
		howto |= RB_HALT;
		goto haltsys;
	}

	/* If "always halt" was specified as a boot flag, obey. */
	if (boothowto & RB_HALT) {
		howto |= RB_HALT;
	}

#ifdef KLOADER_KERNEL_PATH
	/* Prepare in-kernel bootloader unless we are halting. */
	if ((howto & RB_HALT) == 0)
		kloader_reboot_setup(KLOADER_KERNEL_PATH);
#endif

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Block interrupts for the remainder of shutdown. */
	splhigh();

	if (howto & RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN)
		sifbios_halt(0);	/* power down */
	else if (howto & RB_HALT)
		sifbios_halt(1);	/* halt */
	else {
#ifdef KLOADER_KERNEL_PATH
		kloader_reboot();
		/* NOTREACHED */
#endif
		sifbios_halt(2);	/* reset */
	}

	/* Should never get here; spin if the BIOS call returns. */
	while (1)
		;
	/* NOTREACHED */
}
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	/* Copy pcb from lwp l1 to l2. */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(pcb1);
#if 0
		/* ia64_highfp_save(???); */
#endif
	} else {
		KASSERT(l1 == &lwp0);
	}

	*pcb2 = *pcb1;

	l2->l_md.md_flags = l1->l_md.md_flags;
	/* Trapframe lives at the top of the child's uarea. */
	l2->l_md.md_tf =
	    (struct trapframe *)(uvm_lwp_getuarea(l2) + USPACE) - 1;
	l2->l_md.md_astpending = 0;

	/*
	 * Copy the trapframe.
	 */
	tf = l2->l_md.md_tf;
	*tf = *l1->l_md.md_tf;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_special.sp = (unsigned long)stack + stacksize;

	/*
	 * Set-up the return values as expected by the fork() libc stub.
	 * NOTE(review): psr bit IA64_PSR_IS presumably selects the IA-32
	 * instruction-set convention for return registers — confirm.
	 */
	if (tf->tf_special.psr & IA64_PSR_IS) {
		tf->tf_scratch.gr8 = 0;
		tf->tf_scratch.gr10 = 1;
	} else {
		tf->tf_scratch.gr8 = 0;
		tf->tf_scratch.gr9 = 1;
		tf->tf_scratch.gr10 = 0;
	}

	/* Entry function and its argument for the trampoline. */
	tf->tf_scratch.gr2 = (unsigned long)FDESC_FUNC(func);
	tf->tf_scratch.gr3 = (unsigned long)arg;

	/* First switch into the child resumes at lwp_trampoline. */
	pcb2->pcb_special.sp = (unsigned long)tf - 16;
	pcb2->pcb_special.rp = (unsigned long)FDESC_FUNC(lwp_trampoline);
	pcb2->pcb_special.pfs = 0;

	return;
}
/*
 * Machine-dependent reboot: sync disks, optionally dump, run shutdown
 * hooks, then halt or reset the board by poking the LED/reset register.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	/* Take a snapshot before clobbering any registers. */
	savectx(curpcb);

	if (cold) {
		/* Too early in boot to sync or dump; just halt. */
		howto |= RB_HALT;
		goto haltsys;
	}

	/* If "always halt" was specified as a boot flag, obey. */
	if (boothowto & RB_HALT)
		howto |= RB_HALT;

	boothowto = howto;
	/* `waittime' is presumably a file-scope sync guard — not visible here. */
	if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Block interrupts for the remainder of shutdown. */
	splhigh();

	if (howto & RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* For proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n\n");

	/* Give the console a moment to drain before resetting. */
	delay(500000);

	/* Writing LED_RESET to the LED register triggers a board reset. */
	*(volatile char *)MIPS_PHYS_TO_KSEG1(LED_ADDR) = LED_RESET;

	printf("WARNING: reboot failed!\n");

	for (;;)
		;
}
/*
 * Machine-dependent shutdown/reboot entry point.  Syncs the disks,
 * optionally dumps, powers devices down, then either halts via the
 * system control monitor or reboots via doboot().  Never returns.
 */
__dead void
boot(int howto)
{
	/* Snapshot our context while the registers are still meaningful. */
	if (curproc && curproc->p_addr)
		savectx(curpcb);

	if (cold) {
		/* Cold: only an explicit user request may reboot. */
		if ((howto & RB_USERREQ) == 0)
			howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0) {
		vfs_shutdown();

		if ((howto & RB_TIMEBAD) == 0) {
			resettodr();
		} else {
			printf("WARNING: not updating battery clock\n");
		}
	}
	if_downall();

	uvm_shutdown();
	splhigh();
	/* Mark the system cold so further activity behaves accordingly. */
	cold = 1;

	if ((howto & RB_DUMP) != 0)
		dumpsys();

haltsys:
	config_suspend_all(DVACT_POWERDOWN);

	if ((howto & RB_HALT) != 0) {
		printf("System halted.\n\n");
		/* Switch to a safe stack and stop the MMU before halting. */
		bootstack();
		cmmu_shutdown();
		scm_halt();
	}

	doboot();

	for (;;)
		;
	/* NOTREACHED */
}
/*
 * Finish a fork operation, with process l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the process being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	l2->l_md.md_flags = l1->l_md.md_flags;

	/* Copy pcb from lwp l1 to l2. */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	} else {
		KASSERT(l1 == &lwp0);
	}
	*pcb2 = *pcb1;

	/*
	 * Copy the trap frame.  It lives at the top of the child's uarea.
	 */
	tf = (struct trapframe *)(uvm_lwp_getuarea(l2) + USPACE) - 1;
	l2->l_md.md_regs = (int *)tf;
	*tf = *(struct trapframe *)l1->l_md.md_regs;

	/*
	 * If specified, give the child a different stack.
	 * tf_regs[15] is the user stack pointer on this port.
	 */
	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;

	/* First switch into the child resumes at lwp_trampoline. */
	sf = (struct switchframe *)tf - 1;
	sf->sf_pc = (u_int)lwp_trampoline;
	pcb2->pcb_regs[6] = (int)func;		/* A2 */
	pcb2->pcb_regs[7] = (int)arg;		/* A3 */
	pcb2->pcb_regs[8] = (int)l2;		/* A4 */
	pcb2->pcb_regs[11] = (int)sf;		/* SSP */
	pcb2->pcb_ps = PSL_LOWIPL;		/* start kthreads at IPL 0 */
}
/* * Handle an IPI_STOP by saving our current context and spinning until we * are resumed. */ void cpustop_handler(void) { int cpu; cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* Indicate that we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) ia32_pause(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); if (cpu == 0 && cpustop_restartfunc != NULL) { cpustop_restartfunc(); cpustop_restartfunc = NULL; } }
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

	/*
	 * If fpuproc != p1, then the fpu h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps.
	 *
	 * If fpuproc == p1, then we have to save the fpu h/w state to
	 * p1's pcb so that we can copy it.
	 */
	if (p1->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p1, 1);

	p2->p_md.md_flags = p1->p_md.md_flags;
	syscall_intern(p2);

	/* Copy pcb from proc p1 to p2. */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Preset these so that gdt_compact() doesn't get confused if called
	 * during the allocations below.
	 *
	 * Note: pcb_ldt_sel is handled in the pmap_activate() call when
	 * we run the new process.
	 */
	p2->p_md.md_tss_sel = GSEL(GNULL_SEL, SEL_KPL);

	/*
	 * Activate the address space.  Note this will refresh pcb_ldt_sel.
	 */
	pmap_activate(p2);

	/* Fix up the TSS: ring-0 stack and IST entry inside the uarea. */
	pcb->pcb_tss.tss_rsp0 = (u_int64_t)p2->p_addr + USPACE - 16;
	pcb->pcb_tss.tss_ist[0] = (u_int64_t)p2->p_addr + PAGE_SIZE - 16;
	p2->p_md.md_tss_sel = tss_alloc(pcb);

	/*
	 * Copy the trapframe.  It lives just below the kernel stack top.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_tss.tss_rsp0 - 1;
	*tf = *p1->p_md.md_regs;

	setredzone(p2);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_rsp = (u_int64_t)stack + stacksize;

	/*
	 * Build a switchframe so the first switch into the child enters
	 * the appropriate trampoline, which calls func(arg).
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_ppl = IPL_NONE;
	sf->sf_r12 = (u_int64_t)func;
	sf->sf_r13 = (u_int64_t)arg;
	if (func == child_return)
		sf->sf_rip = (u_int64_t)child_trampoline;
	else
		sf->sf_rip = (u_int64_t)proc_trampoline;
	pcb->pcb_rsp = (u_int64_t)sf;
	pcb->pcb_rbp = 0;
}
/*
 * Interrupt filter for inter-processor interrupts.  Reads and handles
 * IPIs from the interrupt controller until none remain (0x3ff means
 * "no pending IPI"), acknowledging each one after it is serviced.
 */
static int
ipi_handler(void *arg)
{
	u_int cpu, ipi;

	cpu = PCPU_GET(cpuid);

	/* arg carries the initial IPI number to read (cast via int). */
	ipi = pic_ipi_read((int)arg);
	while ((ipi != 0x3ff)) {
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			/* AST is acted on at the interrupt return path. */
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			savectx(&stoppcbs[cpu]);

			/*
			 * CPUs are stopped when entering the debugger and at
			 * system shutdown, both events which can precede a
			 * panic dump.  For the dump to be correct, all caches
			 * must be flushed and invalidated, but on ARM there's
			 * no way to broadcast a wbinv_all to other cores.
			 * Instead, we have each core do the local wbinv_all as
			 * part of stopping the core.  The core requesting the
			 * stop will do the l2 cache flush after all other cores
			 * have done their l1 flushes and stopped.
			 */
			cpu_idcache_wbinv_all();

			/* Indicate we are stopped */
			CPU_SET_ATOMIC(cpu, &stopped_cpus);

			/* Wait for restart */
			while (!CPU_ISSET(cpu, &started_cpus))
				cpu_spinwait();
			CPU_CLR_ATOMIC(cpu, &started_cpus);
			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		case IPI_TLB:
			CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
			cpufuncs.cf_tlb_flushID();
			break;
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}

		/* Acknowledge this IPI, then look for the next pending one. */
		pic_ipi_clear(ipi);
		ipi = pic_ipi_read(-1);
	}

	return (FILTER_HANDLED);
}
int cpu_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, struct proc *p) { /* all sysctl names at this level are terminal */ if (namelen != 1) return (ENOTDIR); /* overloaded */ switch (name[0]) { case CPU_CONSDEV: return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tab->cn_dev, sizeof cn_tab->cn_dev)); default: } return (EOPNOTSUPP); } void cpu_reboot(int howto, char *bootstr) { /* take a snap shot before clobbering any registers */ if (curproc) savectx(curpcb); /* If system is cold, just halt. */ if (cold) { howto |= RB_HALT; goto haltsys; } /* If "always halt" was specified as a boot flag, obey. */ if ((boothowto & RB_HALT) != 0) { howto |= RB_HALT; } #ifdef KLOADER_KERNEL_PATH if ((howto & RB_HALT) == 0) kloader_reboot_setup(KLOADER_KERNEL_PATH); #endif boothowto = howto; if ((howto & RB_NOSYNC) == 0) { /* * Synchronize the disks.... */ vfs_shutdown(); /* * If we've been adjusting the clock, the todr * will be out of synch; adjust it now. */ resettodr(); } /* Disable interrupts. */ splhigh(); /* If rebooting and a dump is requested do it. */ #if notyet if (howto & RB_DUMP) dumpsys(); #endif haltsys: /* run any shutdown hooks */ doshutdownhooks(); /* Finally, halt/reboot the system. */ if (howto & RB_HALT) { printf("halted.\n"); } else { #ifdef KLOADER_KERNEL_PATH kloader_reboot(); /* NOTREACHED */ #endif } #if NHD64465IF > 0 hd64465_shutdown(); #endif cpu_reset(); /*NOTREACHED*/ while(1) ; } /* return # of physical pages. */ int mem_cluster_init(paddr_t addr) { phys_ram_seg_t *seg; int npages, i; /* cluster 0 is always kernel myself. */ mem_clusters[0].start = SH_CS3_START; mem_clusters[0].size = addr - SH_CS3_START; mem_cluster_cnt = 1; /* search CS3 */ #ifdef SH3 /* SH7709A's CS3 is splited to 2 banks. 
*/ if (CPU_IS_SH3) { __find_dram_shadow(addr, SH7709_CS3_BANK0_END); __find_dram_shadow(SH7709_CS3_BANK1_START, SH7709_CS3_BANK1_END); } #endif #ifdef SH4 /* contig CS3 */ if (CPU_IS_SH4) { __find_dram_shadow(addr, SH_CS3_END); } #endif _DPRINTF("mem_cluster_cnt = %d\n", mem_cluster_cnt); npages = 0; for (i = 0, seg = mem_clusters; i < mem_cluster_cnt; i++, seg++) { _DPRINTF("mem_clusters[%d] = {0x%lx+0x%lx <0x%lx}", i, (paddr_t)seg->start, (paddr_t)seg->size, (paddr_t)seg->start + (paddr_t)seg->size); npages += sh3_btop(seg->size); #ifdef NARLY_MEMORY_PROBE if (i == 0) { _DPRINTF(" don't check.\n"); continue; } if (__check_dram((paddr_t)seg->start, (paddr_t)seg->start + (paddr_t)seg->size) != 0) panic("D-RAM check failed."); #else _DPRINTF("\n"); #endif /* NARLY_MEMORY_PROBE */ } return (npages); }
/*
 * Machine-dependent reboot: sync disks, optionally dump, run shutdown
 * hooks, then attempt to reboot through CFE firmware if present.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	/* Take a snapshot before clobbering any registers. */
	if (curlwp)
		savectx((struct user *)curpcb);

	if (cold) {
		/* Too early in boot to sync or dump; just halt. */
		howto |= RB_HALT;
		goto haltsys;
	}

	/* If "always halt" was specified as a boot flag, obey. */
	if (boothowto & RB_HALT)
		howto |= RB_HALT;

	boothowto = howto;
	/* `waittime' is presumably a file-scope sync guard — not visible here. */
	if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Block interrupts for the remainder of shutdown. */
	splhigh();

	if (howto & RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* For proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n\n");

	if (cfe_present) {
		/*
		 * XXX
		 * For some reason we can't return to CFE with
		 * and do a warm start.  Need to look into this...
		 */
		cfe_exit(0, (howto & RB_DUMP) ? 1 : 0);
		printf("cfe_exit didn't!\n");
	}

	printf("WARNING: reboot failed!\n");

	for (;;);
}
/*
 * Machine-dependent reboot: sync disks, optionally dump, run shutdown
 * hooks, then halt/power off/reboot through the PROM monitor.
 */
void
cpu_reboot(volatile int howto, char *bootstr)
{
	/* take a snap shot before clobbering any registers */
	savectx(curpcb);

#ifdef DEBUG
	/* On panic, print a stack trace before tearing things down. */
	if (panicstr)
		stacktrace();
#endif

	/* If system is cold, just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	/* If "always halt" was specified as a boot flag, obey. */
	if ((boothowto & RB_HALT) != 0)
		howto |= RB_HALT;

	boothowto = howto;
	/* `waittime' is presumably a file-scope sync guard — not visible here. */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		/*
		 * Synchronize the disks....
		 */
		waittime = 0;
		vfs_shutdown();

		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	disable_intr();
	splhigh();

	/* If rebooting and a dump is requested do it. */
#if 0
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
#else
	if (howto & RB_DUMP)
#endif
		dumpsys();

haltsys:
	/* run any shutdown hooks */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN)
		prom_halt(0x80);	/* rom monitor RB_PWOFF */

	/* Finally, halt/reboot the system. */
	printf("%s\n\n", howto & RB_HALT ? "halted." : "rebooting...");
	prom_halt(howto & RB_HALT);
	/*NOTREACHED*/
}
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

	/*
	 * If fpuproc != p1, then the fpu h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps.
	 *
	 * If fpuproc == p1, then we have to save the fpu h/w state to
	 * p1's pcb so that we can copy it.
	 */
	if (p1->p_addr->u_pcb.pcb_fpcpu != NULL)
		fpusave_proc(p1, 1);

	p2->p_md.md_flags = p1->p_md.md_flags;

	/* Copy pcb from proc p1 to p2. */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#ifdef DIAGNOSTIC
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Activate the address space.
	 */
	pmap_activate(p2);

	/* Record where this process's kernel stack is */
	pcb->pcb_kstack = (u_int64_t)p2->p_addr + USPACE - 16;

	/*
	 * Copy the trapframe.  It sits just below the kernel stack top.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)pcb->pcb_kstack - 1;
	*tf = *p1->p_md.md_regs;

	setredzone(p2);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_rsp = (u_int64_t)stack + stacksize;

	/*
	 * Build a switchframe so the first switch into the child enters
	 * the trampoline, which calls func(arg).
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_r12 = (u_int64_t)func;
	sf->sf_r13 = (u_int64_t)arg;
	/* XXX fork of init(8) returns via proc_trampoline() */
	if (p2->p_pid == 1)
		sf->sf_rip = (u_int64_t)proc_trampoline;
	else
		sf->sf_rip = (u_int64_t)child_trampoline;
	pcb->pcb_rsp = (u_int64_t)sf;
	pcb->pcb_rbp = 0;
}
/*
 * Machine-dependent part of entering an ACPI sleep state.
 *
 * Returns 1 on successful wakeup, 0 if the sleep attempt failed, or -1
 * when no wake memory was allocated.  The suspend path runs inside the
 * savectx() branch; NOTE(review): execution presumably resumes past
 * that branch via the wake code restoring the saved context — confirm
 * against the resume trampoline.
 */
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	ACPI_STATUS status;

	if (sc->acpi_wakeaddr == 0ul)
		return (-1);	/* couldn't alloc wake memory */

#ifdef SMP
	/* All CPUs except ourselves must be suspended. */
	suspcpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &suspcpus);
#endif

	if (acpi_resume_beep != 0)
		timer_spkr_acquire();

	/* Tell the firmware where to resume execution after wake. */
	AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc));

	intr_suspend();

	if (savectx(susppcbs[0])) {
#ifdef __amd64__
		ctx_fpusave(susppcbs[0]->pcb_fpususpend);
#endif
#ifdef SMP
		if (!CPU_EMPTY(&suspcpus) && suspend_cpus(suspcpus) == 0) {
			device_printf(sc->acpi_dev, "Failed to suspend APs\n");
			return (0);	/* couldn't sleep */
		}
#endif

		/* Patch runtime parameters into the real-mode wake code. */
		WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0));
		WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0));

#ifndef __amd64__
		WAKECODE_FIXUP(wakeup_cr4, register_t, susppcbs[0]->pcb_cr4);
#endif
		WAKECODE_FIXUP(wakeup_pcb, struct pcb *, susppcbs[0]);
		WAKECODE_FIXUP(wakeup_gdt, uint16_t,
		    susppcbs[0]->pcb_gdt.rd_limit);
		WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t,
		    susppcbs[0]->pcb_gdt.rd_base);

		/* Call ACPICA to enter the desired sleep state */
		if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
			status = AcpiEnterSleepStateS4bios();
		else
			status = AcpiEnterSleepState(state);
		if (ACPI_FAILURE(status)) {
			device_printf(sc->acpi_dev,
			    "AcpiEnterSleepState failed - %s\n",
			    AcpiFormatException(status));
			return (0);	/* couldn't sleep */
		}

		/* Sleep entry succeeded; spin until the hardware sleeps. */
		for (;;)
			ia32_pause();
	}

	return (1);	/* wakeup successfully */
}
void cpu_reboot(int howto, char *bootstr) { static int waittime = -1; const struct alchemy_board *board; /* Take a snapshot before clobbering any registers. */ if (curproc) savectx((struct user *)curpcb); board = board_info(); KASSERT(board != NULL); /* If "always halt" was specified as a boot flag, obey. */ if (boothowto & RB_HALT) howto |= RB_HALT; boothowto = howto; /* If system is cold, just halt. */ if (cold) { boothowto |= RB_HALT; goto haltsys; } if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) { waittime = 0; /* * Synchronize the disks.... */ vfs_shutdown(); /* * If we've been adjusting the clock, the todr * will be out of synch; adjust it now. */ resettodr(); } /* Disable interrupts. */ splhigh(); if (boothowto & RB_DUMP) dumpsys(); haltsys: /* Run any shutdown hooks. */ doshutdownhooks(); if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN) if (board && board->ab_poweroff) board->ab_poweroff(); /* * YAMON may autoboot (depending on settings), and we cannot pass * flags to it (at least I haven't figured out how to yet), so * we "pseudo-halt" now. */ if (boothowto & RB_HALT) { printf("\n"); printf("The operating system has halted.\n"); printf("Please press any key to reboot.\n\n"); cnpollc(1); /* For proper keyboard command handling */ cngetc(); cnpollc(0); } printf("reseting board...\n\n"); /* * Try to use board-specific reset logic, which might involve a better * hardware reset. */ if (board->ab_reboot) board->ab_reboot(); #if 1 /* XXX * For some reason we are leaving the ethernet MAC in a state where * YAMON isn't happy with it. So just call the reset vector (grr, * Alchemy YAMON doesn't have a "reset" command). */ mips_icache_sync_all(); mips_dcache_wbinv_all(); __asm volatile("jr %0" :: "r"(MIPS_RESET_EXC_VEC)); #else printf("%s\n\n", ((howto & RB_HALT) != 0) ? "halted." : "rebooting..."); yamon_exit(boothowto); printf("Oops, back from yamon_exit()\n\nSpinning..."); #endif for (;;) /* spin forever */ ; /* XXX */ /*NOTREACHED*/ }
void cpu_reboot(int howto, char *bootstr) { static int waittime = -1; /* Take a snapshot before clobbering any registers. */ savectx(curpcb); /* If "always halt" was specified as a boot flag, obey. */ if (boothowto & RB_HALT) howto |= RB_HALT; boothowto = howto; /* If system is cold, just halt. */ if (cold) { boothowto |= RB_HALT; goto haltsys; } if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) { waittime = 0; /* * Synchronize the disks.... */ vfs_shutdown(); /* * If we've been adjusting the clock, the todr * will be out of synch; adjust it now. */ resettodr(); } /* Disable interrupts. */ splhigh(); if (boothowto & RB_DUMP) dumpsys(); haltsys: /* Run any shutdown hooks. */ doshutdownhooks(); pmf_system_shutdown(boothowto); #if 0 if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN) if (board && board->ab_poweroff) board->ab_poweroff(); #endif /* * Firmware may autoboot (depending on settings), and we cannot pass * flags to it (at least I haven't figured out how to yet), so * we "pseudo-halt" now. */ if (boothowto & RB_HALT) { printf("\n"); printf("The operating system has halted.\n"); printf("Please press any key to reboot.\n\n"); cnpollc(1); /* For proper keyboard command handling */ cngetc(); cnpollc(0); } printf("reseting board...\n\n"); mips_icache_sync_all(); mips_dcache_wbinv_all(); atheros_reset(); __asm volatile("jr %0" :: "r"(MIPS_RESET_EXC_VEC)); printf("Oops, back from reset\n\nSpinning..."); for (;;) /* spin forever */ ; /* XXX */ /*NOTREACHED*/ }
/*
 * Main external-interrupt dispatcher.  Handles the clock interrupt
 * (vector 255), the SMP IPIs, and hands everything else to
 * ia64_dispatch_intr().  Interrupt nesting depth is tracked on the
 * current thread around the whole body.
 */
void
interrupt(u_int64_t vector, struct trapframe *framep)
{
	struct thread *td;
	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;

	td = curthread;
	atomic_add_int(&td->td_intr_nesting_level, 1);

	/*
	 * Handle ExtINT interrupts by generating an INTA cycle to
	 * read the vector.
	 */
	if (vector == 0) {
		vector = ib->ib_inta;
		printf("ExtINT interrupt: vector=%ld\n", vector);
	}

	if (vector == 255) {	/* clock interrupt */
		/* CTR0(KTR_INTR, "clock interrupt"); */
		cnt.v_intr++;
#ifdef EVCNT_COUNTERS
		clock_intr_evcnt.ev_count++;
#else
		intrcnt[INTRCNT_CLOCK]++;
#endif
		critical_enter();
#ifdef SMP
		clks[PCPU_GET(cpuid)]++;
		/* Only the BSP runs the real clock */
		if (PCPU_GET(cpuid) == 0) {
#endif
			handleclock(framep);
			/* divide hz (1024) by 8 to get stathz (128) */
			if ((++schedclk2 & 0x7) == 0)
				statclock((struct clockframe *)framep);
#ifdef SMP
		} else {
			/* APs re-arm their own interval timer by hand. */
			ia64_set_itm(ia64_get_itc() + itm_reload);
			mtx_lock_spin(&sched_lock);
			hardclock_process(curthread, TRAPF_USERMODE(framep));
			if ((schedclk2 & 0x7) == 0)
				statclock_process(curkse, TRAPF_PC(framep),
				    TRAPF_USERMODE(framep));
			mtx_unlock_spin(&sched_lock);
		}
#endif
		critical_exit();
#ifdef SMP
	} else if (vector == ipi_vector[IPI_AST]) {
		asts[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
		rdvs[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
		smp_rendezvous_action();
	} else if (vector == ipi_vector[IPI_STOP]) {
		u_int32_t mybit = PCPU_GET(cpumask);

		CTR1(KTR_SMP, "IPI_STOP, cpuid=%d", PCPU_GET(cpuid));
		/* Save our context, then spin until we are restarted. */
		savectx(PCPU_GET(pcb));
		stopped_cpus |= mybit;
		while ((started_cpus & mybit) == 0)
			/* spin */;
		started_cpus &= ~mybit;
		stopped_cpus &= ~mybit;
		/* The BSP runs any pending restart hook exactly once. */
		if (PCPU_GET(cpuid) == 0 && cpustop_restartfunc != NULL) {
			void (*f)(void) = cpustop_restartfunc;
			cpustop_restartfunc = NULL;
			(*f)();
		}
	} else if (vector == ipi_vector[IPI_TEST]) {
		CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid));
		mp_ipi_test++;
#endif
	} else {
		/* A regular device interrupt. */
		ints[PCPU_GET(cpuid)]++;
		ia64_dispatch_intr(framep, vector);
	}

	atomic_subtract_int(&td->td_intr_nesting_level, 1);
}
/*
 * cpu_lwp_fork: finish a new LWP (l2) operation.
 *
 * First LWP (l1) is the process being forked.  If it is &lwp0, then we
 * are creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	vaddr_t uv;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	/*
	 * If parent LWP was using FPU, then we have to save the FPU h/w
	 * state to PCB so that we can copy it.
	 */
	if (pcb1->pcb_fpcpu != NULL) {
		fpusave_lwp(l1, true);
	}

	/*
	 * Sync the PCB before we copy it.
	 */
	if (l1 == curlwp) {
		KASSERT(pcb1 == curpcb);
		savectx(pcb1);
	} else {
		KASSERT(l1 == &lwp0);
	}

	/* Copy the PCB from parent. */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

#if defined(XEN)
	/* Under Xen the child starts with kernel I/O privilege. */
	pcb2->pcb_iopl = SEL_KPL;
#endif

	/*
	 * Set the kernel stack address (from the address to uarea) and
	 * trapframe address for child.
	 *
	 * Rig kernel stack so that it would start out in lwp_trampoline()
	 * and call child_return() with l2 as an argument.  This causes the
	 * newly-created child process to go directly to user level with a
	 * parent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */
	uv = uvm_lwp_getuarea(l2);

#ifdef __x86_64__
	/* Keep the kernel stack pointer 16-byte aligned. */
	pcb2->pcb_rsp0 = (uv + KSTACK_SIZE - 16) & ~0xf;
	tf = (struct trapframe *)pcb2->pcb_rsp0 - 1;
#else
	pcb2->pcb_esp0 = (uv + KSTACK_SIZE - 16);
	tf = (struct trapframe *)pcb2->pcb_esp0 - 1;

	pcb2->pcb_iomap = NULL;
#endif
	l2->l_md.md_regs = tf;

	/*
	 * Copy the trapframe from parent, so that return to userspace
	 * will be to right address, with correct registers.
	 */
	memcpy(tf, l1->l_md.md_regs, sizeof(struct trapframe));

	/* Child LWP might get aston() before returning to userspace. */
	tf->tf_trapno = T_ASTFLT;

#if 0 /* DIAGNOSTIC */
	/* Set a red zone in the kernel stack after the uarea. */
	pmap_kremove(uv, PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif

	/* If specified, set a different user stack for a child. */
	if (stack != NULL) {
#ifdef __x86_64__
		tf->tf_rsp = (uint64_t)stack + stacksize;
#else
		tf->tf_esp = (uint32_t)stack + stacksize;
#endif
	}

	l2->l_md.md_flags = l1->l_md.md_flags;
	l2->l_md.md_astpending = 0;

	/*
	 * Build a switchframe below the trapframe; the first switch into
	 * the child resumes at lwp_trampoline, which calls func(arg).
	 */
	sf = (struct switchframe *)tf - 1;

#ifdef __x86_64__
	sf->sf_r12 = (uint64_t)func;
	sf->sf_r13 = (uint64_t)arg;
	sf->sf_rip = (uint64_t)lwp_trampoline;
	pcb2->pcb_rsp = (uint64_t)sf;
	pcb2->pcb_rbp = (uint64_t)l2;
#else
	sf->sf_esi = (int)func;
	sf->sf_ebx = (int)arg;
	sf->sf_eip = (int)lwp_trampoline;
	pcb2->pcb_esp = (int)sf;
	pcb2->pcb_ebp = (int)l2;
#endif
}
/*
 * Write a crash dump of physical memory to the configured dump device,
 * cluster by cluster, reporting progress and the final status on the
 * console.
 */
void
dumpsys(void)
{
	u_long totalbytesleft, bytes, i, n, memseg;
	u_long maddr;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	/* Probe the dump partition size; -1 means no usable area. */
	error = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (error == -1) {
		printf("area unavailable\n");
		return;
	}

	/* Write the dump header first. */
	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;

	for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
		maddr = mem_clusters[memseg].start;
		bytes = mem_clusters[memseg].size;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
			/* Print out how many MBs we have left to go. */
			if ((totalbytesleft % (1024*1024)) == 0)
				printf("%ld ",
				    totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/* Map the physical chunk at dumpspace, then write. */
			(void) pmap_map(dumpspace, maddr, maddr + n,
			    VM_PROT_READ);

			error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);	/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
			/* operator aborting dump? */
			if (sget() != NULL) {
				error = EINTR;
				break;
			}
#endif
		}
	}

err:
	/* Translate the final error code into a console message. */
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}
/*
 * amd64_set_ldt:
 *
 *	Install user-supplied LDT descriptors for process p, growing the
 *	per-pmap LDT if necessary.  `args' points to a userland
 *	struct amd64_set_ldt_args (start slot, count, descriptor array).
 *	On success *retval is set to the first slot written.
 *
 *	Returns 0 on success, EINVAL for out-of-range slots, EACCES for
 *	descriptors that would violate protection, or a copyin error.
 *
 *	Fix vs. previous revision: the reallocation path tested and set
 *	PCB_USER_LDT on pmap->pm_flags.  PCB_USER_LDT is a pcb-flag bit;
 *	the pmap flag namespace uses PMF_USER_LDT (as the allocation test
 *	above it already does).  Using the wrong bit meant a second LDT
 *	grow could skip ldt_free() and leak the old LDT slot.
 */
int
amd64_set_ldt(struct proc *p, void *args, register_t *retval)
{
	int error, i, n;
	struct pcb *pcb = &p->p_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	struct amd64_set_ldt_args ua;
	union descriptor desc;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

#ifdef LDT_DEBUG
	printf("amd64_set_ldt: start=%d num=%d descs=%p\n", ua.start,
	    ua.num, ua.desc);
#endif

	if (ua.start < 0 || ua.num < 0)
		return (EINVAL);
	/*
	 * Overflow-safe range check: once ua.start <= 8192 is known,
	 * compare ua.num against the remaining room instead of forming
	 * ua.start + ua.num, which could overflow signed int (UB) for
	 * huge ua.num.  Accepts exactly the same valid inputs as before.
	 */
	if (ua.start > 8192 || ua.num > 8192 - ua.start)
		return (EINVAL);

	/*
	 * XXX LOCKING
	 */

	/* allocate user ldt */
	if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
		size_t old_len, new_len;
		union descriptor *old_ldt, *new_ldt;

		if (pmap->pm_flags & PMF_USER_LDT) {
			old_len = pmap->pm_ldt_len * sizeof(union descriptor);
			old_ldt = pmap->pm_ldt;
		} else {
			/* First user LDT: start from the default table. */
			old_len = NLDT * sizeof(union descriptor);
			old_ldt = ldt;
			pmap->pm_ldt_len = 512;
		}
		/* Double until the requested range fits. */
		while ((ua.start + ua.num) > pmap->pm_ldt_len)
			pmap->pm_ldt_len *= 2;
		new_len = pmap->pm_ldt_len * sizeof(union descriptor);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, new_len);
		memcpy(new_ldt, old_ldt, old_len);
		memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);
		pmap->pm_ldt = new_ldt;

		/*
		 * Was `pmap->pm_flags & PCB_USER_LDT' / `|= PCB_USER_LDT':
		 * wrong constant for the pmap flag field; PMF_USER_LDT is
		 * the bit the test at the top of this branch uses.
		 */
		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_free(pmap);
		else
			pmap->pm_flags |= PMF_USER_LDT;
		ldt_alloc(pmap, new_ldt, new_len);
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);

		/*
		 * XXX Need to notify other processors which may be
		 * XXX currently using this pmap that they need to
		 * XXX re-load the LDT.
		 */

		/* Don't free the statically-allocated default table. */
		if (old_ldt != ldt)
			uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len);
#ifdef LDT_DEBUG
		printf("amd64_set_ldt(%d): new_ldt=%p\n", p->p_pid, new_ldt);
#endif
	}

	if (pcb == curpcb)
		savectx(curpcb);
	error = 0;

	/* Check descriptors for access violations. */
	for (i = 0, n = ua.start; i < ua.num; i++, n++) {
		if ((error = copyin(&ua.desc[i], &desc, sizeof(desc))) != 0)
			return (error);

		switch (desc.sd.sd_type) {
		case SDT_SYSNULL:
			desc.sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc.gd.gd_p != 0 && !ISLDT(desc.gd.gd_selector) &&
			    ((IDXSEL(desc.gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc.gd.gd_selector)].sd.sd_dpl !=
				 SEL_UPL)))
				return (EACCES);
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc.sd.sd_p == 0)
				return (EACCES);
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/* Only care if it's present. */
			if (desc.sd.sd_p != 0)
				return (EACCES);
			break;
		}

		if (desc.sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc.sd.sd_dpl != SEL_UPL)
				return (EACCES);
		}
	}

	/* Now actually replace the descriptors. */
	for (i = 0, n = ua.start; i < ua.num; i++, n++) {
		if ((error = copyin(&ua.desc[i], &desc, sizeof(desc))) != 0)
			goto out;

		pmap->pm_ldt[n] = desc;
	}

	*retval = ua.start;

out:
	return (error);
}
void dumpsys() { const struct bdevsw *bdev; daddr_t blkno; int psize; int error; int addr; int block; int len; vaddr_t dumpspace; kcore_seg_t *kseg_p; cpu_kcore_hdr_t *chdr_p; char dump_hdr[dbtob(1)]; /* assumes header fits in one block */ /* Save registers. */ savectx(&dumppcb); /* flush everything out of caches */ cpu_dcache_wbinv_all(); cpu_sdcache_wbinv_all(); if (dumpdev == NODEV) return; if (dumpsize == 0) { dumpconf(); if (dumpsize == 0) return; } if (dumplo <= 0) { printf("\ndump to dev %u,%u not possible\n", major(dumpdev), minor(dumpdev)); return; } printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev), minor(dumpdev), dumplo); #ifdef UVM_SWAP_ENCRYPT uvm_swap_finicrypt_all(); #endif blkno = dumplo; dumpspace = (vaddr_t) memhook; bdev = bdevsw_lookup(dumpdev); if (bdev == NULL || bdev->d_psize == NULL) return; psize = (*bdev->d_psize)(dumpdev); printf("dump "); if (psize == -1) { printf("area unavailable\n"); return; } /* Setup the dump header */ kseg_p = (kcore_seg_t *)dump_hdr; chdr_p = (cpu_kcore_hdr_t *)&dump_hdr[ALIGN(sizeof(*kseg_p))]; bzero(dump_hdr, sizeof(dump_hdr)); CORE_SETMAGIC(*kseg_p, KCORE_MAGIC, MID_MACHINE, CORE_CPU); kseg_p->c_size = sizeof(dump_hdr) - ALIGN(sizeof(*kseg_p)); *chdr_p = cpu_kcore_hdr; error = (*bdev->d_dump)(dumpdev, blkno++, (caddr_t)dump_hdr, sizeof(dump_hdr)); if (error != 0) goto abort; len = 0; for (block = 0; block < bootconfig.dramblocks && error == 0; ++block) { addr = bootconfig.dram[block].address; for (;addr < (bootconfig.dram[block].address + (bootconfig.dram[block].pages * PAGE_SIZE)); addr += PAGE_SIZE) { if ((len % (1024*1024)) == 0) printf("%d ", len / (1024*1024)); pmap_kenter_pa(dumpspace, addr, PROT_READ); pmap_update(pmap_kernel()); error = (*bdev->d_dump)(dumpdev, blkno, (caddr_t) dumpspace, PAGE_SIZE); pmap_kremove(dumpspace, PAGE_SIZE); pmap_update(pmap_kernel()); if (error) break; blkno += btodb(PAGE_SIZE); len += PAGE_SIZE; } } abort: switch (error) { case ENXIO: printf("device bad\n"); 
break; case EFAULT: printf("device not ready\n"); break; case EINVAL: printf("area improper\n"); break; case EIO: printf("i/o error\n"); break; case EINTR: printf("aborted from console\n"); break; default: printf("succeeded\n"); break; } printf("\n\n"); delay(1000000); }