void cpu_idle(void) { struct thread *td = curthread; struct mdglobaldata *gd = mdcpu; int reqflags; crit_exit(); KKASSERT(td->td_critcount == 0); cpu_enable_intr(); for (;;) { /* * See if there are any LWKTs ready to go. */ lwkt_switch(); /* * The idle loop halts only if no threads are scheduleable * and no signals have occured. */ if (cpu_idle_hlt && (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) { splz(); #ifdef SMP KKASSERT(MP_LOCK_HELD() == 0); #endif if ((td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) { #ifdef DEBUGIDLE struct timeval tv1, tv2; gettimeofday(&tv1, NULL); #endif reqflags = gd->mi.gd_reqflags & ~RQF_IDLECHECK_WK_MASK; umtx_sleep(&gd->mi.gd_reqflags, reqflags, 1000000); #ifdef DEBUGIDLE gettimeofday(&tv2, NULL); if (tv2.tv_usec - tv1.tv_usec + (tv2.tv_sec - tv1.tv_sec) * 1000000 > 500000) { kprintf("cpu %d idlelock %08x %08x\n", gd->mi.gd_cpuid, gd->mi.gd_reqflags, gd->gd_fpending); } #endif } ++cpu_idle_hltcnt; } else { splz(); #ifdef SMP __asm __volatile("pause"); #endif ++cpu_idle_spincnt; } } }
/*
 * Put the CPU in C1 in a machine-dependant way.
 * XXX: shouldn't be here!
 */
static void
acpi_cst_c1_halt(void)
{
	/*
	 * Drain pending soft interrupts, then halt only if no work is
	 * pending for this cpu.  "sti; hlt" keeps the two back-to-back:
	 * sti's one-instruction interrupt shadow means an interrupt
	 * cannot be taken between enabling interrupts and halting.
	 */
	splz();
	if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
		__asm __volatile("sti; hlt");
	else
		__asm __volatile("sti; pause");
}
/*
 * This function handles a segmentation fault.
 *
 * XXX We assume that trapframe is a subset of ucontext.  It is as of
 * this writing.
 */
static void
exc_segfault(int signo, siginfo_t *info, void *ctxp)
{
	ucontext_t *ctx = ctxp;

#if 0
	kprintf("CAUGHT SIG %d RIP %08lx ERR %08lx TRAPNO %ld "
		"err %ld addr %08lx\n",
		signo,
		ctx->uc_mcontext.mc_rip,
		ctx->uc_mcontext.mc_err,
		ctx->uc_mcontext.mc_trapno & 0xFFFF,
		ctx->uc_mcontext.mc_trapno >> 16,
		ctx->uc_mcontext.mc_addr);
#endif
	/*
	 * Hand the machine context to the kernel trap code, treating the
	 * register block starting at mc_rdi as a trapframe (see the
	 * layout assumption documented above).
	 */
	kern_trap((struct trapframe *)&ctx->uc_mcontext.mc_rdi);
	splz();
}
/* * Put the CPU in C1 in a machine-dependant way. * XXX: shouldn't be here! */ static void acpi_cpu_c1(void) { #ifdef __ia64__ ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0); #else splz(); #ifdef SMP if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) __asm __volatile("sti; hlt"); else __asm __volatile("sti; pause"); #else if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) __asm __volatile("sti; hlt"); else __asm __volatile("sti"); #endif #endif /* !__ia64__ */ }