/*
 * The GPE handler is called when IBE/OBF or SCI events occur.  We are
 * called from an unknown lock context.
 */
static UINT32
EcGpeHandler(ACPI_HANDLE GpeDevice, UINT32 GpeNumber, void *Context)
{
	struct acpi_ec_softc *sc = Context;
	ACPI_STATUS	Status;
	EC_STATUS	EcStatus;

	KASSERT(Context != NULL, ("EcGpeHandler called with NULL"));
	KTR_LOG(acpi_ec_gpehdlstart);

	/*
	 * Notify EcWaitEvent() that the status register is now fresh.  If we
	 * didn't do this, it wouldn't be possible to distinguish an old IBE
	 * from a new one, for example when doing a write transaction (writing
	 * address and then data values).
	 */
	atomic_add_int(&sc->ec_gencount, 1);
	wakeup(sc);

	/*
	 * If the EC_SCI bit of the status register is set, queue a query
	 * handler.  It will run the query and _Qxx method later, under
	 * the lock.
	 */
	EcStatus = EC_GET_CSR(sc);
	if ((EcStatus & EC_EVENT_SCI) && !sc->ec_sci_pend) {
		KTR_LOG(acpi_ec_gpequeuehdl);
		Status = AcpiOsExecute(OSL_GPE_HANDLER, EcGpeQueryHandler,
		    Context);
		if (ACPI_SUCCESS(Status)) {
			sc->ec_sci_pend = TRUE;
			return (0);
		} else {
			kprintf("EcGpeHandler: queuing GPE query handler "
			    "failed\n");
		}
	}
	return (ACPI_REENABLE_GPE);
}
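Every listing in this section reports events through KTR_LOG().  For orientation, a probe such as acpi_ec_gpehdlstart must have been declared beforehand with the KTR_INFO machinery from <sys/ktr.h>.  The sketch below only shows the general shape; the mask macro and format strings are assumptions (the KTR_INFO argument list has also varied between DragonFly versions), not a copy of the driver's actual declarations.

#include <sys/ktr.h>

#ifndef KTR_ACPI_EC
#define KTR_ACPI_EC	KTR_ALL		/* assumed mask; normally a kernel config option */
#endif

KTR_INFO_MASTER(acpi_ec);
/* One KTR_INFO() per event; ktrdump(8) prints the format string. */
KTR_INFO(KTR_ACPI_EC, acpi_ec, gpehdlstart, 0, "ec gpe handler start");
KTR_INFO(KTR_ACPI_EC, acpi_ec, gpequeuehdl, 1, "ec gpe queueing query handler");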
/*
 * bfq_destroy_tdio(): .destroy_tdio callback of the bfq policy
 *
 * Called immediately after a dsched_thread_io struct's refcount decreases
 * to zero.  This function records the seek_avg and ttime_avg of the
 * destroyed thread with the KTR facility.
 *
 * lock: none
 *
 * refcount: the tdio's refcount should be zero.  It may be nuked, and
 * any read/write to the tdio is not safe by then.
 */
static void
bfq_destroy_tdio(struct dsched_thread_io *tdio)
{
	struct bfq_thread_io *bfq_tdio = (struct bfq_thread_io *)tdio;

	/*
	 * Do not log threads that never issued any I/O.
	 */
	if (bfq_tdio->seek_samples != 0 || bfq_tdio->ttime_samples != 0) {
		KTR_LOG(dsched_bfq_thread_seek_avg, bfq_tdio,
		    bfq_tdio->seek_avg);
		KTR_LOG(dsched_bfq_thread_ttime_avg, bfq_tdio,
		    bfq_tdio->ttime_avg);
	}

	helper_msg_destroy_tdio((struct bfq_disk_ctx *)tdio->diskctx, tdio);
}
/*
 * bfq_new_tdio(): .new_tdio callback of the bfq policy.  Initialize
 * the bfq_thread_io structure.
 *
 * lock: none
 * refcount: none
 */
static void
bfq_new_tdio(struct dsched_thread_io *tdio)
{
	struct bfq_thread_io *bfq_tdio = (struct bfq_thread_io *)tdio;

	/* The queue has to be initialized somewhere else. */
	tdio->qlength = 0;
	tdio->debug_priv = 0xF00FF00F;
	bfq_tdio->budget = BFQ_DEFAULT_MIN_BUDGET;
	bfq_tdio->weight = BFQ_DEFAULT_WEIGHT;

	bfq_tdio->tdio_as_switch = 1;
	bfq_tdio->maybe_timeout = 0;

	bfq_tdio->seek_samples = 0;
	bfq_tdio->seek_avg = 0;
	bfq_tdio->seek_total = 0;
	bfq_tdio->ttime_samples = 0;
	bfq_tdio->ttime_avg = 0;
	bfq_tdio->service_received = 0;
	bfq_tdio->bio_dispatched = 0;
	bfq_tdio->bio_completed = 0;

	KTR_LOG(dsched_bfq_thread_created, bfq_tdio);
}
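The seek_samples/seek_total/seek_avg triple initialized above is the kind of state usually maintained as a decaying weighted average, in the style popularized by Linux's CFQ scheduler.  The update below is a hypothetical sketch of that technique, not the bfq policy's actual update (which lives in its dispatch path); the 7/8 decay and the 256 fixed-point scale are illustrative choices.

/*
 * Hypothetical decaying-average update (7/8 old, 1/8 new, fixed-point
 * scale of 256) for the fields initialized in bfq_new_tdio() above.
 */
static void
bfq_update_seek_avg_sketch(struct bfq_thread_io *bfq_tdio, off_t cur_seek)
{
	bfq_tdio->seek_samples = (7 * bfq_tdio->seek_samples + 256) / 8;
	bfq_tdio->seek_total = (7 * bfq_tdio->seek_total +
	    256 * cur_seek) / 8;
	if (bfq_tdio->seek_samples != 0)
		bfq_tdio->seek_avg = (bfq_tdio->seek_total +
		    bfq_tdio->seek_samples / 2) / bfq_tdio->seek_samples;
}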
static ACPI_STATUS
EcWrite(struct acpi_ec_softc *sc, UINT8 Address, UINT8 Data)
{
	ACPI_STATUS status;
	u_int gen_count;

	ACPI_SERIAL_ASSERT(ec);
	KTR_LOG(acpi_ec_writeaddr, Address, Data);

	status = EcCommand(sc, EC_COMMAND_WRITE);
	if (ACPI_FAILURE(status))
		return (status);

	gen_count = sc->ec_gencount;
	EC_SET_DATA(sc, Address);
	status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
	if (ACPI_FAILURE(status)) {
		device_printf(sc->ec_dev,
		    "EcWrite: failed waiting for sent address\n");
		return (status);
	}

	gen_count = sc->ec_gencount;
	EC_SET_DATA(sc, Data);
	status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
	if (ACPI_FAILURE(status)) {
		device_printf(sc->ec_dev,
		    "EcWrite: failed waiting for sent data\n");
		return (status);
	}

	return (AE_OK);
}
static ACPI_STATUS
EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
{
	ACPI_STATUS status;
	u_int gen_count;
	int retry;

	ACPI_SERIAL_ASSERT(ec);
	KTR_LOG(acpi_ec_readaddr, Address);

	for (retry = 0; retry < 2; retry++) {
		status = EcCommand(sc, EC_COMMAND_READ);
		if (ACPI_FAILURE(status))
			return (status);

		gen_count = sc->ec_gencount;
		EC_SET_DATA(sc, Address);
		status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL,
		    gen_count);
		if (ACPI_FAILURE(status)) {
			if (ACPI_SUCCESS(EcCheckStatus(sc, "retr_check",
			    EC_EVENT_INPUT_BUFFER_EMPTY)))
				continue;
			else
				break;
		}
		*Data = EC_GET_DATA(sc);
		return (AE_OK);
	}
	device_printf(sc->ec_dev, "EcRead: failed waiting to get data\n");
	return (status);
}
static int
dirfs_root(struct mount *mp, struct vnode **vpp)
{
	dirfs_mount_t dmp;
	dirfs_node_t dnp;
	int fd;
	int error;

	dbg(1, "called\n");

	error = 0;	/* also covers the cached dm_root path below */
	dmp = VFS_TO_DIRFS(mp);
	KKASSERT(dmp != NULL);

	if (dmp->dm_root == NULL) {
		/*
		 * dm_root holds the root dirfs node.  Allocate a new one
		 * since there is none.  Also attempt to lstat(2) it, in
		 * order to set data for VOP_ACCESS().
		 */
		dnp = dirfs_node_alloc(mp);
		error = dirfs_node_stat(DIRFS_NOFD, dmp->dm_path, dnp);
		if (error != 0) {
			dirfs_node_free(dmp, dnp);
			return error;
		}
		dirfs_node_ref(dnp);	/* leave inactive for life of mount */

		/* Root inode's parent is NULL, used for verification */
		dnp->dn_parent = NULL;
		dmp->dm_root = dnp;
		dirfs_node_setflags(dnp, DIRFS_ROOT);

		/*
		 * Maintain an open descriptor on the root dnp.  The
		 * normal open/close/cache does not apply for the root
		 * so the descriptor is ALWAYS available.
		 */
		fd = open(dmp->dm_path, O_DIRECTORY);
		if (fd == -1) {
			dbg(9, "failed to open ROOT node\n");
			dirfs_free_vp(dmp, dnp);
			dirfs_node_free(dmp, dnp);
			return errno;
		}
		dnp->dn_fd = fd;
		dnp->dn_type = VDIR;
	} else {
		dnp = dmp->dm_root;
	}

	/*
	 * Acquire the root vnode (dn_type already set above).  This
	 * call will handle any races and return a locked vnode.
	 */
	dirfs_alloc_vp(mp, vpp, LK_CANRECURSE, dnp);
	KTR_LOG(dirfs_root, dnp, *vpp, dmp->dm_path, dnp->dn_fd, error);

	return 0;
}
static ACPI_STATUS
EcCheckStatus(struct acpi_ec_softc *sc, const char *msg, EC_EVENT event)
{
	ACPI_STATUS status;
	EC_STATUS ec_status;

	status = AE_NO_HARDWARE_RESPONSE;
	ec_status = EC_GET_CSR(sc);
	if (sc->ec_burstactive && !(ec_status & EC_FLAG_BURST_MODE)) {
		KTR_LOG(acpi_ec_burstdis, msg);
		sc->ec_burstactive = FALSE;
	}
	if (EVENT_READY(event, ec_status)) {
		KTR_LOG(acpi_ec_waitrdy, msg, ec_status);
		status = AE_OK;
	}
	return (status);
}
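EVENT_READY() decides whether the status register already satisfies the event being waited for.  Its definition is not shown in this section; the sketch below is what it has to compute, with status bits as laid out in the ACPI specification's Embedded Controller interface.  The exact EC_FLAG_*/EC_EVENT_* values are assumptions here, believed to match the driver's header.

/*
 * EC status (EC_SC) register bits, per the ACPI spec's Embedded
 * Controller interface description (assumed to match the driver):
 */
#define EC_FLAG_OUTPUT_BUFFER	(1 << 0)	/* OBF: data ready for host */
#define EC_FLAG_INPUT_BUFFER	(1 << 1)	/* IBF: EC still consuming */
#define EC_FLAG_BURST_MODE	(1 << 4)	/* BURST: burst mode active */
#define EC_EVENT_SCI		(1 << 5)	/* SCI_EVT: query pending */

/*
 * An event is "ready" when OBF is set (for reads) or IBF is clear
 * (for writes/commands).
 */
#define EVENT_READY(event, status)				\
	(((event) == EC_EVENT_OUTPUT_BUFFER_FULL &&		\
	  ((status) & EC_FLAG_OUTPUT_BUFFER) != 0) ||		\
	 ((event) == EC_EVENT_INPUT_BUFFER_EMPTY &&		\
	  ((status) & EC_FLAG_INPUT_BUFFER) == 0))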
void
fork_return(struct lwp *lp, struct trapframe *frame)
{
	frame->tf_rax = 0;		/* Child returns zero */
	frame->tf_rflags &= ~PSL_C;	/* success */
	frame->tf_rdx = 1;		/* classic BSD convention: %rdx == 1
					 * marks the child side of fork */

	generic_lwp_return(lp, frame);
	KTR_LOG(kernentry_fork_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
}
static ACPI_STATUS
EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd)
{
	ACPI_STATUS status;
	EC_EVENT event;
	EC_STATUS ec_status;
	u_int gen_count;

	ACPI_SERIAL_ASSERT(ec);

	/* Don't use burst mode if user disabled it. */
	if (!ec_burst_mode && cmd == EC_COMMAND_BURST_ENABLE)
		return (AE_ERROR);

	/* Decide what to wait for based on command type. */
	switch (cmd) {
	case EC_COMMAND_READ:
	case EC_COMMAND_WRITE:
	case EC_COMMAND_BURST_DISABLE:
		event = EC_EVENT_INPUT_BUFFER_EMPTY;
		break;
	case EC_COMMAND_QUERY:
	case EC_COMMAND_BURST_ENABLE:
		event = EC_EVENT_OUTPUT_BUFFER_FULL;
		break;
	default:
		device_printf(sc->ec_dev, "EcCommand: invalid command %#x\n",
		    cmd);
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Ensure empty input buffer before issuing command.
	 * Use generation count of zero to force a quick check.
	 */
	status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, 0);
	if (ACPI_FAILURE(status))
		return (status);

	/* Run the command and wait for the chosen event. */
	KTR_LOG(acpi_ec_cmdrun, cmd);
	gen_count = sc->ec_gencount;
	EC_SET_CSR(sc, cmd);
	status = EcWaitEvent(sc, event, gen_count);
	if (ACPI_SUCCESS(status)) {
		/* If we succeeded, burst flag should now be present. */
		if (cmd == EC_COMMAND_BURST_ENABLE) {
			ec_status = EC_GET_CSR(sc);
			if ((ec_status & EC_FLAG_BURST_MODE) == 0)
				status = AE_ERROR;
		}
	} else {
		device_printf(sc->ec_dev, "EcCommand: no response to %#x\n",
		    cmd);
	}
	return (status);
}
static int
dirfs_unmount(struct mount *mp, int mntflags)
{
	dirfs_mount_t dmp;
	dirfs_node_t dnp;
	int cnt;
	int error;

	dbg(1, "called\n");

	cnt = 0;
	dmp = VFS_TO_DIRFS(mp);

	error = vflush(mp, 0, 0);
	if (error)
		goto failure;

	/*
	 * Clean up dm_fdlist.  There should be no vnodes left so the
	 * only ref should be from the fdlist.
	 */
	while ((dnp = TAILQ_FIRST(&dmp->dm_fdlist)) != NULL) {
		dirfs_node_setpassive(dmp, dnp, 0);
	}

	/*
	 * Cleanup root node.  In the case the filesystem is mounted
	 * but no operation is done on it, there will be no call to
	 * VFS_ROOT() so better check dnp is not NULL before attempting
	 * to release it.
	 */
	dnp = dmp->dm_root;
	if (dnp != NULL) {
		dirfs_close_helper(dnp);
		debug_node2(dnp);
		dirfs_node_drop(dmp, dnp); /* last ref should free structure */
	}

	kfree(dmp, M_DIRFS);
	mp->mnt_data = (qaddr_t)0;

failure:
	KTR_LOG(dirfs_unmount, dmp, mp, error);

	return error;
}
static int
dirfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	dirfs_mount_t dmp;
	struct stat st;
	size_t done, nlen;
	int error;

	dbg(1, "called\n");

	if (mp->mnt_flag & MNT_UPDATE) {
		dmp = VFS_TO_DIRFS(mp);
		if (dmp->dm_rdonly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* XXX We should make sure all writes are synced */
			dmp->dm_rdonly = 1;
			debug(2, "dirfs read-write -> read-only\n");
		}

		if (dmp->dm_rdonly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			debug(2, "dirfs read-only -> read-write\n");
			dmp->dm_rdonly = 0;
		}
		return 0;
	}

	dmp = kmalloc(sizeof(*dmp), M_DIRFS, M_WAITOK | M_ZERO);
	mp->mnt_data = (qaddr_t)dmp;
	dmp->dm_mount = mp;

	error = copyinstr(data, &dmp->dm_path, MAXPATHLEN, &done);
	if (error) {
		/* Attempt to copy from kernel address */
		error = copystr(data, &dmp->dm_path, MAXPATHLEN, &done);
		if (error) {
			kfree(dmp, M_DIRFS);
			goto failure;
		}
	}

	/* Strip a trailing '/' to avoid problems further down */
	nlen = strnlen(dmp->dm_path, MAXPATHLEN);
	if (nlen > 0 && dmp->dm_path[nlen - 1] == '/')
		dmp->dm_path[nlen - 1] = 0;

	/* Make sure the host directory exists and is indeed a directory. */
	if ((stat(dmp->dm_path, &st)) == 0) {
		if (!S_ISDIR(st.st_mode)) {
			kfree(dmp, M_DIRFS);
			error = EINVAL;
			goto failure;
		}
	} else {
		error = errno;
		goto failure;
	}

	lockinit(&dmp->dm_lock, "dfsmnt", 0, LK_CANRECURSE);

	vfs_add_vnodeops(mp, &dirfs_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_getnewfsid(mp);

	/* The UID/GID of whoever is running the vkernel */
	dmp->dm_uid = getuid();
	dmp->dm_gid = getgid();

	TAILQ_INIT(&dmp->dm_fdlist);
	RB_INIT(&dmp->dm_inotree);

	kmalloc_raise_limit(M_DIRFS_NODE, 0);

	dirfs_statfs(mp, &mp->mnt_stat, cred);

failure:
	KTR_LOG(dirfs_mount, (dmp->dm_path) ? dmp->dm_path : "NULL",
	    dmp, mp, error);

	return error;
}
void
trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;
	++gd->gd_trap_nesting_level;
	if (frame->tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by interrupts.
		 * This problem is worked around by using an interrupt
		 * gate for the pagefault handler.  We are finally ready
		 * to read %cr2 and then must reenable interrupts.
		 *
		 * XXX this should be in the switch statement, but the
		 * NO_F00F_HACK and VM86 goto and ifdefs obfuscate the
		 * flow of control too much for this to be obviously
		 * correct.
		 */
		eva = rcr2();
		cpu_enable_intr();
	}
	--gd->gd_trap_nesting_level;

	if (!(frame->tf_eflags & PSL_I)) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL ||
		    (frame->tf_eflags & PSL_VM)) {
			MAKEMPSAFE(have_mplock);
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if (type != T_BPTFLT && type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			MAKEMPSAFE(have_mplock);
			kprintf("kernel trap %d with interrupts disabled\n",
			    type);
		}
		cpu_enable_intr();
	}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	if (in_vm86call) {
		if (frame->tf_eflags & PSL_VM &&
		    (type == T_PROTFLT || type == T_STKFLT)) {
			KKASSERT(get_mplock_count(curthread) > 0);
			i = vm86_emulate((struct vm86frame *)frame);
			KKASSERT(get_mplock_count(curthread) > 0);
			if (i != 0) {
				/*
				 * returns to original process
				 */
				vm86_trap((struct vm86frame *)frame,
				    have_mplock);
				KKASSERT(0); /* NOT REACHED */
			}
			goto out2;
		}
		switch (type) {
		/*
		 * these traps want either a process context, or
		 * assume a normal userspace trap.
		 */
		case T_PROTFLT:
		case T_SEGNPFLT:
			trap_fatal(frame, eva);
			goto out2;
		case T_TRCTRAP:
			type = T_BPTFLT;	/* kernel breakpoint */
			/* FALL THROUGH */
		}
		goto kernel_trap;	/* normal kernel trap handling */
	}

	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
		    frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		lp->lwp_md.md_regs = frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
				    RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
				    p->p_prof.pr_ticks);
			}
			goto out;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame->tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)frame);
				if (i == 0)
					goto out;
				break;
			}
			i = SIGBUS;
			ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
			break;

		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;

		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE, eva);
			if (i == -1)
				goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2)
				goto restart;
#endif
			if (i == 0)
				goto out;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
			goto handle_powerfail;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

#if NNPX > 0
			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(frame);
			if (i == 0) {
				if (!(frame->tf_eflags & PSL_T))
					goto out2;
				frame->tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
kernel_trap:
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			trap_pfault(frame, FALSE, eva);
			goto out2;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel may be using npx for copying or other
			 * purposes.
			 */
			if (npxdna())
				goto out2;
#endif
			break;

		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
#define MAYBE_DORETI_FAULT(where, whereto)			\
	do {							\
		if (frame->tf_eip == (int)where) {		\
			frame->tf_eip = (int)whereto;		\
			goto out2;				\
		}						\
	} while (0)
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * Invalid %fs's and %gs's can be created using
				 * procfs or PT_SETREGS or by invalidating the
				 * underlying LDT entry.  This causes a fault
				 * in kernel mode when the kernel attempts to
				 * switch contexts.  Lose the bad context
				 * (XXX) so that we can continue, and generate
				 * a signal.
				 */
				MAYBE_DORETI_FAULT(doreti_iret,
				    doreti_iret_fault);
				MAYBE_DORETI_FAULT(doreti_popl_ds,
				    doreti_popl_ds_fault);
				MAYBE_DORETI_FAULT(doreti_popl_es,
				    doreti_popl_es_fault);
				MAYBE_DORETI_FAULT(doreti_popl_fs,
				    doreti_popl_fs_fault);
				MAYBE_DORETI_FAULT(doreti_popl_gs,
				    doreti_popl_gs_fault);

				/*
				 * NOTE: cpu doesn't push esp on kernel trap
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    (int)&frame->tf_esp) {
					frame->tf_eip = (register_t)
					    td->td_pcb->pcb_onfault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_eflags & PSL_NT) {
				frame->tf_eflags &= ~PSL_NT;
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame->tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out2;
			}
			if (frame->tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame->tf_eflags &= ~PSL_T;
				goto out2;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			MAKEMPSAFE(have_mplock);
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			MAKEMPSAFE(have_mplock);
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
#define TIMER_FREQ	1193182
#endif
handle_powerfail:
			{
				static unsigned lastalert = 0;

				if (time_uptime - lastalert > 10) {
					log(LOG_WARNING, "NMI: power fail\n");
					sysbeep(TIMER_FREQ / 880, hz);
					lastalert = time_uptime;
				}
				/* YYY mp count */
				goto out2;
			}
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, eva);
		goto out2;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s", trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (have_mplock)
		rel_mplock();
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
	    ("trap: critical section count mismatch! %d/%d",
	    crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
	    ("trap: extra tokens held after trap! %zd/%zd",
	    curstop - &td->td_toks_base,
	    td->td_toks_stop - &td->td_toks_base));
#endif
}
static ACPI_STATUS
EcWaitEvent(struct acpi_ec_softc *sc, EC_EVENT Event, u_int gen_count)
{
	static int	no_intr = 0;
	ACPI_STATUS	Status;
	int		count, i, need_poll, slp_ival;

	ACPI_SERIAL_ASSERT(ec);
	Status = AE_NO_HARDWARE_RESPONSE;
	need_poll = cold || rebooting || ec_polled_mode || sc->ec_suspending;

	/* Wait for event by polling or GPE (interrupt). */
	if (need_poll) {
		count = (ec_timeout * 1000) / EC_POLL_DELAY;
		if (count == 0)
			count = 1;
		DELAY(10);
		for (i = 0; i < count; i++) {
			Status = EcCheckStatus(sc, "poll", Event);
			if (ACPI_SUCCESS(Status))
				break;
			DELAY(EC_POLL_DELAY);
		}
	} else {
		slp_ival = hz / 1000;
		if (slp_ival != 0) {
			count = ec_timeout;
		} else {
			/* hz has less than 1 ms resolution so scale timeout. */
			slp_ival = 1;
			count = ec_timeout / (1000 / hz);
		}

		/*
		 * Wait for the GPE to signal the status changed, checking
		 * the status register each time we get one.  It's possible
		 * to get a GPE for an event we're not interested in here
		 * (i.e., SCI for EC query).
		 */
		for (i = 0; i < count; i++) {
			if (gen_count == sc->ec_gencount)
				tsleep(sc, 0, "ecgpe", slp_ival);
			/*
			 * Record new generation count.  It's possible the
			 * GPE was just to notify us that a query is needed
			 * and we need to wait for a second GPE to signal
			 * the completion of the event we are actually
			 * waiting for.
			 */
			Status = EcCheckStatus(sc, "sleep", Event);
			if (ACPI_SUCCESS(Status)) {
				if (gen_count == sc->ec_gencount)
					no_intr++;
				else
					no_intr = 0;
				break;
			}
			gen_count = sc->ec_gencount;
		}

		/*
		 * We finished waiting for the GPE and it never arrived.
		 * Try to read the register once and trust whatever value
		 * we got.  This is the best we can do at this point.
		 */
		if (ACPI_FAILURE(Status))
			Status = EcCheckStatus(sc, "sleep_end", Event);
	}
	if (!need_poll && no_intr > 10) {
		device_printf(sc->ec_dev,
		    "not getting interrupts, switched to polled mode\n");
		ec_polled_mode = 1;
	}
	if (ACPI_FAILURE(Status))
		KTR_LOG(acpi_ec_timeout);
	return (Status);
}
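The gen_count handshake between EcGpeHandler() and EcWaitEvent() is the interesting part: the interrupt side bumps a counter and wakeup()s, while the waiting side only tsleep()s when the counter has not moved since its last status check, so a GPE that fires between the check and the sleep is never lost.  A stripped-down sketch of the pattern (hypothetical function names; the real code is above):

/* Interrupt/producer side (cf. EcGpeHandler): */
static void
ec_gen_notify_sketch(struct acpi_ec_softc *sc)
{
	atomic_add_int(&sc->ec_gencount, 1);	/* status register is fresh */
	wakeup(sc);				/* kick any sleeper */
}

/* Waiting/consumer side (cf. the loop in EcWaitEvent): */
static int
ec_gen_wait_sketch(struct acpi_ec_softc *sc, u_int gen_count, int ticks)
{
	/*
	 * Only sleep if no new interrupt arrived since our last status
	 * check; otherwise re-check the status register immediately.
	 */
	if (gen_count == sc->ec_gencount)
		tsleep(sc, 0, "ecgpe", ticks);
	return (gen_count != sc->ec_gencount);	/* true if status is fresh */
}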
static ACPI_STATUS
EcSpaceHandler(UINT32 Function, ACPI_PHYSICAL_ADDRESS Address, UINT32 Width,
	       UINT64 *Value, void *Context, void *RegionContext)
{
	struct acpi_ec_softc	*sc = (struct acpi_ec_softc *)Context;
	ACPI_PHYSICAL_ADDRESS	EcAddr;
	UINT8			*EcData;
	ACPI_STATUS		Status;

	ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, (UINT32)Address);

	if (Function != ACPI_READ && Function != ACPI_WRITE)
		return_ACPI_STATUS (AE_BAD_PARAMETER);
	if (Width % 8 != 0 || Value == NULL || Context == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);
	if (Address + Width / 8 > 256)
		return_ACPI_STATUS (AE_BAD_ADDRESS);

	/*
	 * If booting, check if we need to run the query handler.  If so,
	 * we call it directly here since our thread taskq is not active
	 * yet.
	 */
	if (cold || rebooting || sc->ec_suspending) {
		if ((EC_GET_CSR(sc) & EC_EVENT_SCI)) {
			KTR_LOG(acpi_ec_gperun);
			EcGpeQueryHandler(sc);
		}
	}

	/* Serialize with EcGpeQueryHandler() at transaction granularity. */
	Status = EcLock(sc);
	if (ACPI_FAILURE(Status))
		return_ACPI_STATUS (Status);

	/* If we can't start burst mode, continue anyway. */
	Status = EcCommand(sc, EC_COMMAND_BURST_ENABLE);
	if (ACPI_SUCCESS(Status)) {
		if (EC_GET_DATA(sc) == EC_BURST_ACK) {
			KTR_LOG(acpi_ec_burstenl);
			sc->ec_burstactive = TRUE;
		}
	}

	/* Perform the transaction(s), based on Width. */
	EcAddr = Address;
	EcData = (UINT8 *)Value;
	if (Function == ACPI_READ)
		*Value = 0;
	do {
		switch (Function) {
		case ACPI_READ:
			Status = EcRead(sc, EcAddr, EcData);
			break;
		case ACPI_WRITE:
			Status = EcWrite(sc, EcAddr, *EcData);
			break;
		}
		if (ACPI_FAILURE(Status))
			break;
		EcAddr++;
		EcData++;
	} while (EcAddr < Address + Width / 8);

	if (sc->ec_burstactive) {
		sc->ec_burstactive = FALSE;
		if (ACPI_SUCCESS(EcCommand(sc, EC_COMMAND_BURST_DISABLE)))
			KTR_LOG(acpi_ec_burstdisok);
	}

	EcUnlock(sc);
	return_ACPI_STATUS (Status);
}
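For context, an address space handler like this one is hooked into ACPICA at attach time with AcpiInstallAddressSpaceHandler().  The sketch below assumes the driver registers for the EC space on its own handle with a trivial region-setup callback; the driver's real attach code and setup callback may differ.

/*
 * Trivial region-setup callback (shape per ACPICA's
 * ACPI_ADR_SPACE_SETUP typedef; illustrative only).
 */
static ACPI_STATUS
EcSpaceSetupSketch(ACPI_HANDLE Region, UINT32 Function, void *Context,
		   void **RegionContext)
{
	/* No per-region state is needed; just echo the handle back. */
	*RegionContext = Region;
	return (AE_OK);
}

/* In the attach path (error handling elided): */
Status = AcpiInstallAddressSpaceHandler(sc->ec_handle, ACPI_ADR_SPACE_EC,
	    &EcSpaceHandler, &EcSpaceSetupSketch, sc);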
static void
EcGpeQueryHandler(void *Context)
{
	struct acpi_ec_softc	*sc = (struct acpi_ec_softc *)Context;
	UINT8			Data;
	ACPI_STATUS		Status;
	int			retry, sci_enqueued;
	char			qxx[5];

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
	KASSERT(Context != NULL, ("EcGpeQueryHandler called with NULL"));

	/* Serialize user access with EcSpaceHandler(). */
	Status = EcLock(sc);
	if (ACPI_FAILURE(Status)) {
		device_printf(sc->ec_dev, "GpeQuery lock error: %s\n",
		    AcpiFormatException(Status));
		return;
	}

	/*
	 * Send a query command to the EC to find out which _Qxx call it
	 * wants to make.  This command clears the SCI bit and also the
	 * interrupt source since we are edge-triggered.  To prevent the
	 * GPE that may arise from running the query from causing another
	 * query to be queued, we clear the pending flag only after
	 * running it.
	 */
	sci_enqueued = sc->ec_sci_pend;
	for (retry = 0; retry < 2; retry++) {
		Status = EcCommand(sc, EC_COMMAND_QUERY);
		if (ACPI_SUCCESS(Status))
			break;
		if (ACPI_SUCCESS(EcCheckStatus(sc, "retr_check",
		    EC_EVENT_INPUT_BUFFER_EMPTY)))
			continue;
		else
			break;
	}
	sc->ec_sci_pend = FALSE;
	if (ACPI_FAILURE(Status)) {
		EcUnlock(sc);
		device_printf(sc->ec_dev, "GPE query failed: %s\n",
		    AcpiFormatException(Status));
		return;
	}
	Data = EC_GET_DATA(sc);

	/*
	 * We have to unlock before running the _Qxx method below since that
	 * method may attempt to read/write from EC address space, causing
	 * recursive acquisition of the lock.
	 */
	EcUnlock(sc);

	/* Ignore the value for "no outstanding event".  (13.3.5) */
	if (Data == 0) {
		KTR_LOG(acpi_ec_qryoknotrun, Data);
		return;
	} else {
		KTR_LOG(acpi_ec_qryokrun, Data);
	}

	/* Evaluate _Qxx to respond to the controller. */
	ksnprintf(qxx, sizeof(qxx), "_Q%02X", Data);
	AcpiUtStrupr(qxx);
	Status = AcpiEvaluateObject(sc->ec_handle, qxx, NULL, NULL);
	if (ACPI_FAILURE(Status) && Status != AE_NOT_FOUND) {
		device_printf(sc->ec_dev,
		    "evaluation of query method %s failed: %s\n",
		    qxx, AcpiFormatException(Status));
	}

	/* Reenable runtime GPE if its execution was deferred. */
	if (sci_enqueued) {
		Status = AcpiFinishGpe(sc->ec_gpehandle, sc->ec_gpebit);
		if (ACPI_FAILURE(Status))
			device_printf(sc->ec_dev,
			    "reenabling runtime GPE failed: %s\n",
			    AcpiFormatException(Status));
	}
}
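To close the loop on the GPE side: EcGpeHandler() from the top of this section reaches ACPICA through AcpiInstallGpeHandler() and AcpiEnableGpe().  A sketch of that registration, under the assumption that the softc fields used above (ec_gpehandle, ec_gpebit) hold the GPE block handle and bit number:

/* In the attach path (error handling elided): */
Status = AcpiInstallGpeHandler(sc->ec_gpehandle, sc->ec_gpebit,
	    ACPI_GPE_EDGE_TRIGGERED, EcGpeHandler, sc);
if (ACPI_SUCCESS(Status))
	Status = AcpiEnableGpe(sc->ec_gpehandle, sc->ec_gpebit);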
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct sysent *callp;
	register_t orig_tf_rflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	register_t *argp;
	u_int code;
	int regcnt, optimized_regcnt;
	union sysunion args;
	register_t *argsdst;

	mycpu->gd_cnt.v_syscall++;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid, frame->tf_rax);

	userenter(td, p);	/* lazy raise our priority */

	regcnt = 6;
	optimized_regcnt = 6;

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_rflags = frame->tf_rflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		callp = NULL;
		code = 0;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	KASSERT(lp->lwp_md.md_regs == frame,
	    ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));
	code = (u_int)frame->tf_rax;

	if (code == SYS_syscall || code == SYS___syscall) {
		code = frame->tf_rdi;
		regcnt--;
		argp = &frame->tf_rdi + 1;
	} else {
		argp = &frame->tf_rdi;
	}

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * On x86_64 we get up to six arguments in registers.  The rest are
	 * on the stack.  The first six members of 'struct trapframe' happen
	 * to be the registers used to pass arguments, in exactly the right
	 * order.
	 */
	argsdst = (register_t *)(&args.nosys.sysmsg + 1);

	/*
	 * It's easier to copy up to the highest number of syscall arguments
	 * passed in registers, which is 6, than to conditionalize it.
	 */
	bcopy(argp, argsdst, sizeof(register_t) * optimized_regcnt);

	/*
	 * Any arguments beyond available argument-passing registers must
	 * be copyin()'d from the user stack.
	 */
	if (narg > regcnt) {
		caddr_t params;

		params = (caddr_t)frame->tf_rsp + sizeof(register_t);
		error = copyin(params, &argsdst[regcnt],
		    (narg - regcnt) * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				ktrsyscall(lp, code, narg,
				    (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		ktrsyscall(lp, code, narg,
		    (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * Default return value is 0 (will be copied to %rax).  Double-value
	 * returns use %rax and %rdx.  %rdx is left unchanged for system
	 * calls which return only one result.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_rdx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
#ifdef SYSCALL_DEBUG
	tsc_uclock_t tscval = rdtsc();
#endif
	error = (*callp->sy_call)(&args);
#ifdef SYSCALL_DEBUG
	tscval = rdtsc() - tscval;
	tscval = tscval * 1000000 / tsc_frequency;
	if (SysCallsWorstCase[code] < tscval)
		SysCallsWorstCase[code] = tscval;
#endif

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	/*kprintf("SYSMSG %d ", error);*/
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_rax = args.sysmsg_fds[0];
		frame->tf_rdx = args.sysmsg_fds[1];
		frame->tf_rflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, we know that 'syscall' is 2 bytes.
		 * We have to do a full context restore so that %r10
		 * (which was holding the value of %rcx) is restored
		 * for the next iteration.
		 */
		if (frame->tf_err != 0 && frame->tf_err != 2)
			kprintf("lp %s:%d frame->tf_err is weird %ld\n",
			    td->td_comm, lp->lwp_proc->p_pid,
			    frame->tf_err);
		frame->tf_rip -= frame->tf_err;
		frame->tf_r10 = frame->tf_rcx;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() should now be MP aware
	 */
	if (orig_tf_rflags & PSL_T) {
		frame->tf_rflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
	    ("syscall: critical section count mismatch! %d/%d",
	    crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
	    ("syscall: %ld extra tokens held after trap! syscall %p",
	    td->td_toks_stop - &td->td_toks_base,
	    callp->sy_call));
#endif
}
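The bcopy() in syscall2() above leans on the layout noted in its comment: the first six trapframe members are the argument registers in ABI order, with %r10 standing in for %rcx at syscall entry because the syscall instruction clobbers %rcx.  A sketch of the assumed leading layout (illustrative struct; see machine/frame.h for the real definition):

/*
 * Assumed leading layout of struct trapframe on x86_64.  The syscall
 * entry stub saves %r10 into tf_rcx, which is why arg 4 shows up here.
 */
struct trapframe_head_sketch {
	register_t	tf_rdi;		/* syscall arg 1 */
	register_t	tf_rsi;		/* syscall arg 2 */
	register_t	tf_rdx;		/* syscall arg 3 */
	register_t	tf_rcx;		/* syscall arg 4 (from %r10) */
	register_t	tf_r8;		/* syscall arg 5 */
	register_t	tf_r9;		/* syscall arg 6 */
	/* ... remaining saved registers ... */
};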
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap.  The MP lock is not
 * held on entry or return.  We are responsible for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	int have_mplock = 0;
	u_int code;
	union sysunion args;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid, frame->tf_eax);

	userenter(td, p);	/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		callp = NULL;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
		    frame, (int *)(&args.nosys.sysmsg + 1),
		    &code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	code &= p->p_sysent->sv_mask;
	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

#if 0
	if (p->p_sysent->sv_name[0] == 'L')
		kprintf("Linux syscall, code = %d\n", code);
#endif

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
		    narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(lp, code, narg,
				    (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg,
		    (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame.  If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);

	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
	    ("syscall: critical section count mismatch! %d/%d",
	    crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
	    ("syscall: extra tokens held after trap! %zd",
	    td->td_toks_stop - &td->td_toks_base));
#endif
}
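Unlike the x86_64 path, the i386 ABI passes every syscall argument on the user stack; the `params` pointer computed above skips only the return address.  A sketch of what copyin() reads for a three-argument call (hypothetical layout illustration):

/*
 * User stack at `int $0x80' / lcall entry on i386 (addresses grow up):
 *
 *	frame->tf_esp + 0	return address (skipped via sizeof(int))
 *	frame->tf_esp + 4	arg1   <- params points here
 *	frame->tf_esp + 8	arg2
 *	frame->tf_esp + 12	arg3
 */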
/*
 * Exception, fault, and trap interface to the kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 *
 * This function is also called from doreti in an interlock to handle ASTs.
 * For example:  hardwareint->INTROUTINE->(set ast)->doreti->trap
 *
 * NOTE!  We have to retrieve the fault address prior to potentially
 *	  blocking, including blocking on any token.
 *
 * NOTE!  NMI and kernel DBG traps remain on their respective pcpu IST
 *	  stacks if taken from a kernel RPL.  trap() cannot block in this
 *	  situation.  DDB entry or a direct report-and-return is ok.
 *
 * XXX gd_trap_nesting_level currently prevents lwkt_switch() from
 * panicking if an attempt is made to switch from a fast interrupt or IPI.
 */
void
trap(struct trapframe *frame)
{
	static struct krate sscpubugrate = { 1 };
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;
	clear_quickret();

#ifdef DDB
	/*
	 * We need to allow T_DNA faults when the debugger is active since
	 * some dumping paths do large bcopy() which use the floating
	 * point registers for faster copying.
	 */
	if (db_active && frame->tf_trapno != T_DNA) {
		eva = (frame->tf_trapno == T_PAGEFLT ? frame->tf_addr : 0);
		++gd->gd_trap_nesting_level;
		trap_fatal(frame, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

	eva = 0;

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled interrupts
		 * and then trapped.  Enabling interrupts now is wrong, but
		 * it is better than running with interrupts disabled until
		 * they are accidentally enabled later.
		 */
		type = frame->tf_trapno;
		if (ISPL(frame->tf_cs) == SEL_UPL) {
			/* JG curproc can be NULL */
			kprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		} else if ((type == T_STKFLT || type == T_PROTFLT ||
			    type == T_SEGNPFLT) &&
			   frame->tf_rip == (long)doreti_iret) {
			/*
			 * iretq fault from kernel mode during return to
			 * userland.
			 *
			 * This situation is expected, don't complain.
			 */
		} else if (type != T_NMI && type != T_BPTFLT &&
			   type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			kprintf("kernel trap %d (%s @ 0x%016jx) with "
			    "interrupts disabled\n",
			    type, td->td_comm, frame->tf_rip);
		}
		cpu_enable_intr();
	}

	type = frame->tf_trapno;
	code = frame->tf_err;

	if (ISPL(frame->tf_cs) == SEL_UPL) {
		/* user trap */

		KTR_LOG(kernentry_trap, p->p_pid, lp->lwp_tid,
		    frame->tf_trapno, eva);

		userenter(td, p);

		sticks = (int)td->td_sticks;
		KASSERT(lp->lwp_md.md_regs == frame,
		    ("Frame mismatch %p %p", lp->lwp_md.md_regs, frame));

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

		case T_ASTFLT:		/* Allow process switch */
			mycpu->gd_cnt.v_soft++;
			if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
				atomic_clear_int(&mycpu->gd_reqflags,
				    RQF_AST_OWEUPC);
				addupc_task(p, p->p_prof.pr_addr,
				    p->p_prof.pr_ticks);
			}
			goto out;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;

		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(frame, TRUE);
#ifdef DDB
			if (frame->tf_rip == 0) {
				/* used for kernel debugging only */
				while (freeze_on_seg_fault)
					tsleep(p, 0, "freeze", hz * 20);
			}
#endif
			if (i == -1 || i == 0)
				goto out;
			if (i == SIGSEGV) {
				ucode = SEGV_MAPERR;
			} else {
				i = SIGSEGV;
				ucode = SEGV_ACCERR;
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/*
			 * Virtual kernel intercept - pass the DNA exception
			 * to the virtual kernel if it asked to handle it.
			 * This occurs when the virtual kernel is holding
			 * onto the FP context for a different emulated
			 * process than the one currently running.
			 *
			 * We must still call npxdna() since we may have
			 * saved FP state that the virtual kernel needs
			 * to hand over to a different emulated process.
			 */
			if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
			    (td->td_pcb->pcb_flags & FP_VIRTFP)
			) {
				npxdna();
				break;
			}

			/*
			 * The kernel may have switched out the FP unit's
			 * state, causing the user process to take a fault
			 * when it tries to use the FP unit.  Restore the
			 * state here.
			 */
			if (npxdna()) {
				gd->gd_cnt.v_trap++;
				goto out;
			}
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = 0; /* XXX */
			i = SIGFPE;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			trap_pfault(frame, FALSE);
			goto out2;

		case T_DNA:
			/*
			 * The kernel is apparently using fpu for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna()) {
				gd->gd_cnt.v_trap++;
				goto out2;
			}
			break;

		case T_STKFLT:		/* stack fault */
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (mycpu->gd_intr_nesting_level == 0) {
				/*
				 * NOTE: in 64-bit mode traps push rsp/ss
				 *	 even if no ring change occurs.
				 */
				if (td->td_pcb->pcb_onfault &&
				    td->td_pcb->pcb_onfault_sp ==
				    frame->tf_rsp) {
					frame->tf_rip = (register_t)
					    td->td_pcb->pcb_onfault;
					goto out2;
				}

				/*
				 * If the iretq in doreti faults during
				 * return to user, it will be special-cased
				 * in IDTVEC(prot) to get here.  We want
				 * to 'return' to doreti_iret_fault in
				 * ipl.s in approximately the same state we
				 * were in at the iretq.
				 */
				if (frame->tf_rip == (long)doreti_iret) {
					frame->tf_rip =
					    (long)doreti_iret_fault;
					goto out2;
				}
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
#if 0
				/* do we need this? */
				if (frame->tf_rip == (long)doreti_iret)
					frame->tf_rip =
					    (long)doreti_iret_fault;
#endif
				goto out2;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			/*
			 * Detect historical CPU artifact on syscall or int $3
			 * entry (if not shortcutted in exception.s via
			 * DIRECT_DISALLOW_SS_CPUBUG).
			 */
			gd->gd_cnt.v_trap++;
			if (frame->tf_rip ==
			    (register_t)IDTVEC(fast_syscall)) {
				krateprintf(&sscpubugrate,
				    "Caught #DB at syscall cpu artifact\n");
				goto out2;
			}
			if (frame->tf_rip == (register_t)IDTVEC(bpt)) {
				krateprintf(&sscpubugrate,
				    "Caught #DB at int $N cpu artifact\n");
				goto out2;
			}

			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				load_dr6(rdr6() & ~0xf);
				goto out2;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
			ucode = TRAP_BRKPT;
#ifdef DDB
			if (kdb_trap(type, 0, frame))
				goto out2;
#endif
			break;

#if NISA > 0
		case T_NMI:
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					kprintf("NMI ... going to debugger\n");
					kdb_trap(type, 0, frame);
				}
#endif /* DDB */
				goto out2;
			} else if (panic_on_nmi == 0)
				goto out2;
			/* FALL THROUGH */
#endif /* NISA > 0 */
		}
		trap_fatal(frame, 0);
		goto out2;
	}

	/*
	 * Fault from user mode, virtual kernel intercept.
	 *
	 * If the fault is directly related to a VM context managed by a
	 * virtual kernel then let the virtual kernel handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	gd->gd_cnt.v_trap++;
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s", trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", frame->tf_addr);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (p != NULL && lp != NULL)
		KTR_LOG(kernentry_trap_ret, p->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
	    ("trap: critical section count mismatch! %d/%d",
	    crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
	    ("trap: extra tokens held after trap! %ld/%ld",
	    curstop - &td->td_toks_base,
	    td->td_toks_stop - &td->td_toks_base));
#endif
}
void
user_trap(struct trapframe *frame)
{
	struct globaldata *gd = mycpu;
	struct thread *td = gd->gd_curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;
	int sticks = 0;
	int i = 0, ucode = 0, type, code;
	int have_mplock = 0;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
	lwkt_tokref_t curstop = td->td_toks_stop;
#endif
	vm_offset_t eva;

	p = td->td_proc;

	/*
	 * This is a bad kludge to avoid changing the various trapframe
	 * structures.  Because we are enabled as a virtual kernel,
	 * the original tf_err field will be passed to us shifted 16
	 * over in the tf_trapno field for T_PAGEFLT.
	 */
	if (frame->tf_trapno == T_PAGEFLT)
		eva = frame->tf_err;
	else
		eva = 0;
#if 0
	kprintf("USER_TRAP AT %08x xflags %d trapno %d eva %08x\n",
	    frame->tf_eip, frame->tf_xflags, frame->tf_trapno, eva);
#endif

	/*
	 * Everything coming from user mode runs through user_trap,
	 * including system calls.
	 */
	if (frame->tf_trapno == T_SYSCALL80) {
		syscall2(frame);
		return;
	}

	KTR_LOG(kernentry_trap, lp->lwp_proc->p_pid, lp->lwp_tid,
	    frame->tf_trapno, eva);

#ifdef DDB
	if (db_active) {
		eva = (frame->tf_trapno == T_PAGEFLT ? rcr2() : 0);
		++gd->gd_trap_nesting_level;
		MAKEMPSAFE(have_mplock);
		trap_fatal(frame, TRUE, eva);
		--gd->gd_trap_nesting_level;
		goto out2;
	}
#endif

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif
	type = frame->tf_trapno;
	code = frame->tf_err;

	userenter(td, p);

	sticks = (int)td->td_sticks;
	lp->lwp_md.md_regs = frame;

	switch (type) {
	case T_PRIVINFLT:	/* privileged instruction fault */
		i = SIGILL;
		ucode = ILL_PRVOPC;
		break;

	case T_BPTFLT:		/* bpt instruction fault */
	case T_TRCTRAP:		/* trace trap */
		frame->tf_eflags &= ~PSL_T;
		i = SIGTRAP;
		ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
		break;

	case T_ARITHTRAP:	/* arithmetic trap */
		ucode = code;
		i = SIGFPE;
		break;

	case T_ASTFLT:		/* Allow process switch */
		mycpu->gd_cnt.v_soft++;
		if (mycpu->gd_reqflags & RQF_AST_OWEUPC) {
			atomic_clear_int(&mycpu->gd_reqflags,
			    RQF_AST_OWEUPC);
			addupc_task(p, p->p_prof.pr_addr,
			    p->p_prof.pr_ticks);
		}
		goto out;

		/*
		 * The following two traps can happen in
		 * vm86 mode, and, if so, we want to handle
		 * them specially.
		 */
	case T_PROTFLT:		/* general protection fault */
	case T_STKFLT:		/* stack fault */
#if 0
		if (frame->tf_eflags & PSL_VM) {
			i = vm86_emulate((struct vm86frame *)frame);
			if (i == 0)
				goto out;
			break;
		}
#endif
		i = SIGBUS;
		ucode = (type == T_PROTFLT) ? BUS_OBJERR : BUS_ADRERR;
		break;

	case T_SEGNPFLT:	/* segment not present fault */
		i = SIGBUS;
		ucode = BUS_ADRERR;
		break;

	case T_TSSFLT:		/* invalid TSS fault */
	case T_DOUBLEFLT:	/* double fault */
	default:
		i = SIGBUS;
		ucode = BUS_OBJERR;
		break;

	case T_PAGEFLT:		/* page fault */
		MAKEMPSAFE(have_mplock);
		i = trap_pfault(frame, TRUE, eva);
		if (i == -1)
			goto out;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if (i == -2)
			goto restart;
#endif
		if (i == 0)
			goto out;

		if (i == SIGSEGV)
			ucode = SEGV_MAPERR;
		else {
			i = SIGSEGV;
			ucode = SEGV_ACCERR;
		}
		break;

	case T_DIVIDE:		/* integer divide fault */
		ucode = FPE_INTDIV;
		i = SIGFPE;
		break;

#if NISA > 0
	case T_NMI:
		MAKEMPSAFE(have_mplock);
		/* machine/parity/power fail/"kitchen sink" faults */
		if (isa_nmi(code) == 0) {
#ifdef DDB
			/*
			 * NMI can be hooked up to a pushbutton
			 * for debugging.
			 */
			if (ddb_on_nmi) {
				kprintf("NMI ... going to debugger\n");
				kdb_trap(type, 0, frame);
			}
#endif /* DDB */
			goto out2;
		} else if (panic_on_nmi)
			panic("NMI indicates hardware failure");
		break;
#endif /* NISA > 0 */

	case T_OFLOW:		/* integer overflow fault */
		ucode = FPE_INTOVF;
		i = SIGFPE;
		break;

	case T_BOUND:		/* bounds check fault */
		ucode = FPE_FLTSUB;
		i = SIGFPE;
		break;

	case T_DNA:
		/*
		 * Virtual kernel intercept - pass the DNA exception
		 * to the (emulated) virtual kernel if it asked to handle
		 * it.  This occurs when the virtual kernel is holding
		 * onto the FP context for a different emulated
		 * process than the one currently running.
		 *
		 * We must still call npxdna() since we may have
		 * saved FP state that the (emulated) virtual kernel
		 * needs to hand over to a different emulated process.
		 */
		if (lp->lwp_vkernel && lp->lwp_vkernel->ve &&
		    (td->td_pcb->pcb_flags & FP_VIRTFP)
		) {
			npxdna(frame);
			break;
		}

#if NNPX > 0
		/*
		 * The kernel may have switched out the FP unit's
		 * state, causing the user process to take a fault
		 * when it tries to use the FP unit.  Restore the
		 * state here.
		 */
		if (npxdna(frame))
			goto out;
#endif
		if (!pmath_emulate) {
			i = SIGFPE;
			ucode = FPE_FPU_NP_TRAP;
			break;
		}
		i = (*pmath_emulate)(frame);
		if (i == 0) {
			if (!(frame->tf_eflags & PSL_T))
				goto out2;
			frame->tf_eflags &= ~PSL_T;
			i = SIGTRAP;
		}
		/* else ucode = emulator_only_knows() XXX */
		break;

	case T_FPOPFLT:		/* FPU operand fetch fault */
		ucode = ILL_COPROC;
		i = SIGILL;
		break;

	case T_XMMFLT:		/* SIMD floating-point exception */
		ucode = 0; /* XXX */
		i = SIGFPE;
		break;
	}

	/*
	 * Virtual kernel intercept - if the fault is directly related to a
	 * VM context managed by a virtual kernel then let the virtual kernel
	 * handle it.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		goto out;
	}

	/*
	 * Translate fault for emulators (e.g. Linux)
	 */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	MAKEMPSAFE(have_mplock);
	trapsignal(lp, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s", trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

out:
	userret(lp, frame, sticks);
	userexit(lp);
out2:	;
	if (have_mplock)
		rel_mplock();
	KTR_LOG(kernentry_trap_ret, lp->lwp_proc->p_pid, lp->lwp_tid);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
	    ("trap: critical section count mismatch! %d/%d",
	    crit_count, td->td_pri));
	KASSERT(curstop == td->td_toks_stop,
	    ("trap: extra tokens held after trap! %zd/%zd",
	    curstop - &td->td_toks_base,
	    td->td_toks_stop - &td->td_toks_base));
#endif
}