/*
 * Print a raw user-mode stack backtrace starting at (pc, fp).
 *
 * Each frame prints the pc (privilege bits masked off; "**" flags a pc
 * that fails the USERMODE() check), the frame pointer, and the first
 * four argument words fetched from user memory.  The walk is bounded at
 * 100 frames and stops early when pc no longer looks like a user text
 * address, fp is NULL, or a user-memory fetch via fuword() fails
 * (fuword returns -1 on fault; a legitimate 0xffffffff word is
 * indistinguishable from a fault here).
 */
static void
user_backtrace_raw(u_int pc, u_int fp)
{
	int frame_number;
	int arg_number;

	for (frame_number = 0;
	     frame_number < 100 && pc > HPPA_PC_PRIV_MASK && fp;
	     frame_number++) {
		printf("%3d: pc=%08x%s fp=0x%08x", frame_number,
		    pc & ~HPPA_PC_PRIV_MASK, USERMODE(pc) ? "" : "**", fp);
		/* Dump the first four caller-saved argument words. */
		for (arg_number = 0; arg_number < 4; arg_number++)
			printf(" arg%d=0x%08x", arg_number,
			    (int) fuword(HPPA_FRAME_CARG(arg_number, fp)));
		printf("\n");
		/* The return pointer lives 5 words below fp. */
		pc = fuword(((register_t *) fp) - 5);	/* fetch rp */
		if (pc == -1) {
			printf(" fuword for pc failed\n");
			break;
		}
		/* The previous frame pointer is stored at fp itself. */
		fp = fuword(((register_t *) fp) + 0);	/* fetch previous fp */
		if (fp == -1) {
			printf(" fuword for fp failed\n");
			break;
		}
	}
	printf(" backtrace stopped with pc %08x fp 0x%08x\n", pc, fp);
}
/*
 * Capture a user-mode call chain for hwpmc sampling (powerpc).
 *
 * cc[0] is the interrupted pc from the trapframe; subsequent entries
 * are saved return addresses read from the user stack frame chain.
 * Returns the number of entries written (at most maxsamples).
 *
 * The osp ("old sp") comparison requires each back-chain pointer to be
 * strictly greater than the previous one, which terminates the walk on
 * a cyclic or corrupt frame chain.  On powerpc64 the PSL_SF bit in
 * srr1 selects between 64-bit frames (LR at sp+2 words) and 32-bit
 * frames (LR at sp+1 32-bit word, fetched with fuword32).
 * NOTE(review): a faulting fuword/fuword32 (-1) is stored in cc[] and
 * the walk relies on the sp <= osp test to stop afterwards — presumably
 * acceptable for sampling; confirm against hwpmc consumers.
 */
int
pmc_save_user_callchain(uintptr_t *cc, int maxsamples, struct trapframe *tf)
{
	uintptr_t *osp, *sp;
	int frames = 0;

	cc[frames++] = PMC_TRAPFRAME_TO_PC(tf);
	sp = (uintptr_t *)PMC_TRAPFRAME_TO_FP(tf);
	osp = NULL;

	for (; frames < maxsamples; frames++) {
		if (sp <= osp)
			break;
		osp = sp;
#ifdef __powerpc64__
		/* Check if 32-bit mode. */
		if (!(tf->srr1 & PSL_SF)) {
			cc[frames] = fuword32((uint32_t *)sp + 1);
			sp = (uintptr_t *)(uintptr_t)fuword32(sp);
		} else {
			cc[frames] = fuword(sp + 2);
			sp = (uintptr_t *)fuword(sp);
		}
#else
		cc[frames] = fuword32((uint32_t *)sp + 1);
		sp = (uintptr_t *)fuword32(sp);
#endif
	}

	return (frames);
}
/*
 * Clear registers on exec
 *
 * Reset the lwp's user register state (sh3 trapframe) for a freshly
 * exec'd image: status register to the user default, pc to the image
 * entry point, and the general registers zeroed except for the
 * argument registers set up per the userland startup convention:
 * r4 = argc, r5 = argv, r6 = envp, r9 = ps_strings, r15 = sp.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe *tf;

	/* Fresh image: forget FPU-used and single-step state. */
	l->l_md.md_flags &= ~(MDL_USEDFPU | MDL_SSTEP);

	tf = l->l_md.md_regs;

	tf->tf_ssr = PSL_USERSET;
	tf->tf_spc = pack->ep_entry;
	tf->tf_pr = 0;

	tf->tf_gbr = 0;
	tf->tf_macl = 0;
	tf->tf_mach = 0;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
	/*
	 * NOTE(review): the fuword() result is not checked; a fault
	 * would leave argc = -1 and a bogus envp — presumably the
	 * stack page is guaranteed resident here.  Confirm.
	 */
	tf->tf_r4 = fuword((void *)stack);	/* argc */
	tf->tf_r5 = stack + 4;			/* argv */
	tf->tf_r6 = stack + 4 * tf->tf_r4 + 8;	/* envp */
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = l->l_proc->p_psstrp;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_r15 = stack;
}
/*
 * Capture a user-mode call chain for hwpmc sampling.
 *
 * cc[0] is the interrupted pc from the trapframe; subsequent entries
 * are saved return addresses read from the user stack frame chain
 * (return address at sp+1, previous frame pointer at sp).  The walk
 * stops when the frame pointer leaves user space or maxsamples
 * entries have been written.  Returns the number of entries written.
 *
 * Bug fix: the original incremented `frames` twice per iteration —
 * once in the for-header and once via `cc[frames++]` — which left
 * every other cc[] slot uninitialized and returned an overcounted
 * frame total.  (Compare the correct single-increment form used by
 * the powerpc version of this function.)
 */
int
pmc_save_user_callchain(uintptr_t *cc, int maxsamples, struct trapframe *tf)
{
	uintptr_t *sp;
	int frames = 0;

	cc[frames++] = PMC_TRAPFRAME_TO_PC(tf);
	sp = (uintptr_t *)PMC_TRAPFRAME_TO_FP(tf);

	for (; frames < maxsamples; frames++) {
		if (!INUSER(sp))
			break;
		cc[frames] = fuword(sp + 1);	/* saved return address */
		sp = (uintptr_t *)fuword(sp);	/* previous frame pointer */
	}

	return (frames);
}
/*
 * Arrange for thread td to enter userland at `entry` with argument
 * `arg`, running on the supplied stack (ia64 KSE upcall setup).
 *
 * The entry point is an ia64 function descriptor; its code address and
 * gp are fetched from user memory with fuword().  The trapframe's
 * special register set is rebuilt from scratch: user-mode psr, default
 * fpsr, and a 16-byte-aligned sp leaving the ABI scratch area.
 *
 * The argument is delivered differently depending on how the thread
 * last entered the kernel: after a syscall the argument word is stored
 * on the user backing store with suword(); otherwise it is planted in
 * the kernel-held dirty register area, with an extra NaT-collection
 * slot inserted when the store would land on the 0x1f8 boundary.
 * NOTE(review): the suword() return value is not checked — a fault
 * here silently loses the upcall argument; confirm callers guarantee
 * the stack is resident.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct ia64_fdesc *fd;
	struct trapframe *tf;
	uint64_t ndirty, sp;

	tf = td->td_frame;
	/* Dirty bytes on the kernel backing store, including partial rnat. */
	ndirty = tf->tf_special.ndirty + (tf->tf_special.bspstore & 0x1ffUL);

	KASSERT((ndirty & ~PAGE_MASK) == 0,
	    ("Whoa there! We have more than 8KB of dirty registers!"));

	fd = (struct ia64_fdesc *)entry;
	sp = (uint64_t)stack->ss_sp;

	bzero(&tf->tf_special, sizeof(tf->tf_special));
	tf->tf_special.iip = fuword(&fd->func);	/* code address */
	tf->tf_special.gp = fuword(&fd->gp);	/* global pointer */
	tf->tf_special.sp = (sp + stack->ss_size - 16) & ~15;
	tf->tf_special.rsc = 0xf;
	tf->tf_special.fpsr = IA64_FPSR_DEFAULT;
	tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT |
	    IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN |
	    IA64_PSR_CPL_USER;

	if (tf->tf_flags & FRAME_SYSCALL) {
		/* Syscall frame: argument goes on the user backing store. */
		tf->tf_special.cfm = (3UL<<62) | (1UL<<7) | 1UL;
		tf->tf_special.bspstore = sp + 8;
		suword((caddr_t)sp, (uint64_t)arg);
	} else {
		/* Interrupt frame: argument planted in the dirty area. */
		tf->tf_special.cfm = (1UL<<63) | (1UL<<7) | 1UL;
		tf->tf_special.bspstore = sp;
		tf->tf_special.ndirty = 8;
		sp = td->td_kstack + ndirty - 8;
		/* Insert a NaT collection slot at the 0x1f8 boundary. */
		if ((sp & 0x1ff) == 0x1f8) {
			*(uint64_t*)sp = 0;
			tf->tf_special.ndirty += 8;
			sp -= 8;
		}
		*(uint64_t*)sp = (uint64_t)arg;
	}
}
/*
 * Finish a BSD system call that completed asynchronously (Darwin i386,
 * funnel era): propagate the error/return values into the saved user
 * register state, emit ktrace/kdebug return records, drop the funnel,
 * and return to user mode.  Does not return.
 *
 * The syscall number is reconstructed from the saved eax (which has
 * not yet been overwritten with the return value); for the indirect
 * SYS_syscall slot (callp == sysent, i.e. code 0) the real number is
 * fetched from the user stack.
 */
void
unix_syscall_return(int error)
{
	thread_act_t thread;
	volatile int *rval;
	struct i386_saved_state *regs;
	struct proc *p;
	struct proc *current_proc();
	unsigned short code;
	vm_offset_t params;
	struct sysent *callp;
	extern int nsysent;

	thread = current_act();
	rval = (int *)get_bsduthreadrval(thread);
	p = current_proc();

	regs = USER_REGS(thread);

	/* reconstruct code for tracing before blasting eax */
	code = regs->eax;
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* Out-of-range numbers map to the enosys slot (63). */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* Indirect syscall: real number is the first stack word. */
		code = fuword(params);
	}

	if (error == ERESTART) {
		/* Back up eip over the 7-byte lcall to retry the syscall. */
		regs->eip -= 7;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	ktrsysret(p, code, error, rval[0], callp->sy_funnel);

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
	    error, rval[0], rval[1], 0, 0);

	if (callp->sy_funnel != NO_FUNNEL)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}
/*
 * Print user-mode backtraces for the faulting lwp (hppa), trying two
 * starting points: first trusting the frame pointer in r3, then — in
 * case r3 is stale — re-deriving a frame pointer by scanning backwards
 * from the faulting pc for the prologue store that adjusts sp.
 */
static void
user_backtrace(struct trapframe *tf, struct lwp *l, int type)
{
	struct proc *p = l->l_proc;
	u_int pc, fp, inst;

	/*
	 * Display any trap type that we have.
	 */
	if (type >= 0)
		printf("pid %d (%s) trap #%d\n",
		    p->p_pid, p->p_comm, type & ~T_USER);

	/*
	 * Assuming that the frame pointer in r3 is valid,
	 * dump out a stack trace.
	 */
	fp = tf->tf_r3;
	printf("pid %d (%s) backtrace, starting with fp 0x%08x\n",
	    p->p_pid, p->p_comm, fp);
	user_backtrace_raw(tf->tf_iioq_head, fp);

	/*
	 * In case the frame pointer in r3 is not valid,
	 * assuming the stack pointer is valid and the
	 * faulting function is a non-leaf, if we can
	 * find its prologue we can recover its frame
	 * pointer.
	 */
	pc = tf->tf_iioq_head;
	fp = tf->tf_sp - HPPA_FRAME_SIZE;
	printf("pid %d (%s) backtrace, starting with sp 0x%08x pc 0x%08x\n",
	    p->p_pid, p->p_comm, tf->tf_sp, pc);
	/* Scan backwards one instruction at a time from the faulting pc. */
	for (pc &= ~HPPA_PC_PRIV_MASK; pc > 0; pc -= sizeof(inst)) {
		inst = fuword((register_t *) pc);
		if (inst == -1) {
			printf(" fuword for inst at pc %08x failed\n", pc);
			break;
		}
		/* Check for the prologue instruction that sets sp. */
		if (STWM_R1_D_SR0_SP(inst)) {
			fp = tf->tf_sp - STWM_R1_D_SR0_SP(inst);
			printf(" sp from fp at pc %08x: %08x\n", pc, inst);
			break;
		}
	}
	user_backtrace_raw(tf->tf_iioq_head, fp);
}
/*
 * Disassemble the instruction at 'loc'.  'altfmt' selects an optional
 * alternate output format.  Returns the address of the linearly next
 * instruction — NOT necessarily the next one to execute, since this is
 * shared by the 'examine' and 'step' commands.
 *
 * Addresses below KSEG0 belong to user space and are fetched with
 * fuword() so a bad address cannot take a UTLB miss and lose the
 * debugger context (KSEG2 is not checked).
 */
db_addr_t
db_disasm(db_addr_t loc, bool altfmt)
{
	u_int32_t insn;

	if (loc >= MIPS_KSEG0_START) {
		/* Kernel address: safe to dereference directly. */
		insn = *(u_int32_t *)loc;
		return (db_disasm_insn(insn, loc, altfmt));
	}

	/*
	 * User address: go through fuword().  A -1 result means the
	 * fetch faulted; the encoding 0xffffffff ("sd ra, -1(ra)") is
	 * an unlikely real instruction.
	 */
	insn = fuword((void *)loc);
	if (insn == 0xffffffff) {
		db_printf("invalid address.\n");
		return loc;
	}
	return (db_disasm_insn(insn, loc, altfmt));
}
/*
 * sigreturn(2) system call (hppa, compat 1.6 signal trampoline).
 *
 * The user trampoline hands us a pointer to the sigcontext it saved;
 * we copy it in, sanity-check the PSW bits the user may not change,
 * optionally restore a full register frame via sc_ap (protecting the
 * space/control registers the user must not bash), then restore the
 * pc/sp queues, signal stack state and signal mask.  Returns
 * EJUSTRETURN on success so the trapframe is used as-is, or
 * EINVAL/EFAULT for a malformed context.
 */
int
compat_16_sys___sigreturn14(struct lwp *l, void *v, register_t *retval)
{
	struct compat_16_sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct proc *p = l->l_proc;
	struct sigcontext *scp;
	struct trapframe *tf;
	struct sigcontext tsigc;
	struct sigstate tstate;
	int rf, flags;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
#endif
	/* The context must be word-aligned. */
	if ((int)scp & 3)
		return (EINVAL);

	if (copyin(scp, &tsigc, sizeof(tsigc)) != 0)
		return (EFAULT);
	scp = &tsigc;

	/* Make sure the user isn't pulling a fast one on us! */
	/* XXX fredette - until this is done, huge security hole here. */
	/* XXX fredette - requiring that PSL_R be zero will hurt debuggers. */
#define PSW_MBS (PSW_C|PSW_Q|PSW_P|PSW_D|PSW_I)
#define PSW_MBZ (PSW_Y|PSW_Z|PSW_S|PSW_X|PSW_M|PSW_R)
	if ((scp->sc_ps & (PSW_MBS|PSW_MBZ)) != PSW_MBS)
		return (EINVAL);

	/* Restore register context. */
	tf = (struct trapframe *)l->l_md.md_regs;

	/*
	 * Grab pointer to hardware state information.
	 * If zero, the user is probably doing a longjmp.
	 */
	if ((rf = scp->sc_ap) == 0)
		goto restore;

	/*
	 * See if there is anything to do before we go to the
	 * expense of copying in the trapframe
	 */
	flags = fuword((caddr_t)rf);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): sc_ap %x flags %x\n",
		    p->p_pid, rf, flags);
#endif
	/* fuword failed (bogus sc_ap value). */
	if (flags == -1)
		return (EINVAL);

	if (flags == 0 || copyin((caddr_t)rf, &tstate, sizeof(tstate)) != 0)
		goto restore;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sigreturn(%d): ssp %p usp %x scp %p\n",
		    p->p_pid, &flags, scp->sc_sp, SCARG(uap, sigcntxp));
#endif
	/*
	 * Restore most of the users registers except for those
	 * in the sigcontext; they will be handled below.
	 */
	if (flags & SS_USERREGS) {
		/*
		 * There are more registers that the user can tell
		 * us to bash than registers that, for security
		 * or other reasons, we must protect.  So it's
		 * easier (but not faster), to copy these sensitive
		 * register values into the user-provided frame,
		 * then bulk-copy the user-provided frame into
		 * the process' frame.
		 */
#define SIG_PROTECT(r) tstate.ss_frame.r = tf->r
		/* SRs 5,6,7 must be protected. */
		SIG_PROTECT(tf_sr5);
		SIG_PROTECT(tf_sr6);
		SIG_PROTECT(tf_sr7);
		/* all CRs except CR11 must be protected. */
		SIG_PROTECT(tf_rctr);	/* CR0 */
		/* CRs 1-8 are reserved */
		SIG_PROTECT(tf_pidr1);	/* CR8 */
		SIG_PROTECT(tf_pidr2);	/* CR9 */
		SIG_PROTECT(tf_ccr);	/* CR10 */
		SIG_PROTECT(tf_pidr3);	/* CR12 */
		SIG_PROTECT(tf_pidr4);	/* CR14 */
		SIG_PROTECT(tf_eiem);	/* CR15 */
		/* CR17 is the IISQ head */
		/* CR18 is the IIOQ head */
		SIG_PROTECT(tf_iir);	/* CR19 */
		SIG_PROTECT(tf_isr);	/* CR20 */
		SIG_PROTECT(tf_ior);	/* CR21 */
		/* CR22 is the IPSW */
		SIG_PROTECT(tf_eirr);	/* CR23 */
		SIG_PROTECT(tf_hptm);	/* CR24 */
		SIG_PROTECT(tf_vtop);	/* CR25 */
		/* XXX where are CR26, CR27, CR29, CR31? */
		SIG_PROTECT(tf_cr28);	/* CR28 */
		SIG_PROTECT(tf_cr30);	/* CR30 */
#undef SIG_PROTECT
		/* The bulk copy. */
		*tf = tstate.ss_frame;
	}

	/*
	 * Restore the original FP context
	 */
	/* XXX fredette */

restore:
	/*
	 * Restore the user supplied information.
	 * This should be at the last so that the error (EINVAL)
	 * is reported to the sigreturn caller, not to the
	 * jump destination.
	 */
	tf->tf_sp = scp->sc_sp;
	/* XXX should we be doing the space registers? */
	tf->tf_iisq_head = scp->sc_pcsqh;
	/* Force user privilege level on the pc offset queue. */
	tf->tf_iioq_head = scp->sc_pcoqh | HPPA_PC_PRIV_USER;
	tf->tf_iisq_tail = scp->sc_pcsqt;
	tf->tf_iioq_tail = scp->sc_pcoqt | HPPA_PC_PRIV_USER;
	tf->tf_ipsw = scp->sc_ps;

	/* Restore signal stack. */
	if (scp->sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &scp->sc_mask, 0);

#ifdef DEBUG
#if 0 /* XXX FP state */
	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
		printf("sigreturn(%d): copied in FP state (%x) at %p\n",
		    p->p_pid, *(u_int *)&tstate.ss_fpstate,
		    &tstate.ss_fpstate);
#endif
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
/*
 * BSD system call dispatcher (Darwin i386, funnel era).
 *
 * Decodes the syscall number from eax (with the indirect SYS_syscall
 * slot fetching the real number from the user stack), copies the
 * arguments in, takes the kernel or network funnel as required by the
 * sysent entry, invokes the handler, and reflects the result back into
 * the saved user registers.  Ends with thread_exception_return() and
 * does not return.
 */
void
unix_syscall(struct i386_saved_state *regs)
{
	thread_act_t thread;
	void *vt;
	unsigned short code;
	struct sysent *callp;
	int nargs, error;
	volatile int *rval;
	int funnel_type;
	vm_offset_t params;
	extern int nsysent;
	struct proc *p;
	struct proc *current_proc();

	thread = current_act();
	p = current_proc();
	rval = (int *)get_bsduthreadrval(thread);

	//printf("[scall : eax %x]", regs->eax);
	code = regs->eax;
	/* Arguments start one word above the return address on the stack. */
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* Out-of-range numbers map to the enosys slot (63). */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* Indirect syscall: real number is the first argument. */
		code = fuword(params);
		params += sizeof (int);
		callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	}

	vt = get_bsduthreadarg(thread);

	if ((nargs = (callp->sy_narg * sizeof (int))) &&
	    (error = copyin((char *) params, (char *)vt , nargs)) != 0) {
		/* Argument copyin faulted: fail the call immediately. */
		regs->eax = error;
		regs->efl |= EFL_CF;
		thread_exception_return();
		/* NOTREACHED */
	}

	rval[0] = 0;
	/* Pre-load edx so 32-bit results leave it untouched. */
	rval[1] = regs->edx;

	funnel_type = callp->sy_funnel;
	if (funnel_type == KERNEL_FUNNEL)
		(void) thread_funnel_set(kernel_flock, TRUE);
	else if (funnel_type == NETWORK_FUNNEL)
		(void) thread_funnel_set(network_flock, TRUE);

	set_bsduthreadargs(thread, regs, NULL);

	if (callp->sy_narg > 8)
		panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

	ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

	{
		int *ip = (int *)vt;
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
		    *ip, *(ip+1), *(ip+2), *(ip+3), 0);
	}

	error = (*(callp->sy_call))(p, (void *) vt, (int *) &rval[0]);

#if 0
	/* May be needed with vfork changes */
	regs = USER_REGS(thread);
#endif
	if (error == ERESTART) {
		/* Back up eip over the 7-byte lcall to retry the syscall. */
		regs->eip -= 7;
	} else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	ktrsysret(p, code, error, rval[0], funnel_type);

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
	    error, rval[0], rval[1], 0, 0);

	if (funnel_type != NO_FUNNEL)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}
/*
 * fuword32: fetch a 32-bit word from user space.
 *
 * Thin wrapper that narrows the result of fuword() to int32_t.  As
 * with the underlying primitive, a fault (-1) is indistinguishable
 * from a legitimate 0xffffffff word.
 */
int32_t
fuword32(const void *addr)
{
	long word;

	word = fuword(addr);
	return ((int32_t)word);
}
/* Fetch an unsigned long from user space at 'adr' via fuword(). */
static unsigned long
get_fs_long(unsigned long *adr)
{
	unsigned long value;

	value = fuword(adr);
	return value;
}
/*
 * Fetch an unsigned short from user space at 'adr' via fuword().
 * The wider fuword() result is implicitly truncated to 16 bits.
 */
static unsigned short
get_fs_word(unsigned short *adr)
{
	unsigned short value;

	value = (unsigned short)fuword(adr);
	return value;
}
static int math_emulate(struct trapframe * tframe) { unsigned char FPU_modrm; unsigned short code; #ifdef LOOKAHEAD_LIMIT int lookahead_limit = LOOKAHEAD_LIMIT; #endif #ifdef PARANOID if (emulating) { printf("ERROR: wm-FPU-emu is not RE-ENTRANT!\n"); } REENTRANT_CHECK(ON); #endif /* PARANOID */ if ((((struct pcb *) curproc->p_addr)->pcb_flags & FP_SOFTFP) == 0) { finit(); control_word = __INITIAL_NPXCW__; ((struct pcb *) curproc->p_addr)->pcb_flags |= FP_SOFTFP; } FPU_info = tframe; FPU_ORIG_EIP = FPU_EIP; /* --pink-- */ if (FPU_CS != 0x001f) { printf("math_emulate: %x : %x\n", FPU_CS, FPU_EIP); panic("FPU emulation in kernel"); } #ifdef notyet /* We cannot handle emulation in v86-mode */ if (FPU_EFLAGS & 0x00020000) { FPU_ORIG_EIP = FPU_EIP; math_abort(FPU_info, SIGILL); } #endif FPU_lookahead = FPU_LOOKAHEAD; if (curproc->p_flag & P_TRACED) FPU_lookahead = 0; do_another_FPU_instruction: REENTRANT_CHECK(OFF); code = fuword((u_int *) FPU_EIP); REENTRANT_CHECK(ON); if ((code & 0xff) == 0x9b) { /* fwait */ if (status_word & SW_Summary) goto do_the_FPU_interrupt; else { FPU_EIP++; goto FPU_instruction_done; } } if (status_word & SW_Summary) { /* Ignore the error for now if the current instruction is a * no-wait control instruction */ /* The 80486 manual contradicts itself on this topic, so I use * the following list of such instructions until I can check * on a real 80486: fninit, fnstenv, fnsave, fnstsw, fnstenv, * fnclex. */ if (!((((code & 0xf803) == 0xe003) || /* fnclex, fninit, * fnstsw */ (((code & 0x3003) == 0x3001) && /* fnsave, fnstcw, * fnstenv, fnstsw */ ((code & 0xc000) != 0xc000))))) { /* This is a guess about what a real FPU might do to * this bit: */ /* status_word &= ~SW_Summary; ****/ /* We need to simulate the action of the kernel to FPU * interrupts here. 
Currently, the "real FPU" part of * the kernel (0.99.10) clears the exception flags, * sets the registers to empty, and passes information * back to the interrupted process via the cs selector * and operand selector, so we do the same. */ do_the_FPU_interrupt: cs_selector &= 0xffff0000; cs_selector |= (status_word & ~SW_Top) | ((top & 7) << SW_Top_Shift); operand_selector = tag_word(); status_word = 0; top = 0; { int r; for (r = 0; r < 8; r++) { regs[r].tag = TW_Empty; } } REENTRANT_CHECK(OFF); math_abort(SIGFPE); } } FPU_entry_eip = FPU_ORIG_EIP = FPU_EIP; if ((code & 0xff) == 0x66) { /* size prefix */ FPU_EIP++; REENTRANT_CHECK(OFF); code = fuword((u_int *) FPU_EIP); REENTRANT_CHECK(ON); } FPU_EIP += 2; FPU_modrm = code >> 8; FPU_rm = FPU_modrm & 7; if (FPU_modrm < 0300) { /* All of these instructions use the mod/rm byte to get a data * address */ get_address(FPU_modrm); if (!(code & 1)) { unsigned short status1 = status_word; FPU_st0_ptr = &st(0); FPU_st0_tag = FPU_st0_ptr->tag; /* Stack underflow has priority */ if (NOT_EMPTY_0) { switch ((code >> 1) & 3) { case 0: reg_load_single(); break; case 1: reg_load_int32(); break; case 2: reg_load_double(); break; case 3: reg_load_int16(); break; } /* No more access to user memory, it is safe * to use static data now */ FPU_st0_ptr = &st(0); FPU_st0_tag = FPU_st0_ptr->tag; /* NaN operands have the next priority. */ /* We have to delay looking at st(0) until * after loading the data, because that data * might contain an SNaN */ if ((FPU_st0_tag == TW_NaN) || (FPU_loaded_data.tag == TW_NaN)) { /* Restore the status word; we might * have loaded a denormal. */ status_word = status1; if ((FPU_modrm & 0x30) == 0x10) { /* fcom or fcomp */ EXCEPTION(EX_Invalid); setcc(SW_C3 | SW_C2 | SW_C0); if (FPU_modrm & 0x08) pop(); /* fcomp, so we pop. 
*/ } else real_2op_NaN(FPU_st0_ptr, &FPU_loaded_data, FPU_st0_ptr); goto reg_mem_instr_done; } switch ((FPU_modrm >> 3) & 7) { case 0: /* fadd */ reg_add(FPU_st0_ptr, &FPU_loaded_data, FPU_st0_ptr, control_word); break; case 1: /* fmul */ reg_mul(FPU_st0_ptr, &FPU_loaded_data, FPU_st0_ptr, control_word); break; case 2: /* fcom */ compare_st_data(); break; case 3: /* fcomp */ compare_st_data(); pop(); break; case 4: /* fsub */ reg_sub(FPU_st0_ptr, &FPU_loaded_data, FPU_st0_ptr, control_word); break; case 5: /* fsubr */ reg_sub(&FPU_loaded_data, FPU_st0_ptr, FPU_st0_ptr, control_word); break; case 6: /* fdiv */ reg_div(FPU_st0_ptr, &FPU_loaded_data, FPU_st0_ptr, control_word); break; case 7: /* fdivr */ if (FPU_st0_tag == TW_Zero) status_word = status1; /* Undo any denorm tag, * zero-divide has * priority. */ reg_div(&FPU_loaded_data, FPU_st0_ptr, FPU_st0_ptr, control_word); break; } } else { if ((FPU_modrm & 0x30) == 0x10) {
/*
 * Release a userland mutex (_umtx_unlock system call).
 *
 * Verifies that the calling thread owns the umtx, then releases it
 * with a compare-and-set on the user-space owner word.  If the lock
 * was contested, ownership is first replaced with UMTX_CONTESTED and
 * one blocked waiter is woken.  Returns 0 on success, EFAULT if the
 * user word cannot be accessed, EPERM if the caller is not the owner,
 * or EINVAL if the word was modified outside this API (CAS mismatch).
 */
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
	struct thread *blocked;
	struct umtx *umtx;
	struct umtx_q *uq;
	intptr_t owner;
	intptr_t old;

	umtx = uap->umtx;

	/*
	 * Make sure we own this mtx.
	 *
	 * XXX Need a {fu,su}ptr this is not correct on arch where
	 * sizeof(intptr_t) != sizeof(long).
	 */
	if ((owner = fuword(&umtx->u_owner)) == -1)
		return (EFAULT);

	/* The owner word is the owning thread pointer plus flag bits. */
	if ((struct thread *)(owner & ~UMTX_CONTESTED) != td)
		return (EPERM);

	/*
	 * If we own it but it isn't contested then we can just release and
	 * return.
	 */
	if ((owner & UMTX_CONTESTED) == 0) {
		owner = casuptr((intptr_t *)&umtx->u_owner,
		    (intptr_t)td, UMTX_UNOWNED);

		if (owner == -1)
			return (EFAULT);
		/*
		 * If this failed someone modified the memory without going
		 * through this api.
		 */
		if (owner != (intptr_t)td)
			return (EINVAL);

		return (0);
	}

	/* Contested: hand the word over while keeping the contested bit. */
	old = casuptr((intptr_t *)&umtx->u_owner, owner, UMTX_CONTESTED);

	if (old == -1)
		return (EFAULT);

	/*
	 * This will only happen if someone modifies the lock without going
	 * through this api.
	 */
	if (old != owner)
		return (EINVAL);

	/*
	 * We have to wake up one of the blocked threads.
	 */
	UMTX_LOCK();
	uq = umtx_lookup(td, umtx);
	if (uq != NULL) {
		blocked = TAILQ_FIRST(&uq->uq_tdq);
		wakeup(blocked);
	}
	UMTX_UNLOCK();

	return (0);
}
/*
 * DTrace systrace provider shim (Darwin x86): interposed in place of
 * the real sysent handler.  Fires the entry probe, calls the
 * underlying syscall implementation, then fires the return probe with
 * the return values munged to match the provider's conventions.
 *
 * The syscall number is recovered from the saved user registers the
 * same way unix_syscall does it, including the indirect (code == 0)
 * case where the real number is arg0 (64-bit) or the first word on
 * the user stack (32-bit).
 */
int32_t
dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv)
{
	unsigned short code;	/* The system call number */

	systrace_sysent_t *sy;
	dtrace_id_t id;
	int32_t rval;
#if 0 /* XXX */
	proc_t *p;
#endif
	syscall_arg_t *ip = (syscall_arg_t *)uap;

#if defined (__x86_64__)
	{
		pal_register_cache_state(current_thread(), VALID);
		x86_saved_state_t *tagged_regs =
		    (x86_saved_state_t *)find_user_regs(current_thread());

		if (is_saved_state64(tagged_regs)) {
			x86_saved_state64_t *regs = saved_state64(tagged_regs);
			code = regs->rax & SYSCALL_NUMBER_MASK;
			/*
			 * Check for indirect system call... system call number
			 * passed as 'arg0'
			 */
			if (code == 0) {
				code = regs->rdi;
			}
		} else {
			code = saved_state32(tagged_regs)->eax & I386_SYSCALL_NUMBER_MASK;

			if (code == 0) {
				/* Indirect: fetch the number from the user stack. */
				vm_offset_t params = (vm_offset_t)
				    (saved_state32(tagged_regs)->uesp + sizeof (int));
				code = fuword(params);
			}
		}
	}
#else
#error Unknown Architecture
#endif

	// Bounds "check" the value of code a la unix_syscall
	sy = (code >= NUM_SYSENT) ? &systrace_sysent[63] : &systrace_sysent[code];

	if ((id = sy->stsy_entry) != DTRACE_IDNONE) {
		uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
		if (uthread)
			uthread->t_dtrace_syscall_args = (void *)ip;

		if (ip)
			(*systrace_probe)(id, *ip, *(ip+1), *(ip+2), *(ip+3), *(ip+4));
		else
			(*systrace_probe)(id, 0, 0, 0, 0, 0);

		if (uthread)
			uthread->t_dtrace_syscall_args = (void *)0;
	}

#if 0 /* XXX */
	/*
	 * We want to explicitly allow DTrace consumers to stop a process
	 * before it actually executes the meat of the syscall.
	 */
	p = ttoproc(curthread);
	mutex_enter(&p->p_lock);
	if (curthread->t_dtrace_stop && !curthread->t_lwp->lwp_nostop) {
		curthread->t_dtrace_stop = 0;
		stop(PR_REQUESTED, 0);
	}
	mutex_exit(&p->p_lock);
#endif

	rval = (*sy->stsy_underlying)(pp, uap, rv);

	if ((id = sy->stsy_return) != DTRACE_IDNONE) {
		uint64_t munged_rv0, munged_rv1;
		uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

		if (uthread)
			uthread->t_dtrace_errno = rval; /* Establish t_dtrace_errno now in case this enabling refers to it. */

		/*
		 * "Decode" rv for use in the call to dtrace_probe()
		 */
		if (rval == ERESTART) {
			munged_rv0 = -1LL; /* System call will be reissued in user mode. Make DTrace report a -1 return. */
			munged_rv1 = -1LL;
		} else if (rval != EJUSTRETURN) {
			if (rval) {
				munged_rv0 = -1LL; /* Mimic what libc will do. */
				munged_rv1 = -1LL;
			} else {
				/* Widen rv[] per the declared return type. */
				switch (sy->stsy_return_type) {
				case _SYSCALL_RET_INT_T:
					munged_rv0 = rv[0];
					munged_rv1 = rv[1];
					break;
				case _SYSCALL_RET_UINT_T:
					munged_rv0 = ((u_int)rv[0]);
					munged_rv1 = ((u_int)rv[1]);
					break;
				case _SYSCALL_RET_OFF_T:
				case _SYSCALL_RET_UINT64_T:
					munged_rv0 = *(u_int64_t *)rv;
					munged_rv1 = 0LL;
					break;
				case _SYSCALL_RET_ADDR_T:
				case _SYSCALL_RET_SIZE_T:
				case _SYSCALL_RET_SSIZE_T:
					munged_rv0 = *(user_addr_t *)rv;
					munged_rv1 = 0LL;
					break;
				case _SYSCALL_RET_NONE:
					munged_rv0 = 0LL;
					munged_rv1 = 0LL;
					break;
				default:
					munged_rv0 = 0LL;
					munged_rv1 = 0LL;
					break;
				}
			}
		} else {
			munged_rv0 = 0LL;
			munged_rv1 = 0LL;
		}

		/*
		 * <http://mail.opensolaris.org/pipermail/dtrace-discuss/2007-January/003276.html> says:
		 *
		 * "This is a bit of an historical artifact. At first, the syscall provider just
		 * had its return value in arg0, and the fbt and pid providers had their return
		 * values in arg1 (so that we could use arg0 for the offset of the return site).
		 *
		 * We inevitably started writing scripts where we wanted to see the return
		 * values from probes in all three providers, and we made this script easier
		 * to write by replicating the syscall return values in arg1 to match fbt and
		 * pid. We debated briefly about removing the return value from arg0, but
		 * decided that it would be less confusing to have the same data in two places
		 * than to have some non-helpful, non-intuitive value in arg0.
		 *
		 * This change was made 4/23/2003 according to the DTrace project's putback log."
		 */
		(*systrace_probe)(id, munged_rv0, munged_rv0, munged_rv1, (uint64_t)rval, 0);
	}

	return (rval);
}
/* Fetch a 32-bit word from user space at 'adr' via fuword(). */
static u_int32_t
get_fs_long(u_int32_t *adr)
{
	u_int32_t value;

	value = fuword(adr);
	return value;
}
/*
 * SunOS-emulation system call dispatcher (m68k, traced path).
 *
 * SunOS passes the syscall number on the user stack rather than in D0,
 * so the real number is fetched with fuword() and the stack is popped
 * on the kernel's behalf (tracked via MDL_STACKADJ so the pop can be
 * undone on ERESTART).  sigreturn is exempt: its argument immediately
 * follows the number with no saved pc to skip.  Handles the indirect
 * SUNOS_SYS_syscall case, copies arguments in, runs trace_enter/exit
 * hooks, dispatches, and reflects the result into D0/D1 and the carry
 * flag.
 */
static void
sunos_syscall_fancy(register_t code, struct lwp *l, struct frame *frame)
{
	struct proc *p = l->l_proc;
	char *params;
	const struct sysent *callp;
	int error, nsys;
	size_t argsize;
	register_t args[16], rval[2];

	nsys = p->p_emul->e_nsysent;
	callp = p->p_emul->e_sysent;

	/*
	 * SunOS passes the syscall-number on the stack, whereas
	 * BSD passes it in D0. So, we have to get the real "code"
	 * from the stack, and clean up the stack, as SunOS glue
	 * code assumes the kernel pops the syscall argument the
	 * glue pushed on the stack. Sigh...
	 */
	code = fuword((void *)frame->f_regs[SP]);

	/*
	 * XXX
	 * Don't do this for sunos_sigreturn, as there's no stored pc
	 * on the stack to skip, the argument follows the syscall
	 * number without a gap.
	 */
	if (code != SUNOS_SYS_sigreturn) {
		frame->f_regs[SP] += sizeof (int);
		/*
		 * remember that we adjusted the SP,
		 * might have to undo this if the system call
		 * returns ERESTART.
		 */
		l->l_md.md_flags |= MDL_STACKADJ;
	} else
		l->l_md.md_flags &= ~MDL_STACKADJ;

	params = (char *)frame->f_regs[SP] + sizeof(int);

	switch (code) {
	case SUNOS_SYS_syscall:
		/*
		 * Code is first argument, followed by actual args.
		 */
		code = fuword(params);
		params += sizeof(int);
		break;
	default:
		break;
	}

	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;	/* illegal */
	else
		callp += code;

	argsize = callp->sy_argsize;
	if (argsize) {
		error = copyin(params, (void *)args, argsize);
		if (error)
			goto bad;
	}

	if ((error = trace_enter(code, args, callp->sy_narg)) != 0)
		goto out;

	rval[0] = 0;
	rval[1] = frame->f_regs[D1];
	error = sy_call(callp, l, args, rval);
out:
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame->f_regs[D0] = rval[0];
		frame->f_regs[D1] = rval[1];
		frame->f_sr &= ~PSL_C;	/* carry bit */
		break;
	case ERESTART:
		/*
		 * We always enter through a `trap' instruction, which is 2
		 * bytes, so adjust the pc by that amount.
		 */
		frame->f_pc = frame->f_pc - 2;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
	bad:
		frame->f_regs[D0] = error;
		frame->f_sr |= PSL_C;	/* carry bit */
		break;
	}

	/* need new p-value for this */
	if (l->l_md.md_flags & MDL_STACKADJ) {
		l->l_md.md_flags &= ~MDL_STACKADJ;
		/* Undo the earlier stack pop so the retried call sees it. */
		if (error == ERESTART)
			frame->f_regs[SP] -= sizeof (int);
	}

	trace_exit(code, rval, error);
}
/*
 * Function:	unix_syscall
 *
 * Inputs:	state	- saved state for the 32-bit user thread
 *
 * Outputs:	none	(returns to user mode via thread_exception_return)
 *
 * BSD system call dispatcher for 32-bit user threads on x86_64 Darwin.
 * Decodes the syscall number from eax (indirect calls fetch it from
 * the user stack), copies in and munges arguments, runs audit/ktrace/
 * kdebug hooks, dispatches through sysent, and stores the result back
 * into the saved registers.  ERESTART is handled by backing up the
 * user pc via pal_syscall_restart().
 */
void
unix_syscall(x86_saved_state_t *state)
{
	thread_t	thread;
	void	*vt;
	unsigned int	code;
	struct sysent	*callp;

	int	error;
	vm_offset_t	params;
	struct proc	*p;
	struct uthread	*uthread;
	x86_saved_state32_t	*regs;
	boolean_t	is_vfork;

	assert(is_saved_state32(state));
	regs = saved_state32(state);
#if DEBUG
	if (regs->eax == 0x800)
		thread_exception_return();
#endif
	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	/* Get the approriate proc; may be different from task's for vfork() */
	is_vfork = uthread->uu_flag & UT_VFORK;
	if (__improbable(is_vfork != 0))
		p = current_proc();
	else
		p = (struct proc *)get_bsdtask_info(current_task());

	/* Verify that we are not being called from a task without a proc */
	if (__improbable(p == NULL)) {
		regs->eax = EPERM;
		regs->efl |= EFL_CF;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	code = regs->eax & I386_SYSCALL_NUMBER_MASK;
	DEBUG_KPRINT_SYSCALL_UNIX("unix_syscall: code=%d(%s) eip=%u\n",
	    code, syscallnames[code >= NUM_SYSENT ? 63 : code], (uint32_t)regs->eip);
	params = (vm_offset_t) (regs->uesp + sizeof (int));

	regs->efl &= ~(EFL_CF);

	/* Out-of-range numbers map to the enosys slot (63). */
	callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

	if (__improbable(callp == sysent)) {
		/* Indirect syscall: real number is the first stack word. */
		code = fuword(params);
		params += sizeof(int);
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
	}

	vt = (void *)uthread->uu_arg;

	if (callp->sy_arg_bytes != 0) {
#if CONFIG_REQUIRES_U32_MUNGING
		sy_munge_t	*mungerp;
#else
#error U32 syscalls on x86_64 kernel requires munging
#endif
		uint32_t	nargs;

		assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg));
		nargs = callp->sy_arg_bytes;
		error = copyin((user_addr_t) params, (char *) vt, nargs);
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;
			thread_exception_return();
			/* NOTREACHED */
		}

		/* code 180 (kdebug_trace) is excluded from kdebug logging. */
		if (__probable(code != 180)) {
			int *ip = (int *)vt;

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			    BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
			    *ip, *(ip+1), *(ip+2), *(ip+3), 0);
		}

#if CONFIG_REQUIRES_U32_MUNGING
		mungerp = callp->sy_arg_munge32;

		if (mungerp != NULL)
			(*mungerp)(vt);
#endif
	} else
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
		    0, 0, 0, 0, 0);

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	kauth_cred_uthread_update(uthread, p);

	uthread->uu_rval[0] = 0;
	uthread->uu_rval[1] = 0;
	uthread->uu_flag |= UT_NOTCANCELPT;
	uthread->syscall_code = code;

#ifdef JOE_DEBUG
	uthread->uu_iocount = 0;
	uthread->uu_vpindex = 0;
#endif

	AUDIT_SYSCALL_ENTER(code, p, uthread);
	error = (*(callp->sy_call))((void *) p, (void *) vt, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, p, uthread, error);

#ifdef JOE_DEBUG
	if (uthread->uu_iocount)
		printf("system call returned with uu_iocount != 0\n");
#endif
#if CONFIG_DTRACE
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */

	if (__improbable(error == ERESTART)) {
		/*
		 * Move the user's pc back to repeat the syscall:
		 * 5 bytes for a sysenter, or 2 for an int 8x.
		 * The SYSENTER_TF_CS covers single-stepping over a sysenter
		 * - see debug trap handler in idt.s/idt64.s
		 */
		pal_syscall_restart(thread, state);
	} else if (error != EJUSTRETURN) {
		if (__improbable(error)) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			/*
			 * We split retval across two registers, in case the
			 * syscall had a 64-bit return value, in which case
			 * eax/edx matches the function call ABI.
			 */
			regs->eax = uthread->uu_rval[0];
			regs->edx = uthread->uu_rval[1];
		}
	}

	DEBUG_KPRINT_SYSCALL_UNIX(
	    "unix_syscall: error=%d retval=(%u,%u)\n",
	    error, regs->eax, regs->edx);

	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if (__improbable(uthread->uu_lowpri_window)) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	if (__probable(code != 180))
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		    error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

	/* vfork parent returning from execve needs special PAL handling. */
	if (__improbable(!is_vfork && callp->sy_call == (sy_call_t *)execve && !error)) {
		pal_execve_return(thread);
	}

	thread_exception_return();
	/* NOTREACHED */
}
/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap.  The MP lock is not
 * held on entry or return.  We are responsible for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * MPSAFE
 *
 * Decodes the syscall number from eax (SYS_syscall / SYS___syscall
 * fetch the real number from the user stack via fuword), copies the
 * arguments in, dispatches through the emulation's sysent table, and
 * reflects the result into eax/edx and the carry flag.  ERESTART backs
 * up eip by the instruction length saved in tf_err.
 */
void
syscall2(struct trapframe *frame)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	caddr_t params;
	struct sysent *callp;
	register_t orig_tf_eflags;
	int sticks;
	int error;
	int narg;
#ifdef INVARIANTS
	int crit_count = td->td_critcount;
#endif
	int have_mplock = 0;
	u_int code;
	union sysunion args;

#ifdef DIAGNOSTIC
	if (ISPL(frame->tf_cs) != SEL_UPL) {
		get_mplock();
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	KTR_LOG(kernentry_syscall, p->p_pid, lp->lwp_tid,
	    frame->tf_eax);

	userenter(td, p);	/* lazy raise our priority */

	/*
	 * Misc
	 */
	sticks = (int)td->td_sticks;
	orig_tf_eflags = frame->tf_eflags;

	/*
	 * Virtual kernel intercept - if a VM context managed by a virtual
	 * kernel issues a system call the virtual kernel handles it, not us.
	 * Restore the virtual kernel context and return from its system
	 * call.  The current frame is copied out to the virtual kernel.
	 */
	if (lp->lwp_vkernel && lp->lwp_vkernel->ve) {
		vkernel_trap(lp, frame);
		error = EJUSTRETURN;
		callp = NULL;
		goto out;
	}

	/*
	 * Get the system call parameters and account for time
	 */
	lp->lwp_md.md_regs = frame;
	params = (caddr_t)frame->tf_esp + sizeof(int);
	code = frame->tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		(*p->p_sysent->sv_prepsyscall)(
		    frame, (int *)(&args.nosys.sysmsg + 1),
		    &code, &params);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

#if 0
	if (p->p_sysent->sv_name[0] == 'L')
		kprintf("Linux syscall, code = %d\n", code);
#endif

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (narg && params) {
		error = copyin(params, (caddr_t)(&args.nosys.sysmsg + 1),
		    narg * sizeof(register_t));
		if (error) {
#ifdef KTRACE
			if (KTRPOINT(td, KTR_SYSCALL)) {
				MAKEMPSAFE(have_mplock);
				ktrsyscall(lp, code, narg,
				    (void *)(&args.nosys.sysmsg + 1));
			}
#endif
			goto bad;
		}
	}

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL)) {
		MAKEMPSAFE(have_mplock);
		ktrsyscall(lp, code, narg, (void *)(&args.nosys.sysmsg + 1));
	}
#endif

	/*
	 * For traditional syscall code edx is left untouched when 32 bit
	 * results are returned.  Since edx is loaded from fds[1] when the
	 * system call returns we pre-set it here.
	 */
	args.sysmsg_fds[0] = 0;
	args.sysmsg_fds[1] = frame->tf_edx;

	/*
	 * The syscall might manipulate the trap frame. If it does it
	 * will probably return EJUSTRETURN.
	 */
	args.sysmsg_frame = frame;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	/*
	 * NOTE: All system calls run MPSAFE now.  The system call itself
	 *	 is responsible for getting the MP lock.
	 */
	error = (*callp->sy_call)(&args);

out:
	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		lp = curthread->td_lwp;
		frame->tf_eax = args.sysmsg_fds[0];
		frame->tf_edx = args.sysmsg_fds[1];
		frame->tf_eflags &= ~PSL_C;
		break;
	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes. We saved this in tf_err.
		 */
		frame->tf_eip -= frame->tf_err;
		break;
	case EJUSTRETURN:
		break;
	case EASYNC:
		panic("Unexpected EASYNC return value (for now)");
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			/* Translate the error per the emulation's table. */
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_eax = error;
		frame->tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((orig_tf_eflags & PSL_T) && !(orig_tf_eflags & PSL_VM)) {
		MAKEMPSAFE(have_mplock);
		frame->tf_eflags &= ~PSL_T;
		trapsignal(lp, SIGTRAP, TRAP_TRACE);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(lp, frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET)) {
		MAKEMPSAFE(have_mplock);
		ktrsysret(lp, code, error, args.sysmsg_result);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	userexit(lp);
	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_mplock)
		rel_mplock();
	KTR_LOG(kernentry_syscall_ret, p->p_pid, lp->lwp_tid, error);
#ifdef INVARIANTS
	KASSERT(crit_count == td->td_critcount,
	    ("syscall: critical section count mismatch! %d/%d",
	    crit_count, td->td_pri));
	KASSERT(&td->td_toks_base == td->td_toks_stop,
	    ("syscall: extra tokens held after trap! %zd",
	    td->td_toks_stop - &td->td_toks_base));
#endif
}
/*
 * syscall_fancy --
 *	System call dispatch path used when syscall tracing is active:
 *	decodes the syscall number (handling the SYS_syscall and
 *	SYS___syscall indirection forms), copies in the user arguments,
 *	brackets the handler with trace_enter()/trace_exit(), and stores
 *	the result or errno back into the trapframe.
 */
static void
syscall_fancy(register_t code, struct lwp *l, struct frame *frame)
{
	char *params;
	const struct sysent *callp;
	int error, nsys;
	size_t argsize;
	register_t args[16], rval[2];
	struct proc *p = l->l_proc;

	nsys = p->p_emul->e_nsysent;
	callp = p->p_emul->e_sysent;

	/* User arguments begin just above the return address on the stack. */
	params = (char *)frame->f_regs[SP] + sizeof(int);

	switch (code) {
	case SYS_syscall:
		/*
		 * Code is first argument, followed by actual args.
		 */
		code = fuword(params);
		params += sizeof(int);
#if defined(COMPAT_13) || defined(COMPAT_16)
		/*
		 * XXX sigreturn requires special stack manipulation
		 * that is only done if entered via the sigreturn
		 * trap.  Cannot allow it here so make sure we fail.
		 */
		switch (code) {
#ifdef COMPAT_13
		case SYS_compat_13_sigreturn13:
#endif
#ifdef COMPAT_16
		case SYS_compat_16___sigreturn14:
#endif
			/* Force an out-of-range code so dispatch fails below. */
			code = nsys;
			break;
		}
#endif
		break;
	case SYS___syscall:
		/*
		 * Like syscall, but code is a quad, so as to maintain
		 * quad alignment for the rest of the arguments.
		 */
		code = fuword(params + _QUAD_LOWWORD * sizeof(int));
		params += sizeof(quad_t);
		break;
	default:
		break;
	}

	/* Out-of-range codes are routed to the emulation's nosys entry. */
	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;		/* illegal */
	else
		callp += code;

	argsize = callp->sy_argsize;
	if (argsize) {
		error = copyin(params, (void *)args, argsize);
		if (error)
			goto bad;
	}

	if ((error = trace_enter(code, args, callp->sy_narg)) != 0)
		goto out;

	rval[0] = 0;
	rval[1] = frame->f_regs[D1];
	error = sy_call(callp, l, args, rval);
out:
	switch (error) {
	case 0:
		/*
		 * Reinitialize lwp/proc pointers as they may be different
		 * if this is a child returning from fork syscall.
		 */
		l = curlwp;
		p = l->l_proc;
		frame->f_regs[D0] = rval[0];
		frame->f_regs[D1] = rval[1];
		frame->f_sr &= ~PSL_C;		/* carry bit */
#ifdef COMPAT_50
		/* see syscall_plain for a comment explaining this */

		/*
		 * Some pre-m68k ELF libc assembler stubs assume
		 * %a0 is preserved across system calls...
		 */
		if (p->p_emul == &emul_netbsd)
			frame->f_regs[A0] = rval[0];
#endif
		break;
	case ERESTART:
		/*
		 * We always enter through a `trap' instruction, which is 2
		 * bytes, so adjust the pc by that amount.
		 */
		frame->f_pc = frame->f_pc - 2;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
	bad:
		/*
		 * XXX: SVR4 uses this code-path, so we may have
		 * to translate errno.
		 */
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		frame->f_regs[D0] = error;
		frame->f_sr |= PSL_C;		/* carry bit */
		break;
	}

	trace_exit(code, rval, error);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
compat_16_sys___sigreturn14(struct lwp *l,
    const struct compat_16_sys___sigreturn14_args *uap, register_t *retval)
{
	/* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */
	struct proc *p = l->l_proc;
	struct sigcontext *scp;
	struct frame *frame;
	struct sigcontext tsigc;
	struct sigstate tstate;
	int rf, flags;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %p\n", p->p_pid, scp);
#endif
	/* The user pointer must be word-aligned before we copyin() it. */
	if ((int)scp & 1)
		return EINVAL;

	if (copyin(scp, &tsigc, sizeof(tsigc)) != 0)
		return EFAULT;
	/* Work on the kernel copy from here on. */
	scp = &tsigc;

	/* Make sure the user isn't pulling a fast one on us! */
	/* Reject any PS with must-be-zero, interrupt-level or supervisor bits set. */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
		return EINVAL;

	/* Restore register context. */
	frame = (struct frame *) l->l_md.md_regs;

	/*
	 * Grab pointer to hardware state information.
	 * If zero, the user is probably doing a longjmp.
	 */
	if ((rf = scp->sc_ap) == 0)
		goto restore;

	/*
	 * See if there is anything to do before we go to the
	 * expense of copying in close to 1/2K of data
	 */
	flags = fuword((void *)rf);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): sc_ap %x flags %x\n",
		    p->p_pid, rf, flags);
#endif
	/* fuword failed (bogus sc_ap value). */
	if (flags == -1)
		return EINVAL;

	if (flags == 0 || copyin((void *)rf, &tstate, sizeof(tstate)) != 0)
		goto restore;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sigreturn(%d): ssp %p usp %x scp %p ft %d\n",
		    p->p_pid, &flags, scp->sc_sp, SCARG(uap, sigcntxp),
		    (flags & SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
#endif
	/*
	 * Restore long stack frames.  Note that we do not copy
	 * back the saved SR or PC, they were picked up above from
	 * the sigcontext structure.
	 */
	if (flags & SS_RTEFRAME) {
		register int sz;

		/* grab frame type and validate */
		sz = tstate.ss_frame.f_format;
		if (sz > 15 || (sz = exframesize[sz]) < 0
		    || frame->f_stackadj < sz)
			return EINVAL;
		frame->f_stackadj -= sz;
		frame->f_format = tstate.ss_frame.f_format;
		frame->f_vector = tstate.ss_frame.f_vector;
		memcpy(&frame->F_u, &tstate.ss_frame.F_u, sz);
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sigreturn(%d): copy in %d of frame type %d\n",
			    p->p_pid, sz, tstate.ss_frame.f_format);
#endif
	}

	/*
	 * Restore most of the users registers except for A6 and SP
	 * which will be handled below.
	 */
	if (flags & SS_USERREGS)
		memcpy(frame->f_regs, tstate.ss_frame.f_regs,
		    sizeof(frame->f_regs) - (2 * NBPW));

	/*
	 * Restore the original FP context
	 */
	if (fputype && (flags & SS_FPSTATE))
		m68881_restore(&tstate.ss_fpstate);

restore:
	/*
	 * Restore the user supplied information.
	 * This should be at the last so that the error (EINVAL)
	 * is reported to the sigreturn caller, not to the
	 * jump destination.
	 */
	frame->f_regs[SP] = scp->sc_sp;
	frame->f_regs[A6] = scp->sc_fp;
	frame->f_pc = scp->sc_pc;
	frame->f_sr = scp->sc_ps;

	mutex_enter(p->p_lock);

	/* Restore signal stack. */
	if (scp->sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &scp->sc_mask, 0);
	mutex_exit(p->p_lock);

#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
		printf("sigreturn(%d): copied in FP state (%x) at %p\n",
		    p->p_pid, *(u_int *)&tstate.ss_fpstate,
		    &tstate.ss_fpstate);
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return EJUSTRETURN;
}
/*
 * Code that the child process
 * executes to implement the command
 * of the parent process in tracing.
 *
 * The parent and child communicate through the global `ipc' area:
 * the parent posts a request (ip_req/ip_addr/ip_data) and this
 * routine, running in the child's context, performs it and reports
 * the result back in ip_data (or -1 in ip_req on error).
 * Returns 1 when the child should resume execution (PT_STEP /
 * PT_CONTINUE), 0 otherwise.
 */
procxmt()
{
	register int i;
	register *p;
	register struct text *xp;

	/* Only act if the posted request is actually addressed to us. */
	if (ipc.ip_lock != u.u_procp->p_pid)
		return (0);
	u.u_procp->p_slptime = 0;
	/* Consume the request so it is not re-executed. */
	i = ipc.ip_req;
	ipc.ip_req = 0;
	switch (i) {

	case PT_READ_I:			/* read the child's text space */
		if (!useracc((caddr_t)ipc.ip_addr, 4, B_READ))
			goto error;
		ipc.ip_data = fuiword((caddr_t)ipc.ip_addr);
		break;

	case PT_READ_D:			/* read the child's data space */
		if (!useracc((caddr_t)ipc.ip_addr, 4, B_READ))
			goto error;
		ipc.ip_data = fuword((caddr_t)ipc.ip_addr);
		break;

	case PT_READ_U:			/* read the child's u. */
		i = (int)ipc.ip_addr;
		/* Offset must lie within the u-area pages. */
		if (i<0 || i >= ctob(UPAGES))
			goto error;
		ipc.ip_data = *(int *)PHYSOFF(&u, i);
		break;

	case PT_WRITE_I:		/* write the child's text space */
		/*
		 * If text, must assure exclusive use
		 */
		if (xp = u.u_procp->p_textp) {
			/* Shared or sticky text cannot be patched. */
			if (xp->x_count!=1 || xp->x_iptr->i_mode&ISVTX)
				goto error;
			xp->x_flag |= XTRC;
		}
		i = -1;
		if ((i = suiword((caddr_t)ipc.ip_addr, ipc.ip_data)) < 0) {
			/*
			 * Text is normally read-only: temporarily make the
			 * affected page(s) writable, retry, then protect
			 * them again.
			 */
			if (chgprot((caddr_t)ipc.ip_addr, RW) &&
			    chgprot((caddr_t)ipc.ip_addr+(sizeof(int)-1), RW))
				i = suiword((caddr_t)ipc.ip_addr, ipc.ip_data);
			(void) chgprot((caddr_t)ipc.ip_addr, RO);
			(void) chgprot((caddr_t)ipc.ip_addr+(sizeof(int)-1), RO);
		}
		if (i < 0)
			goto error;
		if (xp)
			xp->x_flag |= XWRIT;
		break;

	case PT_WRITE_D:		/* write the child's data space */
		/* Probe writability first with a harmless store of 0. */
		if (suword((caddr_t)ipc.ip_addr, 0) < 0)
			goto error;
		(void) suword((caddr_t)ipc.ip_addr, ipc.ip_data);
		break;

	case PT_WRITE_U:		/* write the child's u. */
		i = (int)ipc.ip_addr;
		p = (int *)PHYSOFF(&u, i);
		/* Only the saved trace registers may be written... */
		for (i=0; i<NIPCREG; i++)
			if (p == &u.u_ar0[ipcreg[i]])
				goto ok;
		/* ...or the PS, with the privileged bits forced sane. */
		if (p == &u.u_ar0[PS]) {
			ipc.ip_data |= PSL_USERSET;
			ipc.ip_data &= ~PSL_USERCLR;
			goto ok;
		}
		goto error;

	ok:
		*p = ipc.ip_data;
		break;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		/* An addr of 1 means "continue from where it stopped". */
		if ((int)ipc.ip_addr != 1)
			u.u_ar0[PC] = (int)ipc.ip_addr;
		if ((unsigned)ipc.ip_data > NSIG)
			goto error;
		u.u_procp->p_cursig = ipc.ip_data;	/* see issig */
		if (i == PT_STEP)
			u.u_ar0[PS] |= PSL_T;
		wakeup((caddr_t)&ipc);
		return (1);

	case PT_KILL:			/* kill the child process */
		wakeup((caddr_t)&ipc);
		exit(u.u_procp->p_cursig);

	default:
	error:
		/* Signal failure back to the parent. */
		ipc.ip_req = -1;
	}
	wakeup((caddr_t)&ipc);
	return (0);
}