/*
 * exit -- death of process.
 *
 * System-call entry point for exit(2).  Packs the user-supplied return
 * value into a wait(2)-style status word (low signal byte 0, i.e. a
 * normal exit) and hands the calling thread to exit1(), which tears the
 * process down and never returns.
 */
void
sys_sys_exit(struct thread *td, struct sys_exit_args *uap)
{
	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
// Regression test: when asked to rotate logs (-f/-n/-r) into a directory
// that does not exist, logcat must fail and exit with status 1 rather
// than hanging or succeeding silently.  system() reports the child's
// wait status, so the expected value is W_EXITCODE(1, 0).
TEST(logcat, logrotate_nodir) {
    // expect logcat to error out on writing content and exit(1) for nodir
    EXPECT_EQ(W_EXITCODE(1, 0),
              system("logcat -b all -d"
                     " -f /das/nein/gerfingerpoken/logcat/log.txt"
                     " -n 256 -r 1024"));
}
/*
 * Minimal waitpid()-style emulation on top of Win32 wait primitives.
 *
 * Supported cases: pid > 0 (wait for one specific child) and pid == -1
 * (wait for any child).  WNOHANG/WUNTRACED/WCONTINUED and rusage
 * reporting are unimplemented and merely logged.
 *
 * On success, removes the reaped child from the process-wide child
 * tables, stores a wait(2)-style status via *status (if non-NULL), and
 * returns the child's pid.  On failure, returns a negated errno value.
 */
static pid_t process_wait(pid_t pid, int *status, int options, struct rusage *rusage)
{
	if (options & WNOHANG)
		log_error("Unhandled option WNOHANG\n");
	if (options & WUNTRACED)
		log_error("Unhandled option WUNTRACED\n");
	if (options & WCONTINUED)
		log_error("Unhandled option WCONTINUED\n");
	if (rusage)
		log_error("rusage not supported.\n");

	int id = -1;
	if (pid > 0) {
		/* Wait for one specific child. */
		for (int i = 0; i < process->child_count; i++)
			if (process->child_pids[i] == pid) {
				/* The unused 'result' local was dropped; the wait is
				 * blocking (INFINITE) so the handle is signaled on return. */
				WaitForSingleObject(process->child_handles[i], INFINITE);
				id = i;
				break;
			}
		if (id == -1) {
			log_warning("pid %d is not a child.\n", pid);
			return -ECHILD;
		}
	} else if (pid == -1) {
		/* Wait for any child. */
		if (process->child_count == 0) {
			log_warning("No childs.\n");
			return -ECHILD;
		}
		DWORD result = WaitForMultipleObjects(process->child_count,
			process->child_handles, FALSE, INFINITE);
		if (result < WAIT_OBJECT_0 ||
		    result >= WAIT_OBJECT_0 + process->child_count) {
			log_error("WaitForMultipleObjects(): Unexpected return.\n");
			return -1;
		}
		id = result - WAIT_OBJECT_0;
	} else {
		/*
		 * BUG FIX: the original format string contained a %d
		 * conversion but passed no corresponding argument, which is
		 * undefined behavior in a printf-style call.  Supply pid.
		 */
		log_error("pid %d unhandled\n", pid);
		return -EINVAL;
	}

	DWORD exitCode;
	GetExitCodeProcess(process->child_handles[id], &exitCode);
	CloseHandle(process->child_handles[id]);
	pid = process->child_pids[id];

	/* Compact the child tables over the reaped slot. */
	for (int i = id; i + 1 < process->child_count; i++) {
		process->child_pids[i] = process->child_pids[i + 1];
		process->child_handles[i] = process->child_handles[i + 1];
	}
	process->child_count--;

	if (status)
		*status = W_EXITCODE(exitCode, 0);
	return pid;
}
/*
 * Extended exit --
 *	Death of a lwp or process with optional bells and whistles.
 *
 * uap->how packs two fields: EXTEXIT_ACTION() selects what to do with
 * uap->status on the way out (nothing, or store it at uap->addr in user
 * memory), and EXTEXIT_WHO() selects whether only the calling lwp or the
 * whole process dies.  Returns EINVAL for unknown action/who values, or
 * a copyout error for EXTEXIT_SETINT; otherwise it does not return.
 *
 * MPALMOSTSAFE
 */
int
sys_extexit(struct extexit_args *uap)
{
	struct proc *p = curproc;
	int action, who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (who) {
	case EXTEXIT_PROC:
	case EXTEXIT_LWP:
		break;
	default:
		return (EINVAL);
	}

	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		/* Publish the exit status to user memory before dying. */
		error = copyout(&uap->status, uap->addr, sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	lwkt_gettoken(&p->p_token);

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * SZOMB!
		 */
		if (p->p_nthreads > 1) {
			lwp_exit(0);	/* called w/ p_token held */
			/* NOT REACHED */
		}
		/* else last lwp in proc:  do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		/* Full-process exit; token is released since exit1() does
		 * not return to us. */
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));
		/* NOTREACHED */
	}

	/* NOTREACHED */
	lwkt_reltoken(&p->p_token);	/* safety */
}
/*
 * Decode a wait(2) status word into a symbolic C expression such as
 * "[{WIFEXITED(s) && WEXITSTATUS(s) == 0}]" on strace's output stream.
 *
 * Returns 1 if the status indicates a normal exit (WIFEXITED), else 0.
 * Each recognized W*() predicate masks the bits it explains out of
 * 'status'; any leftover bits are printed raw at the end, with bits
 * 16..31 additionally decoded as a PTRACE_EVENT_* value (the kernel
 * reports ptrace events in that range).
 */
static int
printstatus(int status)
{
	int exited = 0;

	/*
	 * Here is a tricky presentation problem. This solution
	 * is still not entirely satisfactory but since there
	 * are no wait status constructors it will have to do.
	 */
	if (WIFSTOPPED(status)) {
		int sig = WSTOPSIG(status);
		/* 0x80 in the stop signal is the ptrace-event flag bit. */
		tprintf("[{WIFSTOPPED(s) && WSTOPSIG(s) == %s%s}",
			signame(sig & 0x7f),
			sig & 0x80 ? " | 0x80" : "");
		status &= ~W_STOPCODE(sig);
	}
	else if (WIFSIGNALED(status)) {
		tprintf("[{WIFSIGNALED(s) && WTERMSIG(s) == %s%s}",
			signame(WTERMSIG(status)),
			WCOREDUMP(status) ? " && WCOREDUMP(s)" : "");
		status &= ~(W_EXITCODE(0, WTERMSIG(status)) | WCOREFLAG);
	}
	else if (WIFEXITED(status)) {
		tprintf("[{WIFEXITED(s) && WEXITSTATUS(s) == %d}",
			WEXITSTATUS(status));
		exited = 1;
		status &= ~W_EXITCODE(WEXITSTATUS(status), 0);
	}
#ifdef WIFCONTINUED
	else if (WIFCONTINUED(status)) {
		tprints("[{WIFCONTINUED(s)}");
		status &= ~W_CONTINUED;
	}
#endif
	else {
		/* Nothing recognizable; dump the raw value and give up. */
		tprintf("[%#x]", status);
		return 0;
	}

	/* Report any bits left unexplained by the predicate above so no
	 * information is silently dropped. */
	if (status) {
		unsigned int event = (unsigned int) status >> 16;
		if (event) {
			tprints(" | ");
			printxval(ptrace_events, event, "PTRACE_EVENT_???");
			tprints(" << 16");
			status &= 0xffff;
		}
		if (status)
			tprintf(" | %#x", status);
	}
	tprints("]");

	return exited;
}
/*
 * execve(2) for the Linux emulation layer.
 *
 * Copies in and translates the Linux path, runs the native kern_execve(),
 * and on success zeroes the 64-bit syscall result register (Linux userland
 * treats that register at exec time as an atexit handler, so it must be 0)
 * and initializes per-process emulation data for Linux ELF images.
 * A negative error from the exec path means the original image has already
 * been destroyed, so the process aborts via exit1() with SIGABRT.
 *
 * MPALMOSTSAFE
 */
int
sys_linux_execve(struct linux_execve_args *args)
{
	struct nlookupdata nd;
	struct image_args exec_args;
	char *path;
	int error;

	error = linux_copyin_path(args->path, &path, LINUX_PATH_EXISTS);
	if (error)
		return (error);
#ifdef DEBUG
	if (ldebug(execve))
		kprintf(ARGS(execve, "%s"), path);
#endif
	get_mplock();
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	bzero(&exec_args, sizeof(exec_args));
	if (error == 0) {
		error = exec_copyin_args(&exec_args, path, PATH_SYSSPACE,
					args->argp, args->envp);
	}
	if (error == 0)
		error = kern_execve(&nd, &exec_args);
	nlookup_done(&nd);

	/*
	 * The syscall result is returned in registers to the new program.
	 * Linux will register %edx as an atexit function and we must be
	 * sure to set it to 0.  XXX
	 */
	if (error == 0) {
		args->sysmsg_result64 = 0;
		if (curproc->p_sysent == &elf_linux_sysvec)
			error = emuldata_init(curproc, NULL, 0);
	}

	exec_free_args(&exec_args);
	linux_free_path(&path);

	if (error < 0) {
		/* We hit a lethal error condition.  Let's die now. */
		exit1(W_EXITCODE(0, SIGABRT));
		/* NOTREACHED */
	}
	/* Only reached on success or a recoverable (>= 0) error. */
	rel_mplock();

	return(error);
}
int main(int argc, char **argv) { int r, pid; u_int envid; char *prog; int reap_flag = 0; prog = argv[0]; argv++; argc--; if (argc >= 1 && !strcmp(*argv,"-r")) { reap_flag = 1; argc--; argv++; } if (argc > 1 || !(pid = atoi(*argv))) { printf("Usage: %s [-r] <pid>\n",prog); return -1; } envid = pid2envid(pid); if (envid == 0) { printf("pid %d not registered\n",pid); return -1; } #ifdef PROCD { extern int proc_pid_exit(pid_t pid, int); proc_pid_exit(pid, (W_EXITCODE(0,SIGKILL))); } if (reap_flag) { extern int proc_pid_reap(pid_t pid); printf("reaping pid: %d\n",pid); proc_pid_reap(pid); } #endif if (r = sys_env_free(CAP_ROOT, envid)) printf("Not killed\n"); else printf("Killed\n"); return r; }
/* A process is dying.  Send SIGCHLD to the parent.
   Wake the parent if it is waiting for us to exit.

   Side effects: folds this process's rusage into the parent's aggregate
   child-usage stats, and — if the process never went through a normal
   exit path (!p_exiting) — synthesizes a "killed by SIGKILL" wait status
   for it before signaling the parent. */
void
alert_parent (struct proc *p)
{
  /* We accumulate the aggregate usage stats of all our dead children. */
  rusage_add (&p->p_parent->p_child_rusage, &p->p_rusage);

  send_signal (p->p_parent->p_msgport, SIGCHLD, p->p_parent->p_task);

  if (!p->p_exiting)
    {
      /* No exit status was recorded; report as SIGKILL.  The -1 sigcode
         presumably flags the status as synthesized — TODO confirm against
         the wait() implementation. */
      p->p_status = W_EXITCODE (0, SIGKILL);
      p->p_sigcode = -1;
    }

  if (p->p_parent->p_waiting)
    {
      condition_broadcast (&p->p_parent->p_wakeup);
      p->p_parent->p_waiting = 0;
    }
}
/*
 * exit(2) for the Linux emulation layer: detach the per-thread emulation
 * state, then terminate.  kern_thr_exit() only returns for the last
 * thread of the process, in which case exit1() ends the whole process
 * with the user-supplied exit code packed as a normal-exit wait status.
 */
int
linux_exit(struct thread *td, struct linux_exit_args *args)
{
	struct linux_emuldata *em;

	em = em_find(td);
	KASSERT(em != NULL, ("exit: emuldata not found.\n"));

	LINUX_CTR2(exit, "thread(%d) (%d)", em->em_tid, args->rval);

	linux_thread_detach(td);

	/*
	 * XXX. When the last two threads of a process
	 * exit via pthread_exit() try thr_exit() first.
	 */
	kern_thr_exit(td);
	exit1(td, W_EXITCODE(args->rval, 0));
	/* NOTREACHED */
}
/*
 * Cause a kernel thread to exit.  Assumes the exiting thread is the
 * current context.
 *
 * A nonzero exit code is unexpected for a kernel thread (its parent,
 * proc0, does nothing useful with it), so it is logged before the
 * thread is torn down via exit1().
 */
void
kthread_exit(int ecode)
{
	/*
	 * XXX What do we do with the exit code?  Should we even bother
	 * XXX with it?  The parent (proc0) isn't going to do much with
	 * XXX it.
	 */
	if (ecode != 0) {
		printf("WARNING: thread `%s' (%d) exits with status %d\n",
		    curproc->p_comm, curproc->p_pid, ecode);
	}

	exit1(curproc, W_EXITCODE(ecode, 0));

	/*
	 * XXX Fool the compiler.  Making exit1() __dead is a can
	 * XXX of worms right now.
	 */
	for (;;)
		continue;
}
/*
 * Fork and exec a helper command on behalf of a configd plugin.
 *
 * Under the activeChildren mutex: forks, and in the child either runs the
 * caller-supplied setup callback or closes every descriptor and points
 * fds 0-2 at /dev/null, drops gid/uid to the requested identity (using a
 * pre-fork cached passwd entry for initgroups), forces a sane PATH, and
 * execs 'path' with 'argv'.  In the parent, the setup callback also runs
 * (with the child's pid), and if a completion callout was given the child
 * is linked onto the activeChildren list so the reaper can deliver its
 * exit status.  Returns the child's pid, or -1 with errno set if fork
 * failed.
 */
pid_t
_SCDPluginExecCommand2(SCDPluginExecCallBack callout,
		       void *context,
		       uid_t uid,
		       gid_t gid,
		       const char *path,
		       char * const argv[],
		       SCDPluginExecSetup setup,
		       void *setupContext)
{
	char		buf[1024];
	pid_t		pid;
	struct passwd	pwd;
	struct passwd	*result = NULL;
	char		*username = NULL;

	// grab the activeChildren mutex
	pthread_mutex_lock(&lock);

	// cache the getpwuid_r result here to avoid spinning that can happen
	// when calling it between fork and execv.
	if ((getpwuid_r(uid, &pwd, buf, sizeof(buf), &result) == 0) &&
	    (result != NULL)) {
		username = result->pw_name;
	}

	// if needed, initialize
	if (childReaped == NULL) {
		_SCDPluginExecInit();
	}

	pid = fork();
	switch (pid) {
		case -1 : {	/* if error */
			int	status;

			// preserve errno across the diagnostic printf
			status = errno;
			printf("fork() failed: %s\n", strerror(status));
			errno = status;
			break;
		}

		case 0 : {	/* if child */
			gid_t	egid;
			uid_t	euid;
			int	i;
			int	status;

			// NOTE: in the child, pid is 0 here, so the setup
			// callback can distinguish child from parent.
			if (setup != NULL) {
				(setup)(pid, setupContext);
			} else {
				/* close any open FDs */
				for (i = getdtablesize()-1; i>=0; i--) close(i);

				// stdin/stdout/stderr -> /dev/null
				open(_PATH_DEVNULL, O_RDWR, 0);
				dup(0);
				dup(0);
			}

			egid = getegid();
			euid = geteuid();

			if (egid != gid) {
				(void) setgid(gid);
			}

			// initgroups uses the passwd entry cached before fork
			if (((euid != uid) || (egid != gid)) && username) {
				initgroups(username, gid);
			}

			if (euid != uid) {
				(void) setuid(uid);
			}

			/* ensure that our PATH environment variable is somewhat reasonable */
			if (setenv("PATH", "/bin:/sbin:/usr/bin:/usr/sbin", 0) == -1) {
				printf("setenv() failed: %s\n", strerror(errno));
				exit(EX_OSERR);
			}

			/* execute requested command */
			(void) execv(path, argv);

			/* if the execv failed */
			// NOTE(review): W_EXITCODE + WEXITSTATUS round-trips
			// errno through a wait-status word, i.e. the child
			// exits with errno as its status — verify intent.
			status = W_EXITCODE(errno, 0);
			_exit (WEXITSTATUS(status));
		}

		default : {	/* if parent */
			if (setup != NULL) {
				(setup)(pid, setupContext);
			}

			if (callout != NULL) {
				childInfoRef	child;

				// create child process info
				child = CFAllocatorAllocate(NULL, sizeof(struct childInfo), 0);
				bzero(child, sizeof(struct childInfo));
				child->pid = pid;
				child->callout = callout;
				child->context = context;

				// add the new child to the activeChildren list
				child->next = activeChildren;
				activeChildren = child;
			}
			break;
		}
	}

	// release the activeChildren mutex
	pthread_mutex_unlock(&lock);

	return pid;
}
/*
 * Send an interrupt to process.
 *
 * Builds and pushes a signal-delivery frame (thread state, ucontext,
 * siginfo, and a sigreturn validation token) onto the target thread's
 * user stack — the alternate signal stack if one is armed for this
 * signal — and redirects the thread to the user trampoline for 'sig'.
 * Handles both 32- and 64-bit userlands.  On any failure the handler
 * for SIGILL is reset to default and SIGILL is posted instead.
 * Called with the proc locked; returns with it locked.
 */
void
sendsig(struct proc * p, user_addr_t catcher, int sig, int mask,
    __unused uint32_t code)
{
	union {
		struct ts32 {
			arm_thread_state_t ss;
		} ts32;
#if defined(__arm64__)
		struct ts64 {
			arm_thread_state64_t ss;
		} ts64;
#endif
	} ts;
	union {
		struct user_sigframe32 uf32;
#if defined(__arm64__)
		struct user_sigframe64 uf64;
#endif
	} user_frame;

	user_siginfo_t sinfo;
	user_addr_t sp = 0, trampact;
	struct sigacts *ps = p->p_sigacts;
	int oonstack, infostyle;
	thread_t th_act;
	struct uthread *ut;
	user_size_t stack_size = 0;
	user_addr_t p_uctx, token_uctx;
	kern_return_t kr;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	bzero(&ts, sizeof(ts));
	bzero(&user_frame, sizeof(user_frame));

	/* SA_SIGINFO determines the flavor of context handed to userland. */
	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;
	else
		infostyle = UC_TRAD;

	trampact = ps->ps_trampact[sig];
	oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

	/*
	 * Get sundry thread state.
	 */
	if (proc_is64bit_data(p)) {
#ifdef __arm64__
		if (sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx) != 0) {
			goto bad2;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		if (sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx) != 0) {
			goto bad2;
		}
	}

	/*
	 * Figure out where our new stack lives.
	 */
	if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		/* Deliver on the alternate stack (sigaltstack). */
		sp = ps->ps_sigstk.ss_sp;
		sp += ps->ps_sigstk.ss_size;
		stack_size = ps->ps_sigstk.ss_size;
		ps->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		/*
		 * Get stack pointer, and allocate enough space
		 * for signal handler data.
		 */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
			sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN) & ~0xf; /* Make sure to align to 16 bytes and respect red zone */
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
			sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
			sp &= ~0xf; /* Make sure to align to 16 bytes for armv7k */
#endif
		}
	}

	proc_unlock(p);

	/*
	 * Fill in ucontext (points to mcontext, i.e. thread states).
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp,
		    (user64_size_t)stack_size,
		    (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp,
		    (user32_size_t)stack_size,
		    (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
	}

	/*
	 * Setup siginfo.
	 */
	bzero((caddr_t) & sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;

	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sinfo.si_addr = ts.ts64.ss.pc;
		sinfo.pad[0] = ts.ts64.ss.sp;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sinfo.si_addr = ts.ts32.ss.pc;
		sinfo.pad[0] = ts.ts32.ss.sp;
	}

	/* Per-signal si_code / si_addr refinement. */
	switch (sig) {
	case SIGILL:
#ifdef BER_XXX
		if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
			sinfo.si_code = ILL_ILLOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
			sinfo.si_code = ILL_PRVOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
			sinfo.si_code = ILL_ILLTRP;
		else
			sinfo.si_code = ILL_NOOP;
#else
		sinfo.si_code = ILL_ILLTRP;
#endif
		break;

	case SIGFPE:
		break;

	case SIGBUS:
		/* Report the faulting address from the exception state. */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

		sinfo.si_code = BUS_ADRALN;
		break;

	case SIGSEGV:
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

#ifdef BER_XXX
		/* First check in srr1 and then in dsisr */
		if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else
			sinfo.si_code = SEGV_MAPERR;
#else
		sinfo.si_code = SEGV_ACCERR;
#endif
		break;

	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					/* NOTE(review): passing the full status as
					 * both W_EXITCODE arguments looks suspicious
					 * (one would expect the termination signal
					 * in the second slot) — confirm upstream. */
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) |
		    (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
		p->p_xhighbits = 0;
		break;
	}
	}

#if CONFIG_DTRACE
	sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

	/*
	 * Copy signal-handling frame out to user space, set thread state.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx64(). We fill in the sinfo now.
		 */
		siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn
		 */
		token_uctx = p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;

		if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) {
			goto bad;
		}

		if (sendsig_set_thread_state64(&ts.ts64.ss,
		    catcher, infostyle, sig,
		    (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
		    (user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS)
			goto bad;

#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx32().  We fill in the sinfo, *pointer*
		 * to uctx and token now.
		 */
		siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn
		 */
		token_uctx = (user_addr_t)p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;

		user_frame.uf32.puctx = (user32_addr_t)p_uctx;
		user_frame.uf32.token = token;

		if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
			goto bad;
		}

		if (sendsig_set_thread_state32(&ts.ts32.ss,
		    CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig,
		    (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
		    CAST_DOWN_EXPLICIT(user32_addr_t, trampact),
		    CAST_DOWN_EXPLICIT(user32_addr_t, sp),
		    th_act) != KERN_SUCCESS)
			goto bad;
	}

	proc_lock(p);
	return;

bad:
	proc_lock(p);
bad2:
	/* Delivery failed: reset SIGILL to default and post it instead. */
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
}
/*
 * execve(2): replace the current process image.
 *
 * Phases: stop sibling threads; look up and check the new image
 * (check_exec); copy argv/envp strings into a kernel buffer; verify the
 * argument block fits the new stack; then COMMIT — from here on any
 * failure kills the process (exec_abort path) — rebuild the vmspace from
 * the image's vmcmds, copy out args/ps_strings, reset per-process state
 * (fds, signals, timers, creds incl. set[ug]id handling), set registers,
 * and fire emulation hooks.  Pre-commit failures unwind through the
 * bad/freehdr labels and return an errno; post-commit failures end in
 * exit1(SIGABRT).
 */
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = pr->ps_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC, 0);
			tmpfap++; argc++;
		}
		free(pack.ep_fa, M_EXEC, 0);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}

	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	/* Copy each argv string into the kernel buffer, NUL-separated. */
	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * pr_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	vm = pr->ps_vmspace;

	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* old "stackgap" is gone now */
	pr->ps_stackgap = 0;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;

	/* set command name & other accounting info */
	memset(p->p_comm, 0, sizeof(p->p_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		/* release a vfork()ing parent waiting on us */
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	atomic_clearbits_int(&pr->ps_flags, PS_TAMED);
	tame_dropwpaths(pr);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	if (pr->ps_flags & PS_SUGIDEXEC) {
		/* cancel pending interval timers for suid/sgid images */
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
	    pr->ps_emul != pack.ep_emul)
		(*pr->ps_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

#if defined(KTRACE) && defined(COMPAT_LINUX)
	/* update ps_emul, but don't ktrace it if native-execing-native */
	if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) {
		pr->ps_emul = pack.ep_emul;

		if (KTRPOINT(p, KTR_EMUL))
			ktremul(p);
	}
#else
	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;
#endif

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* Pre-commit failure: unwind everything and return an error. */
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
#if NSYSTRACE > 0
clrflag:
#endif
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);
	/* Post-commit failure: the process dies as if aborted. */
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	return (0);
}
/**
 * Read a request, run the compiler, and send a response.
 *
 * Serves one distcc job on the connected socket pair (in_fd/out_fd):
 * reads the request header, argv, and preprocessed source; runs the
 * compiler with stdout/stderr captured to temp files; then streams back
 * the result header, compiler status, captured output, and the object
 * file (or a zero-length DOTO token on failure).  The special argv
 * "--host-info" short-circuits to a host-information reply.  Server-side
 * log messages are redirected into the stderr temp file so the client
 * sees them too.  Returns 0 on success or a dcc_exitcode-style error.
 **/
static int dcc_run_job(int in_fd, int out_fd)
{
	char **argv;
	int status;
	char *temp_i, *temp_o, *err_fname, *out_fname;
	int ret, compile_ret;
	char *orig_input, *orig_output;
	pid_t cc_pid;
	enum dcc_protover protover;
	enum dcc_compress compr;
	dcc_indirection indirect;

	if ((ret = dcc_make_tmpnam("distcc", ".stderr", &err_fname)))
		goto out_cleanup;
	if ((ret = dcc_make_tmpnam("distcc", ".stdout", &out_fname)))
		goto out_cleanup;
	dcc_remove_if_exists(err_fname);
	dcc_remove_if_exists(out_fname);

	/* Capture any messages relating to this compilation to the same file as
	 * compiler errors so that they can all be sent back to the client. */
	dcc_add_log_to_file(err_fname);

	/* Ignore SIGPIPE; we consistently check error codes and will see the
	 * EPIPE.  Note that it is set back to the default behaviour when spawning
	 * a child, to handle cases like the assembler dying while its being fed
	 * from the compiler */
	dcc_ignore_sigpipe(1);

	/* Allow output to accumulate into big packets. */
	tcp_cork_sock(out_fd, 1);

	if ((ret = dcc_r_request_header(in_fd, &protover))
	    || (ret = dcc_r_argv(in_fd, &argv))
	    || (ret = dcc_scan_args(argv, &orig_input, &orig_output, &argv)))
		goto out_cleanup;

	if (strcmp(argv[0], "--host-info") == 0) {
		/* Host-information query; no compilation.  On failure an
		 * empty DOTO still terminates the exchange. */
		if ((ret = dcc_x_result_header(out_fd, protover))
		    || (ret = dcc_send_host_info(out_fd)))
			dcc_x_token_int(out_fd, "DOTO", 0);
	} else {
		rs_trace("output file %s", orig_output);

		if ((ret = dcc_input_tmpnam(orig_input, &temp_i)))
			goto out_cleanup;
		if ((ret = dcc_make_tmpnam("distccd", ".o", &temp_o)))
			goto out_cleanup;

		/* LZO compression appeared in protocol version 2. */
		compr = (protover == 2) ? DCC_COMPRESS_LZO1X : DCC_COMPRESS_NONE;

		if ((ret = dcc_r_token_file(in_fd, "DOTI", temp_i, compr))
		    || (ret = dcc_set_input(argv, temp_i))
		    || (ret = dcc_set_output(argv, temp_o)))
			goto out_cleanup;

		if ((ret = dcc_check_compiler_masq(argv[0])))
			goto out_cleanup;

		indirect.in_fd = in_fd;
		indirect.out_fd = out_fd;
		if ((compile_ret = dcc_spawn_child(argv, &cc_pid,
			"/dev/null", out_fname, err_fname, &indirect))
		    || (compile_ret = dcc_collect_child("cc", cc_pid, &status))) {
			/* We didn't get around to finding a wait status from the actual
			 * compiler */
			status = W_EXITCODE(compile_ret, 0);
		}

		if ((ret = dcc_x_result_header(out_fd, protover))
		    || (ret = dcc_send_system_info(out_fd))
		    || (ret = dcc_send_compiler_version(out_fd, argv[0]))
		    || (ret = dcc_x_cc_status(out_fd, status))
		    || (ret = dcc_x_file(out_fd, err_fname, "SERR", compr, NULL))
		    || (ret = dcc_x_file(out_fd, out_fname, "SOUT", compr, NULL))
		    || WIFSIGNALED(status)
		    || WEXITSTATUS(status)) {
			/* Something went wrong, so send DOTO 0 */
			dcc_x_token_int(out_fd, "DOTO", 0);
		} else {
			ret = dcc_x_file(out_fd, temp_o, "DOTO", compr, NULL);
		}

		dcc_critique_status(status, argv[0], orig_input,
		    dcc_hostdef_local, 0);
	}
	tcp_cork_sock(out_fd, 0);

	rs_log(RS_LOG_INFO|RS_LOG_NONAME, "job complete");

out_cleanup:
	dcc_remove_log_to_file();
	dcc_cleanup_tempfiles();

	return ret;
}
/*
 * ptrace system call (Mach/XNU flavor).
 *
 * p      the calling (tracing) process
 * uap    req/pid/addr/data arguments from user space
 * retval syscall return cell (used only by the PT_DENY_ATTACH exit path)
 *
 * Unlike classic BSD, most requests are executed directly here rather
 * than being relayed through the stopped child.  Returns 0 or an errno.
 */
int
ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t task;
	thread_t th_act;
	struct uthread *ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value32, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			/* Already traced and asking to deny attachment:
			 * the process is forcibly exited with ENOTSUP. */
			proc_unlock(p);
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					      p->p_pid, W_EXITCODE(ENOTSUP, 0), 4, 0, 0);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		/* Superuser-only: force quota enforcement on the current proc. */
		if (kauth_cred_issuser(kauth_cred_get())) {
			OSBitOrAtomic(P_FORCEQUOTA, &t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */
	if (uap->req == PT_TRACE_ME) {
retry_trace_me:;
		proc_t pproc = proc_parent(p);
		if (pproc == NULL)
			return (EINVAL);
#if CONFIG_MACF
		/*
		 * NB: Cannot call kauth_authorize_process(..., KAUTH_PROCESS_CANTRACE, ...)
		 *     since that assumes the process being checked is the current process
		 *     when, in this case, it is the current process's parent.
		 *     Most of the other checks in cantrace() don't apply either.
		 */
		if ((error = mac_proc_check_debug(pproc, p)) == 0) {
#endif
			proc_lock(p);
			/* Make sure the process wasn't re-parented. */
			if (p->p_ppid != pproc->p_pid) {
				proc_unlock(p);
				proc_rele(pproc);
				goto retry_trace_me;
			}
			SET(p->p_lflag, P_LTRACED);
			/* Non-attached case, our tracer is our parent. */
			p->p_oppid = p->p_ppid;
			proc_unlock(p);
			/* Child and parent will have to be able to run modified
			 * code. */
			cs_allow_invalid(p);
			cs_allow_invalid(pproc);
#if CONFIG_MACF
		}
#endif
		proc_rele(pproc);

		return (error);
	}
	if (uap->req == PT_SIGEXC) {
		/* Ask for Mach exceptions instead of Unix signals; only
		 * valid once the process is already being traced. */
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/*
	 * We do not want ptrace to do anything with kernel or launchd
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		/* ATTACHEXC is ATTACH plus exception-style signal delivery. */
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
#pragma clang diagnostic pop
		int err;

		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
					     t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc)
				SET(t->p_lflag, P_LSIGEXC);

			t->p_oppid = t->p_ppid;
			/* Check whether child and parent are allowed to run modified
			 * code (they'll have to) */
			proc_unlock(t);
			cs_allow_invalid(t);
			cs_allow_invalid(p);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);

			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);
			error = 0;
			goto out;
		} else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}

			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		/* Hand the child back to its original parent (or init if
		 * that parent is gone) and clear the tracing flags. */
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			if (pp != PROC_NULL) {
				proc_reparentlocked(t, pp, 1, 0);
				proc_rele(pp);
			} else {
				/* original parent exited while traced */
				proc_list_lock();
				t->p_listflag |= P_LIST_DEADPARENT;
				proc_list_unlock();
				proc_reparentlocked(t, initproc, 1, 0);
			}
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;

	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
#if CONFIG_MACF
		error = mac_proc_check_signal(p, t, SIGKILL);
		if (0 != error)
			/* NOTE(review): on MACF denial the child is still
			 * resumed (without the SIGKILL) and error reported. */
			goto resume;
#endif
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		/* force use of Mach SPIs (and task_for_pid security checks) to adjust PC */
		if (uap->addr != (user_addr_t)1) {
			error = ENOTSUP;
			goto out;
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, uap->data);
			if (0 != error)
				goto out;
#endif
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
			/*
			 * set trace bit
			 * we use sending SIGSTOP as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGSTOP);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
			/*
			 * clear trace bit if on
			 * we use sending SIGCONT as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGCONT);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}
	resume:
		/* Common resume path: record the signal in p_xstat and set
		 * the child runnable; wake a sigwait()er if there is one. */
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);

		break;

	case PT_THUPDATE:  {
		/* Update the pending-signal set of one specific thread,
		 * identified by a Mach port name in uap->addr. */
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_MACH_PORT_TO_NAME(uap->addr));
		if (th_act == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
/* * exit -- * Death of process. * * SYS_EXIT_ARGS(int rval) */ int sys_exit(struct exit_args *uap) { exit1(W_EXITCODE(uap->rval, 0)); /* NOTREACHED */ }
/* * vmm guest system call: * - init the calling thread structure * - prepare for running in non-root mode */ int sys_vmm_guest_ctl(struct vmm_guest_ctl_args *uap) { int error = 0; struct vmm_guest_options options; struct trapframe *tf = uap->sysmsg_frame; unsigned long stack_limit = USRSTACK; unsigned char stack_page[PAGE_SIZE]; clear_quickret(); switch (uap->op) { case VMM_GUEST_RUN: error = copyin(uap->options, &options, sizeof(struct vmm_guest_options)); if (error) { kprintf("%s: error copyin vmm_guest_options\n", __func__); goto out; } while(stack_limit > tf->tf_sp) { stack_limit -= PAGE_SIZE; options.new_stack -= PAGE_SIZE; error = copyin((const void *)stack_limit, (void *)stack_page, PAGE_SIZE); if (error) { kprintf("%s: error copyin stack\n", __func__); goto out; } error = copyout((const void *)stack_page, (void *)options.new_stack, PAGE_SIZE); if (error) { kprintf("%s: error copyout stack\n", __func__); goto out; } } bcopy(tf, &options.tf, sizeof(struct trapframe)); error = vmm_vminit(&options); if (error) { if (error == ENODEV) { kprintf("%s: vmm_vminit failed - " "no VMM available \n", __func__); goto out; } kprintf("%s: vmm_vminit failed\n", __func__); goto out_exit; } generic_lwp_return(curthread->td_lwp, tf); error = vmm_vmrun(); break; default: kprintf("%s: INVALID op\n", __func__); error = EINVAL; goto out; } out_exit: exit1(W_EXITCODE(error, 0)); out: return (error); }
__pid_t __libc_waitpid (__pid_t pid, int *stat_loc, int options) { __idtype_t idtype; __pid_t tmp_pid = pid; __siginfo_t infop; if (pid <= WAIT_MYPGRP) { if (pid == WAIT_ANY) { /* Request the status for any child. */ idtype = P_ALL; } else if (pid == WAIT_MYPGRP) { /* Request the status for any child process that has a pgid that's equal to that of our parent. */ tmp_pid = __getpgid (0); idtype = P_PGID; } else /* PID < -1 */ { /* Request the status for any child whose pgid is equal to the absolute value of PID. */ tmp_pid = pid & ~0; /* XXX not pseudo-insn */ idtype = P_PGID; } } else { /* Request the status for the child whose pid is PID. */ idtype = P_PID; } if (__waitid (idtype, tmp_pid, &infop, options | WEXITED | WTRAPPED) < 0) return -1; switch (infop.__code) { case EXITED: *stat_loc = W_EXITCODE (infop.__status, 0); break; case STOPPED: case TRAPPED: *stat_loc = W_STOPCODE (infop.__status); break; case KILLED: /* Don't know what to do with continue, since it isn't documented. Putting it here seemed the right place though. */ case CONTINUED: *stat_loc = infop.__status; /* FALLTHROUGH */ case CORED: *stat_loc |= WCOREFLAG; break; } /* Return the PID out of the INFOP structure instead of the one we were called with, to account for cases of being called with -1 to signify any PID. */ return infop.__pid; }
/*
 * sendsig (ARM, 32-bit user processes):
 *
 * Deliver signal `sig' to the current thread of process `p' by building
 * a signal frame (ucontext + siginfo + machine context) on the user
 * stack (or the alternate signal stack) and redirecting the thread to
 * the user-space trampoline `ua_catcher' is eventually invoked through.
 *
 * Called with the proc lock held; the lock is dropped while thread state
 * is captured and copied out, and re-taken before returning.  On any
 * failure the handler is reset to SIG_DFL and SIGILL is posted instead.
 */
void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
	user_addr_t ua_sp;
	user_addr_t ua_sip;
	user_addr_t trampact;
	user_addr_t ua_uctxp;
	user_addr_t ua_mctxp;
	user_siginfo_t sinfo32;
	struct uthread *ut;
	struct mcontext mctx32;
	struct user_ucontext32 uctx32;
	struct sigacts *ps = p->p_sigacts;
	void *state;
	arm_thread_state_t *tstate32;
	mach_msg_type_number_t state_count;

	int stack_size = 0;
	int infostyle = UC_TRAD;
	int oonstack, flavor, error;

	proc_unlock(p);

	thread_t thread = current_thread();
	ut = get_bsdthread_info(thread);

	/*
	 * Set up thread state info: capture general-purpose, exception
	 * and VFP register state into the machine context.
	 */
	flavor = ARM_THREAD_STATE;
	state = (void *)&mctx32.ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
		goto bad;

	flavor = ARM_EXCEPTION_STATE;
	state = (void *)&mctx32.es;
	state_count = ARM_EXCEPTION_STATE_COUNT;
	if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
		goto bad;

	flavor = ARM_VFP_STATE;
	state = (void *)&mctx32.fs;
	state_count = ARM_VFP_STATE_COUNT;
	if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
		goto bad;

	tstate32 = &mctx32.ss;

	/*
	 * Set the signal style: FLAVOR when the handler asked for
	 * SA_SIGINFO, TRAD otherwise.
	 */
	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;

	/*
	 * Get the signal disposition (trampoline address).
	 */
	trampact = ps->ps_trampact[sig];

	/*
	 * Figure out where our new stack lives: the alternate signal
	 * stack if requested and not already on it, else the current sp.
	 */
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		ua_sp = ut->uu_sigstk.ss_sp;
		ua_sp += ut->uu_sigstk.ss_size;
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		ua_sp = tstate32->sp;
	}

	/*
	 * Set up the stack: carve out space for the machine context,
	 * the ucontext and the siginfo, top to bottom.
	 */
	ua_sp -= UC_FLAVOR_SIZE;
	ua_mctxp = ua_sp;

	ua_sp -= sizeof(struct user_ucontext32);
	ua_uctxp = ua_sp;

	ua_sp -= sizeof(siginfo_t);
	ua_sip = ua_sp;

	/*
	 * Align the stack pointer.
	 */
	ua_sp = TRUNC_DOWN32(ua_sp, C_32_STK_ALIGN);

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uctx32.uc_onstack = oonstack;
	uctx32.uc_sigmask = mask;
	uctx32.uc_stack.ss_sp = ua_sp;
	uctx32.uc_stack.ss_size = stack_size;

	if (oonstack)
		uctx32.uc_stack.ss_flags |= SS_ONSTACK;
	uctx32.uc_link = 0;

	uctx32.uc_mcsize = UC_FLAVOR_SIZE;
	uctx32.uc_mcontext = ua_mctxp;

	/*
	 * init siginfo
	 */
	bzero((caddr_t)&sinfo32, sizeof(user_siginfo_t));
	sinfo32.si_signo = sig;
	/* Faulting sp and lr recorded for the handler's benefit. */
	sinfo32.pad[0] = tstate32->sp;
	sinfo32.si_addr = tstate32->lr;

	switch (sig) {
	case SIGILL:
		sinfo32.si_code = ILL_NOOP;
		break;
	case SIGFPE:
		sinfo32.si_code = FPE_NOOP;
		break;
	case SIGBUS:
		sinfo32.si_code = BUS_ADRALN;
		break;
	case SIGSEGV:
		sinfo32.si_code = SEGV_ACCERR;
		break;
	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo32.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo32.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo32.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo32.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo32.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo32.si_code = CLD_DUMPED;
					/* NOTE(review): passing the whole status
					 * word as both W_EXITCODE arguments looks
					 * suspect but matches the sibling PPC
					 * implementation — confirm intent. */
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo32.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo32.si_status = WEXITSTATUS(status_and_exitcode);
		break;
	}
	}

	/*
	 * Copy out context info.  (copyout returns 0 on success, which
	 * compares equal to KERN_SUCCESS.)
	 */
	if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof(struct user_ucontext32)) != KERN_SUCCESS)
		goto bad;
	if (copyout((caddr_t)&sinfo32, ua_sip, sizeof(user_siginfo_t)) != KERN_SUCCESS)
		goto bad;
	if (copyout((caddr_t)&mctx32, ua_mctxp, sizeof(struct mcontext)) != KERN_SUCCESS)
		goto bad;
	if (copyout((caddr_t)&ua_uctxp, ua_sp, sizeof(user_addr_t)) != KERN_SUCCESS)
		goto bad;

	/*
	 * Set up regsiters for the trampoline:
	 * r0..r3 carry catcher, infostyle, signal and siginfo pointer.
	 */
	tstate32->r[0] = ua_catcher;
	tstate32->r[1] = infostyle;
	tstate32->r[2] = sig;
	tstate32->r[3] = ua_sip;
	tstate32->sp = ua_sp;

	/* Low bit of the trampoline address selects ARM vs Thumb mode. */
	if (trampact & 0x01) {
		tstate32->lr = trampact;
		tstate32->cpsr = 0x10;	/* User mode */
	} else {
		trampact &= ~0x01;
		tstate32->lr = trampact;
		tstate32->cpsr = 0x10;	/* User mode */
		tstate32->cpsr |= (1 << 5);	/* T-bit */
	}

	/*
	 * Call the trampoline.
	 */
	flavor = ARM_THREAD_STATE;
	state_count = ARM_THREAD_STATE_COUNT;
	state = (void *)tstate32;
	if ((error = thread_setstatus(thread, flavor, (thread_state_t)state, state_count)) != KERN_SUCCESS)
		panic("sendsig: thread_setstatus failed, ret = %08X\n", error);

	proc_lock(p);

	return;

bad:
	/* Frame setup failed: reset to default and deliver SIGILL instead. */
	proc_lock(p);
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
	return;
}
void sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code) { kern_return_t kretn; struct mcontext mctx; user_addr_t p_mctx = USER_ADDR_NULL; /* mcontext dest. */ struct mcontext64 mctx64; user_addr_t p_mctx64 = USER_ADDR_NULL; /* mcontext dest. */ struct user_ucontext64 uctx; user_addr_t p_uctx; /* user stack addr top copy ucontext */ user_siginfo_t sinfo; user_addr_t p_sinfo; /* user stack addr top copy siginfo */ struct sigacts *ps = p->p_sigacts; int oonstack; user_addr_t sp; mach_msg_type_number_t state_count; thread_t th_act; struct uthread *ut; int infostyle = UC_TRAD; int dualcontext =0; user_addr_t trampact; int vec_used = 0; int stack_size = 0; void * tstate; int flavor; int ctx32 = 1; th_act = current_thread(); ut = get_bsdthread_info(th_act); /* * XXX We conditionalize type passed here based on SA_SIGINFO, but * XXX we always send up all the information, regardless; perhaps * XXX this should not be conditionalized? Defer making this change * XXX now, due to possible tools impact. */ if (p->p_sigacts->ps_siginfo & sigmask(sig)) { /* * If SA_SIGINFO is set, then we must provide the user * process both a siginfo_t and a context argument. We call * this "FLAVORED", as opposed to "TRADITIONAL", which doesn't * expect a context. "DUAL" is a type of "FLAVORED". */ if (is_64signalregset()) { /* * If this is a 64 bit CPU, we must include a 64 bit * context in the data we pass to user space; we may * or may not also include a 32 bit context at the * same time, for non-leaf functions. * * The user may also explicitly choose to not receive * a 32 bit context, at their option; we only allow * this to happen on 64 bit processors, for obvious * reasons. */ if (IS_64BIT_PROCESS(p) || (p->p_sigacts->ps_64regset & sigmask(sig))) { /* * For a 64 bit process, there is no 32 bit * context. */ ctx32 = 0; infostyle = UC_FLAVOR64; } else { /* * For a 32 bit process on a 64 bit CPU, we * may have 64 bit leaf functions, so we need * both contexts. 
*/ dualcontext = 1; infostyle = UC_DUAL; } } else { /* * If this is a 32 bit CPU, then we only have a 32 bit * context to contend with. */ infostyle = UC_FLAVOR; } } else { /* * If SA_SIGINFO is not set, then we have a traditional style * call which does not need additional context passed. The * default is 32 bit traditional. * * XXX The second check is redundant on PPC32; keep it anyway. */ if (is_64signalregset() || IS_64BIT_PROCESS(p)) { /* * However, if this is a 64 bit CPU, we need to change * this to 64 bit traditional, and drop the 32 bit * context. */ ctx32 = 0; infostyle = UC_TRAD64; } } proc_unlock(p); /* I need this for SIGINFO anyway */ flavor = PPC_THREAD_STATE; tstate = (void *)&mctx.ss; state_count = PPC_THREAD_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; if ((ctx32 == 0) || dualcontext) { flavor = PPC_THREAD_STATE64; tstate = (void *)&mctx64.ss; state_count = PPC_THREAD_STATE64_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 1) || dualcontext) { flavor = PPC_EXCEPTION_STATE; tstate = (void *)&mctx.es; state_count = PPC_EXCEPTION_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 0) || dualcontext) { flavor = PPC_EXCEPTION_STATE64; tstate = (void *)&mctx64.es; state_count = PPC_EXCEPTION_STATE64_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 1) || dualcontext) { flavor = PPC_FLOAT_STATE; tstate = (void *)&mctx.fs; state_count = PPC_FLOAT_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 0) || dualcontext) { flavor = PPC_FLOAT_STATE; tstate = (void *)&mctx64.fs; state_count = PPC_FLOAT_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) 
!= KERN_SUCCESS) goto bad; } if (find_user_vec_curr()) { vec_used = 1; if ((ctx32 == 1) || dualcontext) { flavor = PPC_VECTOR_STATE; tstate = (void *)&mctx.vs; state_count = PPC_VECTOR_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; infostyle += 5; } if ((ctx32 == 0) || dualcontext) { flavor = PPC_VECTOR_STATE; tstate = (void *)&mctx64.vs; state_count = PPC_VECTOR_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; infostyle += 5; } } trampact = ps->ps_trampact[sig]; oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK; /* figure out where our new stack lives */ if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && (ps->ps_sigonstack & sigmask(sig))) { sp = ut->uu_sigstk.ss_sp; sp += ut->uu_sigstk.ss_size; stack_size = ut->uu_sigstk.ss_size; ut->uu_sigstk.ss_flags |= SA_ONSTACK; } else { if (ctx32 == 0) sp = mctx64.ss.r1; else sp = CAST_USER_ADDR_T(mctx.ss.r1); } /* put siginfo on top */ /* preserve RED ZONE area */ if (IS_64BIT_PROCESS(p)) sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN); else sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN); /* next are the saved registers */ if ((ctx32 == 0) || dualcontext) { sp -= sizeof(struct mcontext64); p_mctx64 = sp; } if ((ctx32 == 1) || dualcontext) { sp -= sizeof(struct mcontext); p_mctx = sp; } if (IS_64BIT_PROCESS(p)) { /* context goes first on stack */ sp -= sizeof(struct user_ucontext64); p_uctx = sp; /* this is where siginfo goes on stack */ sp -= sizeof(user_siginfo_t); p_sinfo = sp; sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN); } else { /* * struct ucontext and struct ucontext64 are identical in * size and content; the only difference is the internal * pointer type for the last element, which makes no * difference for the copyout(). 
*/ /* context goes first on stack */ sp -= sizeof(struct ucontext64); p_uctx = sp; /* this is where siginfo goes on stack */ sp -= sizeof(siginfo_t); p_sinfo = sp; sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN); } uctx.uc_onstack = oonstack; uctx.uc_sigmask = mask; uctx.uc_stack.ss_sp = sp; uctx.uc_stack.ss_size = stack_size; if (oonstack) uctx.uc_stack.ss_flags |= SS_ONSTACK; uctx.uc_link = 0; if (ctx32 == 0) uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); else uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); if (vec_used) uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int)); if (ctx32 == 0) uctx.uc_mcontext64 = p_mctx64; else uctx.uc_mcontext64 = p_mctx; /* setup siginfo */ bzero((caddr_t)&sinfo, sizeof(user_siginfo_t)); sinfo.si_signo = sig; if (ctx32 == 0) { sinfo.si_addr = mctx64.ss.srr0; sinfo.pad[0] = mctx64.ss.r1; } else { sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0); sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1); } switch (sig) { case SIGILL: /* * If it's 64 bit and not a dual context, mctx will * contain uninitialized data, so we have to use * mctx64 here. 
*/ if(ctx32 == 0) { if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) sinfo.si_code = ILL_ILLOPC; else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) sinfo.si_code = ILL_PRVOPC; else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) sinfo.si_code = ILL_ILLTRP; else sinfo.si_code = ILL_NOOP; } else { if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) sinfo.si_code = ILL_ILLOPC; else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) sinfo.si_code = ILL_PRVOPC; else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) sinfo.si_code = ILL_ILLTRP; else sinfo.si_code = ILL_NOOP; } break; case SIGFPE: #define FPSCR_VX 2 #define FPSCR_OX 3 #define FPSCR_UX 4 #define FPSCR_ZX 5 #define FPSCR_XX 6 /* * If it's 64 bit and not a dual context, mctx will * contain uninitialized data, so we have to use * mctx64 here. */ if(ctx32 == 0) { if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX))) sinfo.si_code = FPE_FLTINV; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX))) sinfo.si_code = FPE_FLTOVF; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX))) sinfo.si_code = FPE_FLTUND; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX))) sinfo.si_code = FPE_FLTDIV; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX))) sinfo.si_code = FPE_FLTRES; else sinfo.si_code = FPE_NOOP; } else { if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX))) sinfo.si_code = FPE_FLTINV; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX))) sinfo.si_code = FPE_FLTOVF; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX))) sinfo.si_code = FPE_FLTUND; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX))) sinfo.si_code = FPE_FLTDIV; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX))) sinfo.si_code = FPE_FLTRES; else sinfo.si_code = FPE_NOOP; } break; case SIGBUS: if (ctx32 == 0) { sinfo.si_addr = mctx64.es.dar; } else { sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar); } /* on ppc we generate only if EXC_PPC_UNALIGNED */ sinfo.si_code = BUS_ADRALN; break; case SIGSEGV: /* * If it's 64 bit and not a dual context, mctx will * contain 
uninitialized data, so we have to use * mctx64 here. */ if (ctx32 == 0) { sinfo.si_addr = mctx64.es.dar; /* First check in srr1 and then in dsisr */ if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else sinfo.si_code = SEGV_MAPERR; } else { sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar); /* First check in srr1 and then in dsisr */ if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else sinfo.si_code = SEGV_MAPERR; } break; default: { int status_and_exitcode; /* * All other signals need to fill out a minimum set of * information for the siginfo structure passed into * the signal handler, if SA_SIGINFO was specified. * * p->si_status actually contains both the status and * the exit code; we save it off in its own variable * for later breakdown. */ proc_lock(p); sinfo.si_pid = p->si_pid; p->si_pid = 0; status_and_exitcode = p->si_status; p->si_status = 0; sinfo.si_uid = p->si_uid; p->si_uid = 0; sinfo.si_code = p->si_code; p->si_code = 0; proc_unlock(p); if (sinfo.si_code == CLD_EXITED) { if (WIFEXITED(status_and_exitcode)) sinfo.si_code = CLD_EXITED; else if (WIFSIGNALED(status_and_exitcode)) { if (WCOREDUMP(status_and_exitcode)) { sinfo.si_code = CLD_DUMPED; status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); } else { sinfo.si_code = CLD_KILLED; status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); } } } /* * The recorded status contains the exit code and the * signal information, but the information to be passed * in the siginfo to the handler is supposed to only * contain the status, so we have to shift it out. 
*/ sinfo.si_status = WEXITSTATUS(status_and_exitcode); break; } } /* copy info out to user space */ if (IS_64BIT_PROCESS(p)) { /* XXX truncates catcher address to uintptr_t */ DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &sinfo, void (*)(void), CAST_DOWN(sig_t, catcher)); if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64))) goto bad; if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t))) goto bad; } else {
/*
 * ptrace system call (older XNU variant, register_t retval).
 *
 * p      the calling (tracing) process
 * uap    req/pid/addr/data arguments
 * retval syscall return cell (used only by the PT_DENY_ATTACH exit path)
 *
 * Executes most requests directly (Mach style) instead of relaying them
 * through the stopped child.  Returns 0 or an errno.
 */
int
ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t task;
	thread_t th_act;
	struct uthread *ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			/* Already traced and asking to deny attachment:
			 * forcibly exit with ENOTSUP. */
			proc_unlock(p);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			/* drop funnel before we return */
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		/* Superuser-only: force quota enforcement on the current proc. */
		if (is_suser()) {
			OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */
	if (uap->req == PT_TRACE_ME) {
		proc_lock(p);
		SET(p->p_lflag, P_LTRACED);
		/* Non-attached case, our tracer is our parent. */
		p->p_oppid = p->p_ppid;
		proc_unlock(p);
		return(0);
	}
	if (uap->req == PT_SIGEXC) {
		/* Exception-style signal delivery; only valid when traced. */
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/*
	 * We do not want ptrace to do anything with kernel or launchd
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
		/* ATTACHEXC is ATTACH plus exception-style delivery. */
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
		int err;

		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
					     t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc)
				SET(t->p_lflag, P_LSIGEXC);

			t->p_oppid = t->p_ppid;
			proc_unlock(t);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);

			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);
			error = 0;
			goto out;
		} else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}

			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		/* Hand the child back to its original parent (or init). */
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
			if (pp != PROC_NULL)
				proc_rele(pp);
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;

	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define ALIGNED(addr,size)	(((unsigned)(addr)&((size)-1))==0)
			/* NOTE(review): this early return bypasses the
			 * proc_rele(t) at `out', leaking the reference
			 * taken by proc_find() — verify upstream fix. */
			if (!ALIGNED((int)uap->addr, sizeof(int)))
				return (ERESTART);
#undef	ALIGNED
#endif
			thread_setentrypoint(th_act, uap->addr);
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
			/*
			 * set trace bit
			 */
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
			/*
			 * clear trace bit if on
			 */
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}
	resume:
		/* Common resume path: record the signal and set runnable. */
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);

		break;

	case PT_THUPDATE:  {
		/* Update the pending-signal set of one specific thread. */
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
		if (th_act == THREAD_NULL)
			/* NOTE(review): returning here skips proc_rele(t),
			 * leaking the proc_find() reference — verify against
			 * the later variant, which does `goto out'. */
			return (ESRCH);
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
int sys_abort2(struct thread *td, struct abort2_args *uap) { struct proc *p = td->td_proc; struct sbuf *sb; void *uargs[16]; int error, i, sig; /* * Do it right now so we can log either proper call of abort2(), or * note, that invalid argument was passed. 512 is big enough to * handle 16 arguments' descriptions with additional comments. */ sb = sbuf_new(NULL, NULL, 512, SBUF_FIXEDLEN); sbuf_clear(sb); sbuf_printf(sb, "%s(pid %d uid %d) aborted: ", p->p_comm, p->p_pid, td->td_ucred->cr_uid); /* * Since we can't return from abort2(), send SIGKILL in cases, where * abort2() was called improperly */ sig = SIGKILL; /* Prevent from DoSes from user-space. */ if (uap->nargs < 0 || uap->nargs > 16) goto out; if (uap->nargs > 0) { if (uap->args == NULL) goto out; error = copyin(uap->args, uargs, uap->nargs * sizeof(void *)); if (error != 0) goto out; } /* * Limit size of 'reason' string to 128. Will fit even when * maximal number of arguments was chosen to be logged. */ if (uap->why != NULL) { error = sbuf_copyin(sb, uap->why, 128); if (error < 0) goto out; } else { sbuf_printf(sb, "(null)"); } if (uap->nargs > 0) { sbuf_printf(sb, "("); for (i = 0;i < uap->nargs; i++) sbuf_printf(sb, "%s%p", i == 0 ? "" : ", ", uargs[i]); sbuf_printf(sb, ")"); } /* * Final stage: arguments were proper, string has been * successfully copied from userspace, and copying pointers * from user-space succeed. */ sig = SIGABRT; out: if (sig == SIGKILL) { sbuf_trim(sb); sbuf_printf(sb, " (Reason text inaccessible)"); } sbuf_cat(sb, "\n"); sbuf_finish(sb); log(LOG_INFO, "%s", sbuf_data(sb)); sbuf_delete(sb); exit1(td, W_EXITCODE(0, sig)); return (0); }
/*
 * Fatal-signal handler: write a post-mortem report for the current
 * process.  It captures a stack trace via backtrace(3), logs it to a
 * timestamped file (falling back to the working directory, then to
 * stderr), appends the memory map from /proc/self/smaps, symbolizes
 * each frame with an addr2line child process, and finally tries to
 * attach gdb for a richer backtrace.
 *
 * NOTE(review): the function body is truncated in this chunk -- the
 * 'ballout' label targeted by several gotos and the parent-side gdb
 * handshake after the final fork() are not visible here.
 *
 * NOTE(review): many calls below (snprintf, strftime, localtime,
 * dprintf, fork/execlp, sprintf) are not async-signal-safe; presumably
 * an accepted best-effort trade-off for a crash handler -- confirm.
 */
static ATTRIBUTE_NO_SANITIZE_THREAD
void backtrace_sigaction(int signum, siginfo_t *info, void* ptr)
{
	void *array[42];
	size_t size;
	void * caller_address;
	ucontext_t *uc = (ucontext_t *) ptr;
	int gdb_pid = -1;

	/* get all entries on the stack */
	size = backtrace(array, 42);

	/* Get the address at the time the signal was raised */
#if defined(REG_RIP)
	caller_address = (void *) uc->uc_mcontext.gregs[REG_RIP];	/* x86-64 */
#elif defined(REG_EIP)
	caller_address = (void *) uc->uc_mcontext.gregs[REG_EIP];	/* i386 */
#elif defined(__arm__)
	caller_address = (void *) uc->uc_mcontext.arm_pc;
#elif defined(__mips__)
	caller_address = (void *) uc->uc_mcontext.sc_pc;
#elif defined(REG_PC) && defined(__e2k__)
	caller_address = (void *) ((mcontext_t*) &uc->uc_mcontext)->mc_gregs[REG_PC];
#else
	/* TODO support more arch(s) */
#	warning Unsupported architecture.
	caller_address = info->si_addr;
#endif

	/*
	 * Decide whether this signal will kill us: resource-limit signals
	 * always do; ABRT/ALRM only when self-sent; the hardware faults
	 * (BUS/FPE/ILL/SEGV) only when raised by the kernel.
	 */
	int should_die = 0;
	switch(info->si_signo) {
	case SIGXCPU:
	case SIGXFSZ:
		should_die = 1;
		break;
	case SIGABRT:
	case SIGALRM:
		if (info->si_pid == getpid())
			should_die = 1;
		/* FALLTHROUGH -- also apply the kernel-origin test below */
	case SIGBUS:
	case SIGFPE:
	case SIGILL:
	case SIGSEGV:
#ifndef SI_FROMKERNEL
#	define SI_FROMKERNEL(info) (info->si_code > 0)
#endif
		if (SI_FROMKERNEL(info))
			should_die = 1;
	}

	/* Timestamp for the report file name. */
	char time_buf[64];
	time_t t = ldap_time_unsteady();
	strftime(time_buf, sizeof(time_buf), "%F-%H%M%S", localtime(&t));

	char name_buf[PATH_MAX];
	int fd = -1;
#ifdef snprintf
	/* use the real libc snprintf here, not a project wrapper macro */
#	undef snprintf
#endif
	/* Create <homedir-or-cwd>/slapd-backtrace.<time>-<pid>.log, O_EXCL. */
	if (snprintf(name_buf, sizeof(name_buf), "%s/slapd-backtrace.%s-%i.log%c",
			backtrace_homedir ? backtrace_homedir : ".", time_buf, getpid(), 0) > 0)
		fd = open(name_buf, O_CREAT | O_EXCL | O_WRONLY | O_APPEND, 0644);
	if (fd < 0) {
		/* Fall back to the bare file name in cwd, then to stderr. */
		if (backtrace_homedir)
			fd = open(strrchr(name_buf, '/') + 1,
				O_CREAT | O_EXCL | O_WRONLY | O_APPEND, 0644);
		if (fd < 0)
			fd = STDERR_FILENO;
		dprintf(fd, "\n\n*** Unable create \"%s\": %s!", name_buf, STRERROR(errno));
	}

	dprintf(fd, "\n\n*** Signal %d (%s), address is %p from %p\n", signum,
			strsignal(signum), info->si_addr, (void *)caller_address);

	/* name_buf is reused for the executable path (fed to addr2line/gdb). */
	int n = readlink("/proc/self/exe", name_buf, sizeof(name_buf) - 1);
	if (n > 0) {
		name_buf[n] = 0;	/* readlink() does not NUL-terminate */
		dprintf(fd, " Executable file %s\n", name_buf);
	} else {
		dprintf(fd, " Unable read executable name: %s\n", STRERROR(errno));
		strcpy(name_buf, "unknown");
	}

	/*
	 * Trim the leading signal-machinery frames: start the printed trace
	 * at the faulting PC if it appears in the captured stack.
	 * NOTE(review): 'n < size' compares int against size_t.
	 */
	void** actual = array;
	int frame = 0;
	for (n = 0; n < size; ++n)
		if (array[n] == caller_address) {
			frame = n;
			actual = array + frame;
			size -= frame;
			break;
		}

	dprintf(fd, "\n*** Backtrace by glibc:\n");
	backtrace_symbols_fd(actual, size, fd);

	/* Append the process memory map for post-mortem analysis. */
	int mem_map_fd = open("/proc/self/smaps", O_RDONLY, 0);
	if (mem_map_fd >= 0) {
		char buf[1024];
		dprintf(fd, "\n*** Memory usage map (by /proc/self/smaps):\n");
		while(1) {
			int n = read(mem_map_fd, &buf, sizeof(buf));
			if (n < 1)
				break;
			if (write(fd, &buf, n) != n)
				break;
		}
		close(mem_map_fd);
	}

	/* avoid ECHILD from waitpid() */
	signal(SIGCHLD, SIG_DFL);

	/* Symbolize each frame with one addr2line child per address. */
	dprintf(fd, "\n*** Backtrace by addr2line:\n");
	for(n = 0; n < size; ++n) {
		int status = EXIT_FAILURE, child_pid = fork();
		if (! child_pid) {
			/* Child: route stdout into the log and exec addr2line. */
			char addr_buf[64];
			close(STDIN_FILENO);
			dup2(fd, STDOUT_FILENO);
			close(fd);
			dprintf(STDOUT_FILENO, "(%d) %p: ", n, actual[n]);
			sprintf(addr_buf, "%p", actual[n]);
			execlp("addr2line", "addr2line", addr_buf, "-C", "-f", "-i",
#if __GLIBC_PREREQ(2,14)
				"-p", /* LY: not available on RHEL6, guest by glibc version */
#endif
				"-e", name_buf, NULL);
			exit(EXIT_FAILURE);
		} else if (child_pid < 0) {
			dprintf(fd, "\n*** Unable complete backtrace by addr2line, sorry (%s, %d).\n",
					"fork", errno);
			break;
		} else if (waitpid(child_pid, &status, 0) < 0
				|| status != W_EXITCODE(EXIT_SUCCESS, 0)) {
			dprintf(fd, "\n*** Unable complete backtrace by addr2line, sorry (%s, pid %d, errno %d, status 0x%x).\n",
					"waitpid", child_pid, errno, status);
			break;
		}
	}

	if (is_debugger_present()) {
		dprintf(fd, "*** debugger already present\n");
		goto ballout;
	}

	/* NOTE(review): retry_by_return is consumed past this chunk -- verify. */
	int retry_by_return = 0;
	if (should_die && SI_FROMKERNEL(info)) {
		/* LY: Expect kernel kill us again,
		 * therefore for switch to 'guilty' thread and we may just return,
		 * instead of sending SIGTRAP and later switch stack frame by GDB. */
		retry_by_return = 1;
	}

	if (is_valgrind_present()) {
		dprintf(fd, "*** valgrind present, skip backtrace by gdb\n");
		goto ballout;
	}

	/* Pipe used to feed commands to gdb's stdin. */
	int pipe_fd[2];
	if (pipe(pipe_fd)) {
		pipe_fd[0] = pipe_fd[1] = -1;
		goto ballout;
	}

	gdb_is_ready_for_backtrace = 0;
	pid_t tid = syscall(SYS_gettid);
	gdb_pid = fork();
	if (!gdb_pid) {
		/* Child: become session leader, wire the pipe to stdin and the
		 * log to stdout/stderr, close all other fds, and exec gdb. */
		char pid_buf[16];
		sprintf(pid_buf, "%d", getppid());
		dup2(pipe_fd[0], STDIN_FILENO);
		close(pipe_fd[1]);
		pipe_fd[0] = pipe_fd[1] =-1;
		dup2(fd, STDOUT_FILENO);
		dup2(fd, STDERR_FILENO);
		for(fd = getdtablesize(); fd > STDERR_FILENO; --fd)
			close(fd);
		setsid();
		setpgid(0, 0);
		dprintf(STDOUT_FILENO, "\n*** Backtrace by GDB "
#if GDB_SWITCH2GUILTY_THREAD
			"(pid %s, LWP %i, frame #%d):\n", pid_buf, tid, frame);
#else
			"(pid %s, LWP %i, please find frame manually):\n", pid_buf, tid);
#endif
		execlp("gdb", "gdb", "-q", "-se", name_buf, "-n", NULL);
		/* exec failed: NOTE(review): this kills the crashing parent --
		 * presumably to force death rather than hang; confirm intent. */
		kill(getppid(), SIGKILL);
		dprintf(STDOUT_FILENO, "\n*** Sorry, GDB launch failed: %s\n", STRERROR(errno));
		fsync(STDOUT_FILENO);
		exit(EXIT_FAILURE);
	}
/*
 * sendsig -- arrange delivery of signal 'sig' to the current process by
 * building a ucontext/siginfo frame on the user stack and redirecting
 * the thread to the user-space signal trampoline (32-bit ARM ABI).
 *
 * NOTE(review): the unconditional 'return' immediately after the
 * kprintf() below makes everything past it unreachable -- signal
 * delivery is effectively stubbed out.  This looks like bring-up debug
 * code; confirm intent before removing it.
 */
void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
	struct mcontext mctx;
	thread_t th_act;
	struct uthread *ut;
	void *tstate;
	int flavor;
	user_addr_t p_mctx = USER_ADDR_NULL;	/* mcontext dest. */
	int infostyle = UC_TRAD;
	mach_msg_type_number_t state_count;
	user_addr_t trampact;		/* user-space trampoline address */
	int oonstack;
	struct user_ucontext32 uctx;
	user_addr_t sp;
	user_addr_t p_uctx;	/* user stack addr top copy ucontext */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;	/* user stack addr top copy siginfo */
	struct sigacts *ps = p->p_sigacts;
	int stack_size = 0;
	kern_return_t kretn;

	th_act = current_thread();

	kprintf("sendsig: Sending signal to thread %p, code %d.\n", th_act, sig);
	/* NOTE(review): early return -- remainder of this function is dead code. */
	return;

	ut = get_bsdthread_info(th_act);

	/* SA_SIGINFO requested for this signal: use the flavored style. */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		infostyle = UC_FLAVOR;
	}

	/* Capture the thread's current register state into mctx.ss. */
	flavor = ARM_THREAD_STATE;
	tstate = (void *) &mctx.ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		goto bad;

	trampact = ps->ps_trampact[sig];
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

	/*
	 * figure out where our new stack lives
	 */
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		sp = ut->uu_sigstk.ss_sp;
		sp += ut->uu_sigstk.ss_size;	/* stack grows down from the top */
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		sp = CAST_USER_ADDR_T(mctx.ss.sp);
	}

	/*
	 * context goes first on stack
	 */
	sp -= sizeof(struct ucontext);
	p_uctx = sp;

	/*
	 * this is where siginfo goes on stack
	 */
	sp -= sizeof(user32_siginfo_t);
	p_sinfo = sp;

	/*
	 * final stack pointer
	 */
	sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN + C_32_LINKAGE_LEN, C_32_STK_ALIGN);

	uctx.uc_mcsize = (size_t) ((ARM_THREAD_STATE_COUNT) * sizeof(int));
	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	/* NOTE(review): ss_flags is |='d without prior initialization of uctx. */
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;
	uctx.uc_link = 0;

	/*
	 * setup siginfo
	 */
	bzero((caddr_t) & sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;
	sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.pc);
	sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.sp);

	switch (sig) {
	case SIGILL:
		sinfo.si_code = ILL_NOOP;
		break;
	case SIGFPE:
		sinfo.si_code = FPE_NOOP;
		break;
	case SIGBUS:
		sinfo.si_code = BUS_ADRALN;
		break;
	case SIGSEGV:
		sinfo.si_code = SEGV_ACCERR;
		break;
	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					/*
					 * NOTE(review): passing the full status as both
					 * W_EXITCODE arguments looks suspect; other ports
					 * derive the second argument with WTERMSIG() --
					 * verify against the reference implementation.
					 */
					status_and_exitcode = W_EXITCODE(status_and_exitcode,
							status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode,
							status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = WEXITSTATUS(status_and_exitcode);
		break;
	}
	}

	/* Copy the frame out to the user stack. */
	if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext32)))
		goto bad;
	if (copyout(&sinfo, p_sinfo, sizeof(sinfo)))
		goto bad;

	/*
	 * set signal registers, these are probably wrong..
	 */
	{
		mctx.ss.r[0] = CAST_DOWN(uint32_t, ua_catcher);
		mctx.ss.r[1] = (uint32_t) infostyle;
		mctx.ss.r[2] = (uint32_t) sig;
		mctx.ss.r[3] = CAST_DOWN(uint32_t, p_sinfo);
		mctx.ss.r[4] = CAST_DOWN(uint32_t, p_uctx);
		mctx.ss.pc = CAST_DOWN(uint32_t, trampact);
		mctx.ss.sp = CAST_DOWN(uint32_t, sp);

		state_count = ARM_THREAD_STATE_COUNT;
		/* NOTE(review): 'trampact' is user_addr_t (likely 64-bit); "%08x"
		 * expects unsigned int -- format/argument mismatch. */
		printf("sendsig: Sending signal to thread %p, code %d, new pc 0x%08x\n",
				th_act, sig, trampact);
		if ((kretn = thread_setstatus(th_act, ARM_THREAD_STATE,
				(void *) &mctx.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	}

	proc_lock(p);
	return;

bad:
	/* Delivery failed: reset SIGILL to default and force it on the process. */
	proc_lock(p);
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);	/* 'sig' reused as a mask from here on */
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/*
	 * sendsig is called with signal lock held
	 */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
	return;
}