int exec_process_vmcmds(struct proc *p, struct exec_package *epp) { struct exec_vmcmd *base_vc = NULL; int error = 0; int i; for (i = 0; i < epp->ep_vmcmds.evs_used && !error; i++) { struct exec_vmcmd *vcp; vcp = &epp->ep_vmcmds.evs_cmds[i]; if (vcp->ev_flags & VMCMD_RELATIVE) { #ifdef DIAGNOSTIC if (base_vc == NULL) panic("exec_process_vmcmds: RELATIVE no base"); #endif vcp->ev_addr += base_vc->ev_addr; } error = (*vcp->ev_proc)(p, vcp); if (vcp->ev_flags & VMCMD_BASE) { base_vc = vcp; } } kill_vmcmds(&epp->ep_vmcmds); return (error); }
int osf1_exec_ecoff_hook(struct proc *p, struct exec_package *epp) { struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; struct osf1_exec_emul_arg *emul_arg; int error; /* if we're here and we're exec-able at all, we're an OSF/1 binary */ epp->ep_emul = &emul_osf1; /* set up the exec package emul arg as appropriate */ emul_arg = malloc(sizeof *emul_arg, M_TEMP, M_WAITOK); epp->ep_emul_arg = emul_arg; emul_arg->flags = 0; if (epp->ep_ndp->ni_segflg == UIO_SYSSPACE) error = copystr(epp->ep_ndp->ni_dirp, emul_arg->exec_name, MAXPATHLEN + 1, NULL); else error = copyinstr(epp->ep_ndp->ni_dirp, emul_arg->exec_name, MAXPATHLEN + 1, NULL); #ifdef DIAGNOSTIC if (error != 0) panic("osf1_exec_ecoff_hook: copyinstr failed"); #endif /* do any special object file handling */ switch (execp->f.f_flags & ECOFF_FLAG_OBJECT_TYPE_MASK) { case ECOFF_OBJECT_TYPE_SHARABLE: /* can't exec a shared library! */ uprintf("can't execute OSF/1 shared libraries\n"); error = ENOEXEC; break; case ECOFF_OBJECT_TYPE_CALL_SHARED: error = osf1_exec_ecoff_dynamic(p, epp); break; default: /* just let the normal ECOFF handlers deal with it. */ error = 0; break; } if (error) { free(epp->ep_emul_arg, M_TEMP); epp->ep_emul_arg = NULL; kill_vmcmds(&epp->ep_vmcmds); /* if any */ } return (error); }
int osf1_exec_ecoff_probe(struct lwp *l, struct exec_package *epp) { struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr; struct osf1_exec_emul_arg *emul_arg; int error; /* check if the binary is osf1 ecoff */ if (execp->f.f_magic != ECOFF_MAGIC_ALPHA) return ENOEXEC; /* set up the exec package emul arg as appropriate */ emul_arg = kmem_alloc(sizeof(*emul_arg), KM_SLEEP); epp->ep_emul_arg = emul_arg; epp->ep_emul_arg_free = osf1_free_emul_arg; emul_arg->flags = 0; /* this cannot overflow because both are size PATH_MAX */ strcpy(emul_arg->exec_name, epp->ep_kname); /* do any special object file handling */ switch (execp->f.f_flags & ECOFF_FLAG_OBJECT_TYPE_MASK) { case ECOFF_OBJECT_TYPE_SHARABLE: /* can't exec a shared library! */ #if 0 uprintf("can't execute OSF/1 shared libraries\n"); #endif error = ENOEXEC; break; case ECOFF_OBJECT_TYPE_CALL_SHARED: error = osf1_exec_ecoff_dynamic(l, epp); break; default: /* just let the normal ECOFF handlers deal with it. */ error = 0; break; } if (error) { exec_free_emul_arg(epp); kill_vmcmds(&epp->ep_vmcmds); /* if any */ } return (error); }
int exec_pecoff_makecmds(struct lwp *l, struct exec_package *epp) { int error, peofs; struct pecoff_dos_filehdr *dp = epp->ep_hdr; struct coff_filehdr *fp; struct proc *p; p = l->l_proc; /* * mmap EXE file (PE format) * 1. read header (DOS,PE) * 2. mmap code section (READ|EXEC) * 3. mmap other section, such as data (READ|WRITE|EXEC) */ if (epp->ep_hdrvalid < PECOFF_DOS_HDR_SIZE) { return ENOEXEC; } if ((error = pecoff_signature(l, epp->ep_vp, dp)) != 0) return error; if ((error = vn_marktext(epp->ep_vp)) != 0) return error; peofs = dp->d_peofs + sizeof(signature) - 1; fp = malloc(PECOFF_HDR_SIZE, M_TEMP, M_WAITOK); error = exec_read_from(l, epp->ep_vp, peofs, fp, PECOFF_HDR_SIZE); if (error) { free(fp, M_TEMP); return error; } error = exec_pecoff_coff_makecmds(l, epp, fp, peofs); if (error != 0) kill_vmcmds(&epp->ep_vmcmds); free(fp, M_TEMP); return error; }
int exec_coff_makecmds(struct lwp *l, struct exec_package *epp) { int error; struct coff_filehdr *fp = epp->ep_hdr; struct coff_aouthdr *ap; if (epp->ep_hdrvalid < COFF_HDR_SIZE) return ENOEXEC; if (COFF_BADMAG(fp)) return ENOEXEC; ap = (void *)((char *)epp->ep_hdr + sizeof(struct coff_filehdr)); switch (ap->a_magic) { case COFF_OMAGIC: error = exec_coff_prep_omagic(l, epp, fp, ap); break; case COFF_NMAGIC: error = exec_coff_prep_nmagic(l, epp, fp, ap); break; case COFF_ZMAGIC: error = exec_coff_prep_zmagic(l, epp, fp, ap); break; default: return ENOEXEC; } #ifdef TODO if (error == 0) error = cpu_exec_coff_hook(p, epp); #endif if (error) kill_vmcmds(&epp->ep_vmcmds); return error; }
/*
 * execve(2): replace the current process image with a new one.
 *
 * Phases:
 *   1. look up and check the executable (check_exec),
 *   2. copy in argv/envp into a kernel buffer,
 *   3. point of no return: tear down the old vmspace and run the vmcmds,
 *   4. copy args/ps_strings onto the new stack, reset signals/fds/creds,
 *   5. set registers and return to the new image.
 * Errors before phase 3 unwind to "bad"/"freehdr"; errors after phase 3
 * cannot restore the old image and kill the process via "exec_abort".
 */
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = pr->ps_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

#if NSYSTRACE > 0
	/* under systrace, snapshot the path into a kernel buffer first */
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC, 0);
			tmpfap++;
			argc++;
		}
		free(pack.ep_fa, M_EXEC, 0);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}
	/* a script exec already supplied argv[0] via the fake args list */
	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	while (1) {
		/* len = remaining room in the NCARGS buffer; copyinstr
		 * rewrites it with the length actually copied (incl. NUL) */
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	/* round the string area up to the stack alignment */
	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * pr_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* old "stackgap" is gone now */
	pr->ps_stackgap = 0;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;

	/* set command name & other accounting info */
	memset(p->p_comm, 0, sizeof(p->p_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		/* wake a parent blocked in vfork() */
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	atomic_clearbits_int(&pr->ps_flags, PS_TAMED);
	tame_dropwpaths(pr);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				/* fd 0 is read-only; 1 and 2 get FWRITE */
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				/* back the missing fd with the null device */
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	/* suid exec: clear interval timers so they can't signal the
	 * privileged image with stale settings */
	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
	    pr->ps_emul != pack.ep_emul)
		(*pr->ps_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

#if defined(KTRACE) && defined(COMPAT_LINUX)
	/* update ps_emul, but don't ktrace it if native-execing-native */
	if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) {
		pr->ps_emul = pack.ep_emul;

		if (KTRPOINT(p, KTR_EMUL))
			ktremul(p);
	}
#else
	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;
#endif

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
#if NSYSTRACE > 0
clrflag:
#endif
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED: exit1() does not return */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	return (0);
}
/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 * 	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	/* namei() returned vp locked (LOCKLEAF) and referenced */
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto bad1;
	}
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}
	/* nosuid mount: strip the set-id bits before anyone acts on them */
	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp, 0, p);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		if (!newerror && !(epp->ep_emul->e_flags & EMUL_ENABLED))
			newerror = EPERM;
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		/*
		 * a destructive checker already tore the package down
		 * itself, so return directly and skip our own cleanup.
		 */
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > p->p_rlimit[RLIMIT_DATA].rlim_cur))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}
/*
 * exec_script_makecmds(): Check if it's an executable shell script.
 *
 * Given a proc pointer and an exec package pointer, see if the referent
 * of the epp is in shell script.  If it is, then set things up so that
 * the script can be run.  This involves preparing the address space
 * and arguments for the shell which will run the script.
 *
 * This function is ultimately responsible for creating a set of vmcmds
 * which can be used to build the process's vm space and inserting them
 * into the exec package.
 *
 * Returns 0 on success (recursive check_exec() on the interpreter
 * succeeded), ENOEXEC if this isn't a script, or the error from the
 * recursive lookup; on error the fake arg list and vmcmds are freed.
 */
int
exec_script_makecmds(struct proc *p, struct exec_package *epp)
{
	int error, hdrlinelen, shellnamelen, shellarglen;
	char *hdrstr = epp->ep_hdr;
	char *cp, *shellname, *shellarg, *oldpnbuf;
	char **shellargp = NULL, **tmpsap;
	struct vnode *scriptvp;
#ifdef SETUIDSCRIPTS
	uid_t script_uid = -1;
	gid_t script_gid = -1;
	u_short script_sbits;
#endif

	/*
	 * remember the old vp and pnbuf for later, so we can restore
	 * them if check_exec() fails.
	 */
	scriptvp = epp->ep_vp;
	oldpnbuf = epp->ep_ndp->ni_cnd.cn_pnbuf;

	/*
	 * if the magic isn't that of a shell script, or we've already
	 * done shell script processing for this exec, punt on it.
	 * (EXEC_INDIR also prevents "#!" interpreter loops.)
	 */
	if ((epp->ep_flags & EXEC_INDIR) != 0 ||
	    epp->ep_hdrvalid < EXEC_SCRIPT_MAGICLEN ||
	    strncmp(hdrstr, EXEC_SCRIPT_MAGIC, EXEC_SCRIPT_MAGICLEN))
		return ENOEXEC;

	/*
	 * check that the shell spec is terminated by a newline,
	 * and that it isn't too large.  Don't modify the
	 * buffer unless we're ready to commit to handling it.
	 * (The latter requirement means that we have to check
	 * for both spaces and tabs later on.)
	 */
	hdrlinelen = min(epp->ep_hdrvalid, MAXINTERP);
	for (cp = hdrstr + EXEC_SCRIPT_MAGICLEN; cp < hdrstr + hdrlinelen;
	    cp++) {
		if (*cp == '\n') {
			*cp = '\0';
			break;
		}
	}
	if (cp >= hdrstr + hdrlinelen)
		return ENOEXEC;

	shellname = NULL;
	shellarg = NULL;
	shellarglen = 0;

	/* strip spaces before the shell name */
	for (cp = hdrstr + EXEC_SCRIPT_MAGICLEN; *cp == ' ' || *cp == '\t';
	    cp++)
		;

	/* collect the shell name; remember its length for later */
	shellname = cp;
	shellnamelen = 0;
	if (*cp == '\0')
		goto check_shell;
	for ( /* cp = cp */ ; *cp != '\0' && *cp != ' ' && *cp != '\t'; cp++)
		shellnamelen++;
	if (*cp == '\0')
		goto check_shell;
	*cp++ = '\0';

	/* skip spaces before any argument */
	for ( /* cp = cp */ ; *cp == ' ' || *cp == '\t'; cp++)
		;
	if (*cp == '\0')
		goto check_shell;

	/*
	 * collect the shell argument.  everything after the shell name
	 * is passed as ONE argument; that's the correct (historical)
	 * behaviour.
	 */
	shellarg = cp;
	for ( /* cp = cp */ ; *cp != '\0'; cp++)
		shellarglen++;
	*cp++ = '\0';

check_shell:
#ifdef SETUIDSCRIPTS
	/*
	 * MNT_NOSUID and STRC are already taken care of by check_exec,
	 * so we don't need to worry about them now or later.
	 */
	script_sbits = epp->ep_vap->va_mode & (VSUID | VSGID);
	if (script_sbits != 0) {
		script_uid = epp->ep_vap->va_uid;
		script_gid = epp->ep_vap->va_gid;
	}
#endif
#ifdef FDSCRIPTS
	/*
	 * if the script isn't readable, or it's set-id, then we've
	 * gotta supply a "/dev/fd/..." for the shell to read.
	 * Note that stupid shells (csh) do the wrong thing, and
	 * close all open fd's when they start.  That kills this
	 * method of implementing "safe" set-id and x-only scripts.
	 */
	vn_lock(scriptvp, LK_EXCLUSIVE|LK_RETRY, p);
	error = VOP_ACCESS(scriptvp, VREAD, p->p_ucred, p);
	VOP_UNLOCK(scriptvp, 0, p);
	if (error == EACCES
#ifdef SETUIDSCRIPTS
	    || script_sbits
#endif
	    ) {
		struct file *fp;

#ifdef DIAGNOSTIC
		if (epp->ep_flags & EXEC_HASFD)
			panic("exec_script_makecmds: epp already has a fd");
#endif

		if ((error = falloc(p, &fp, &epp->ep_fd)))
			goto fail;

		/* the new fd keeps the script vnode reference alive */
		epp->ep_flags |= EXEC_HASFD;
		fp->f_type = DTYPE_VNODE;
		fp->f_ops = &vnops;
		fp->f_data = (caddr_t) scriptvp;
		fp->f_flag = FREAD;
		FILE_SET_MATURE(fp);
	}
#endif

	/* set up the parameters for the recursive check_exec() call */
	epp->ep_ndp->ni_dirp = shellname;
	epp->ep_ndp->ni_segflg = UIO_SYSSPACE;
	epp->ep_flags |= EXEC_INDIR;

	/* and set up the fake args list, for later */
	/* at most: shell name, optional shell arg, script path, NULL */
	MALLOC(shellargp, char **, 4 * sizeof(char *), M_EXEC, M_WAITOK);
	tmpsap = shellargp;
	*tmpsap = malloc(shellnamelen + 1, M_EXEC, M_WAITOK);
	strlcpy(*tmpsap++, shellname, shellnamelen + 1);
	if (shellarg != NULL) {
		*tmpsap = malloc(shellarglen + 1, M_EXEC, M_WAITOK);
		strlcpy(*tmpsap++, shellarg, shellarglen + 1);
	}
	*tmpsap = malloc(MAXPATHLEN, M_EXEC, M_WAITOK);
#ifdef FDSCRIPTS
	if ((epp->ep_flags & EXEC_HASFD) == 0) {
#endif
		/* normally can't fail, but check for it if diagnostic */
#if NSYSTRACE > 0
		if (ISSET(p->p_flag, P_SYSTRACE)) {
			error = systrace_scriptname(p, *tmpsap);
			if (error == 0)
				tmpsap++;
			else
				/*
				 * Since systrace_scriptname() provides a
				 * convenience, not a security issue, we are
				 * safe to do this.
				 */
				error = copystr(epp->ep_name, *tmpsap++,
				    MAXPATHLEN, NULL);
		} else
			error = copyinstr(epp->ep_name, *tmpsap++,
			    MAXPATHLEN, NULL);
#else
		error = copyinstr(epp->ep_name, *tmpsap++, MAXPATHLEN,
		    (size_t *)0);
#endif
#ifdef DIAGNOSTIC
		if (error != 0)
			panic("exec_script: copyinstr couldn't fail");
#endif
#ifdef FDSCRIPTS
	} else
		snprintf(*tmpsap++, MAXPATHLEN, "/dev/fd/%d", epp->ep_fd);
#endif
	*tmpsap = NULL;

	/*
	 * mark the header we have as invalid; check_exec will read
	 * the header from the new executable
	 */
	epp->ep_hdrvalid = 0;

	if ((error = check_exec(p, epp)) == 0) {
		/* note that we've clobbered the header */
		epp->ep_flags |= EXEC_DESTR;

		/*
		 * It succeeded.  Unlock the script and
		 * close it if we aren't using it any more.
		 * Also, set things up so that the fake args
		 * list will be used.
		 */
		if ((epp->ep_flags & EXEC_HASFD) == 0)
			vn_close(scriptvp, FREAD, p->p_ucred, p);

		/* free the old pathname buffer */
		pool_put(&namei_pool, oldpnbuf);

		epp->ep_flags |= (EXEC_HASARGL | EXEC_SKIPARG);
		epp->ep_fa = shellargp;
#ifdef SETUIDSCRIPTS
		/*
		 * set things up so that set-id scripts will be
		 * handled appropriately
		 */
		epp->ep_vap->va_mode |= script_sbits;
		if (script_sbits & VSUID)
			epp->ep_vap->va_uid = script_uid;
		if (script_sbits & VSGID)
			epp->ep_vap->va_gid = script_gid;
#endif
		return (0);
	}

	/* XXX oldpnbuf not set for "goto fail" path */
	/* NOTE(review): "goto fail" jumps past this restore, so on the
	 * falloc-failure path cn_pnbuf keeps its (unchanged) value --
	 * harmless today since nothing modified it before fail, but
	 * fragile if code is added above.  TODO confirm. */
	epp->ep_ndp->ni_cnd.cn_pnbuf = oldpnbuf;
#ifdef FDSCRIPTS
fail:
#endif
	/* note that we've clobbered the header */
	epp->ep_flags |= EXEC_DESTR;

	/* kill the opened file descriptor, else close the file */
	if (epp->ep_flags & EXEC_HASFD) {
		epp->ep_flags &= ~EXEC_HASFD;
		(void) fdrelease(p, epp->ep_fd);
	} else
		vn_close(scriptvp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, epp->ep_ndp->ni_cnd.cn_pnbuf);

	/* free the fake arg list, because we're not returning it */
	if ((tmpsap = shellargp) != NULL) {
		while (*tmpsap != NULL) {
			free(*tmpsap, M_EXEC);
			tmpsap++;
		}
		FREE(shellargp, M_EXEC);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);
	return error;
}
/*
 * linux_sys_uselib(2): map a Linux a.out shared library into the calling
 * process' address space.
 *
 * Looks up the library, reads its a.out header, builds a small vmcmd set
 * (text+data mapping plus an optional zero-fill bss mapping), runs the
 * vmcmds, and releases everything before returning.
 *
 * Returns 0 on success or an errno.  All error paths must vrele() the
 * vnode obtained from namei().
 */
int
linux_sys_uselib(struct proc *p, void *v, register_t *retval)
{
	struct linux_sys_uselib_args /* {
		syscallarg(char *) path;
	} */ *uap = v;
	caddr_t sg;
	long bsize, dsize, tsize, taddr, baddr, daddr;
	struct nameidata ni;
	struct vnode *vp;
	struct exec hdr;
	struct exec_vmcmd_set vcset;
	int i, magic, error;
	size_t rem;

	sg = stackgap_init(p->p_emul);
	LINUX_CHECK_ALT_EXIST(p, &sg, SCARG(uap, path));

	NDINIT(&ni, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	if ((error = namei(&ni)))
		return (error);

	vp = ni.ni_vp;

	/* Read the a.out header from the start of the file. */
	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t) &hdr, LINUX_AOUT_HDR_SIZE,
	    0, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &rem, p))) {
		vrele(vp);
		return (error);
	}

	/* Short read: not a valid library. */
	if (rem != 0) {
		vrele(vp);
		return (ENOEXEC);
	}

	if (LINUX_N_MACHTYPE(&hdr) != LINUX_MID_MACHINE) {
		/*
		 * Fix: this path previously returned without releasing
		 * the vnode reference taken by namei(), leaking it.
		 */
		vrele(vp);
		return (ENOEXEC);
	}

	magic = LINUX_N_MAGIC(&hdr);
	taddr = trunc_page(hdr.a_entry);
	tsize = hdr.a_text;
	daddr = taddr + tsize;
	dsize = hdr.a_data + hdr.a_bss;

	/* Refuse to map text from a file currently open for writing. */
	if ((hdr.a_text != 0 || hdr.a_data != 0) && vp->v_writecount != 0) {
		vrele(vp);
		return (ETXTBSY);
	}
	vn_marktext(vp);

	VMCMDSET_INIT(&vcset);

	/* Map text+data in one command; note the prot includes WRITE. */
	NEW_VMCMD(&vcset,
	    magic == ZMAGIC ? vmcmd_map_readvn : vmcmd_map_pagedvn,
	    hdr.a_text + hdr.a_data, taddr, vp, LINUX_N_TXTOFF(hdr, magic),
	    VM_PROT_READ|VM_PROT_EXECUTE|VM_PROT_WRITE);

	/* Zero-fill any bss beyond the file-backed pages. */
	baddr = round_page(daddr + hdr.a_data);
	bsize = daddr + dsize - baddr;
	if (bsize > 0) {
		NEW_VMCMD(&vcset, vmcmd_map_zero, bsize, baddr,
		    NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	}

	/* Execute the vmcmds, stopping at the first failure. */
	for (i = 0; i < vcset.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &vcset.evs_cmds[i];
		error = (*vcp->ev_proc)(p, vcp);
	}

	kill_vmcmds(&vcset);

	vrele(vp);
	return (error);
}
/* * Phase II of load. It is now safe to load the interpreter. Info collected * when loading the program is available for setup of the interpreter. */ int ELFNAME2(exec,fixup)(struct proc *p, struct exec_package *epp) { char *interp; int error; struct elf_args *ap; AuxInfo ai[ELF_AUX_ENTRIES], *a; Elf_Addr pos = epp->ep_interp_pos; if (epp->ep_interp == NULL) { return (0); } interp = (char *)epp->ep_interp; ap = (struct elf_args *)epp->ep_emul_arg; if ((error = ELFNAME(load_file)(p, interp, epp, ap, &pos)) != 0) { free((char *)ap, M_TEMP); free((char *)interp, M_TEMP); kill_vmcmds(&epp->ep_vmcmds); return (error); } /* * We have to do this ourselves... */ error = exec_process_vmcmds(p, epp); /* * Push extra arguments on the stack needed by dynamically * linked binaries */ if (error == 0) { a = ai; a->au_id = AUX_phdr; a->au_v = ap->arg_phaddr; a++; a->au_id = AUX_phent; a->au_v = ap->arg_phentsize; a++; a->au_id = AUX_phnum; a->au_v = ap->arg_phnum; a++; a->au_id = AUX_pagesz; a->au_v = PAGE_SIZE; a++; a->au_id = AUX_base; a->au_v = ap->arg_interp; a++; a->au_id = AUX_flags; a->au_v = 0; a++; a->au_id = AUX_entry; a->au_v = ap->arg_entry; a++; a->au_id = AUX_null; a->au_v = 0; a++; error = copyout(ai, epp->ep_emul_argp, sizeof ai); } free((char *)ap, M_TEMP); free((char *)interp, M_TEMP); return (error); }
/* * Prepare an Elf binary's exec package * * First, set of the various offsets/lengths in the exec package. * * Then, mark the text image busy (so it can be demand paged) or error out if * this is not possible. Finally, set up vmcmds for the text, data, bss, and * stack segments. */ int ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp) { Elf_Ehdr *eh = epp->ep_hdr; Elf_Phdr *ph, *pp; Elf_Addr phdr = 0; int error, i; char interp[MAXPATHLEN]; u_long pos = 0, phsize; u_int8_t os = OOS_NULL; if (epp->ep_hdrvalid < sizeof(Elf_Ehdr)) return (ENOEXEC); if (ELFNAME(check_header)(eh, ET_EXEC) && ELFNAME(olf_check_header)(eh, ET_EXEC, &os)) return (ENOEXEC); /* * check if vnode is in open for writing, because we want to demand- * page out of it. if it is, don't do it, for various reasons. */ if (epp->ep_vp->v_writecount != 0) { #ifdef DIAGNOSTIC if (epp->ep_vp->v_flag & VTEXT) panic("exec: a VTEXT vnode has writecount != 0"); #endif return (ETXTBSY); } /* * Allocate space to hold all the program headers, and read them * from the file */ phsize = eh->e_phnum * sizeof(Elf_Phdr); ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK); if ((error = ELFNAME(read_from)(p, epp->ep_vp, eh->e_phoff, (caddr_t)ph, phsize)) != 0) goto bad; epp->ep_tsize = ELFDEFNNAME(NO_ADDR); epp->ep_dsize = ELFDEFNNAME(NO_ADDR); interp[0] = '\0'; for (i = 0; i < eh->e_phnum; i++) { pp = &ph[i]; if (pp->p_type == PT_INTERP) { if (pp->p_filesz >= sizeof(interp)) goto bad; if ((error = ELFNAME(read_from)(p, epp->ep_vp, pp->p_offset, (caddr_t)interp, pp->p_filesz)) != 0) goto bad; break; } } /* * OK, we want a slightly different twist of the * standard emulation package for "real" elf. */ epp->ep_emul = &ELFNAMEEND(emul); pos = ELFDEFNNAME(NO_ADDR); /* * On the same architecture, we may be emulating different systems. * See which one will accept this executable. * * Probe functions would normally see if the interpreter (if any) * exists. 
Emulation packages may possibly replace the interpreter in * interp[] with a changed path (/emul/xxx/<path>), and also * set the ep_emul field in the exec package structure. */ error = ENOEXEC; p->p_os = OOS_OPENBSD; #ifdef NATIVE_EXEC_ELF if (ELFNAME(os_pt_note)(p, epp, epp->ep_hdr, "OpenBSD", 8, 4) == 0) { goto native; } #endif for (i = 0; i < sizeof(ELFNAME(probes)) / sizeof(ELFNAME(probes)[0]) && error; i++) { if (os == OOS_NULL || ((1 << os) & ELFNAME(probes)[i].os_mask)) error = ELFNAME(probes)[i].func ? (*ELFNAME(probes)[i].func)(p, epp, interp, &pos, &os) : 0; } if (!error) p->p_os = os; #ifndef NATIVE_EXEC_ELF else goto bad; #else native: #endif /* NATIVE_EXEC_ELF */ /* * Load all the necessary sections */ for (i = 0; i < eh->e_phnum; i++) { Elf_Addr addr = ELFDEFNNAME(NO_ADDR), size = 0; int prot = 0; pp = &ph[i]; switch (ph[i].p_type) { case PT_LOAD: /* * Calcuates size of text and data segments * by starting at first and going to end of last. * 'rwx' sections are treated as data. * this is correct for BSS_PLT, but may not be * for DATA_PLT, is fine for TEXT_PLT. 
*/ ELFNAME(load_psection)(&epp->ep_vmcmds, epp->ep_vp, &ph[i], &addr, &size, &prot, 0); /* * Decide whether it's text or data by looking * at the protection of the section */ if (prot & VM_PROT_WRITE) { /* data section */ if (epp->ep_dsize == ELFDEFNNAME(NO_ADDR)) { epp->ep_daddr = addr; epp->ep_dsize = size; } else { if (addr < epp->ep_daddr) { epp->ep_dsize = epp->ep_dsize + epp->ep_daddr - addr; epp->ep_daddr = addr; } else epp->ep_dsize = addr+size - epp->ep_daddr; } } else if (prot & VM_PROT_EXECUTE) { /* text section */ if (epp->ep_tsize == ELFDEFNNAME(NO_ADDR)) { epp->ep_taddr = addr; epp->ep_tsize = size; } else { if (addr < epp->ep_taddr) { epp->ep_tsize = epp->ep_tsize + epp->ep_taddr - addr; epp->ep_taddr = addr; } else epp->ep_tsize = addr+size - epp->ep_taddr; } } break; case PT_SHLIB: error = ENOEXEC; goto bad; case PT_INTERP: /* Already did this one */ case PT_DYNAMIC: case PT_NOTE: break; case PT_PHDR: /* Note address of program headers (in text segment) */ phdr = pp->p_vaddr; break; default: /* * Not fatal, we don't need to understand everything * :-) */ break; } } /* * Check if we found a dynamically linked binary and arrange to load * it's interpreter when the exec file is released. */ if (interp[0]) { char *ip; struct elf_args *ap; ip = (char *)malloc(MAXPATHLEN, M_TEMP, M_WAITOK); ap = (struct elf_args *) malloc(sizeof(struct elf_args), M_TEMP, M_WAITOK); bcopy(interp, ip, MAXPATHLEN); epp->ep_interp = ip; epp->ep_interp_pos = pos; ap->arg_phaddr = phdr; ap->arg_phentsize = eh->e_phentsize; ap->arg_phnum = eh->e_phnum; ap->arg_entry = eh->e_entry; ap->arg_os = os; epp->ep_emul_arg = ap; epp->ep_entry = eh->e_entry; /* keep check_exec() happy */ } else { epp->ep_interp = NULL; epp->ep_entry = eh->e_entry; } #if defined(COMPAT_SVR4) && defined(i386) #ifndef ELF_MAP_PAGE_ZERO /* Dell SVR4 maps page zero, yeuch! 
*/ if (p->p_os == OOS_DELL) #endif NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0, epp->ep_vp, 0, VM_PROT_READ); #endif free((char *)ph, M_TEMP); vn_marktext(epp->ep_vp); return (exec_setup_stack(p, epp)); bad: free((char *)ph, M_TEMP); kill_vmcmds(&epp->ep_vmcmds); return (ENOEXEC); }