/* * sunos_exec_aout_prep_zmagic(): Prepare a SunOS ZMAGIC binary's exec package * * First, set of the various offsets/lengths in the exec package. * * Then, mark the text image busy (so it can be demand paged) or error * out if this is not possible. Finally, set up vmcmds for the * text, data, bss, and stack segments. */ int sunos_exec_aout_prep_zmagic(struct lwp *l, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; int error; epp->ep_taddr = SUNOS_N_TXTADDR(*execp, ZMAGIC); epp->ep_tsize = execp->a_text; epp->ep_daddr = SUNOS_N_DATADDR(*execp, ZMAGIC); epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; error = vn_marktext(epp->ep_vp); if (error) return (error); /* set up command for text segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text, epp->ep_taddr, epp->ep_vp, SUNOS_N_TXTOFF(*execp, ZMAGIC), VM_PROT_READ|VM_PROT_EXECUTE); /* set up command for data segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_data, epp->ep_daddr, epp->ep_vp, SUNOS_N_DATOFF(*execp, ZMAGIC), VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ if (execp->a_bss) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss, epp->ep_daddr + execp->a_data, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (*epp->ep_esch->es_setup_stack)(l, epp); }
/* * sunos_exec_aout_prep_omagic(): Prepare a SunOS OMAGIC binary's exec package */ int sunos_exec_aout_prep_omagic(struct lwp *l, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; long bsize, baddr; epp->ep_taddr = SUNOS_N_TXTADDR(*execp, OMAGIC); epp->ep_tsize = execp->a_text; epp->ep_daddr = SUNOS_N_DATADDR(*execp, OMAGIC); epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; /* set up command for text and data segments */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, execp->a_text + execp->a_data, epp->ep_taddr, epp->ep_vp, SUNOS_N_TXTOFF(*execp, OMAGIC), VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ baddr = roundup(epp->ep_daddr + execp->a_data, PAGE_SIZE); bsize = epp->ep_daddr + epp->ep_dsize - baddr; if (bsize > 0) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bsize, baddr, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (*epp->ep_esch->es_setup_stack)(l, epp); }
/*
 * exec_coff_prep_omagic(): Prepare a COFF OMAGIC binary's exec package.
 *
 * Text and data are read from the file with a single vmcmd (they are
 * contiguous at COFF_TXTOFF); bss, if any, is zero-filled just past the
 * data segment.  The bss placement differs per platform: on SH the
 * address is rounded to COFF_LDPGSZ, elsewhere it uses the file's
 * segment-alignment rule.
 */
int
exec_coff_prep_omagic(struct lwp *l, struct exec_package *epp,
    struct coff_filehdr *fp, struct coff_aouthdr *ap)
{
	/* segment addresses/sizes come straight from the a.out-style header */
	epp->ep_taddr = COFF_SEGMENT_ALIGN(fp, ap, ap->a_tstart);
	epp->ep_tsize = ap->a_tsize;
	epp->ep_daddr = COFF_SEGMENT_ALIGN(fp, ap, ap->a_dstart);
	epp->ep_dsize = ap->a_dsize;
	epp->ep_entry = ap->a_entry;

	/* set up command for text and data segments (one contiguous read) */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
	    ap->a_tsize + ap->a_dsize, epp->ep_taddr, epp->ep_vp,
	    COFF_TXTOFF(fp, ap),
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);

	/* set up command for bss segment */
#ifdef __sh__
	/* SH: bss starts at the load-page-rounded end of data */
	if (ap->a_bsize > 0)
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, ap->a_bsize,
		    COFF_ROUND(ap->a_dstart + ap->a_dsize, COFF_LDPGSZ),
		    NULLVP, 0,
		    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
#else
	/* other platforms: bss starts at the segment-aligned end of data */
	if (ap->a_bsize > 0)
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, ap->a_bsize,
		    COFF_SEGMENT_ALIGN(fp, ap, ap->a_dstart + ap->a_dsize),
		    NULLVP, 0,
		    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
#endif

	return (*epp->ep_esch->es_setup_stack)(l, epp);
}
/*
 * pecoff_load_section(): mmap one PECOFF section.
 *
 * Computes the page-aligned mapping address and rounded size for the
 * section, derives the protection bits from the section flags, and
 * queues either a demand-paged mapping (when both the virtual address
 * and the file offset are suitably aligned) or a plain read.  If the
 * section's in-memory size (s_paddr) exceeds the mapped file size, the
 * remainder is zero-filled.  *addr, *size, and *prot are output
 * parameters for the caller's segment bookkeeping.
 */
void
pecoff_load_section(struct exec_vmcmd_set *vcset, struct vnode *vp,
    struct coff_scnhdr *sh, long *addr, u_long *size, int *prot)
{
	u_long diff, offset;

	/* align the mapping down to a page boundary; `diff' is the slop */
	*addr = COFF_ALIGN(sh->s_vaddr);
	diff = (sh->s_vaddr - *addr);
	/* back the file offset up by the same slop so bytes line up */
	offset = sh->s_scnptr - diff;
	*size = COFF_ROUND(sh->s_size + diff, COFF_LDPGSZ);

	/* accumulate protection bits from the section flags */
	*prot |= (sh->s_flags & COFF_STYP_EXEC) ? VM_PROT_EXECUTE : 0;
	*prot |= (sh->s_flags & COFF_STYP_READ) ? VM_PROT_READ : 0;
	*prot |= (sh->s_flags & COFF_STYP_WRITE) ? VM_PROT_WRITE : 0;

	/* demand-page only when VA and file offset are both aligned */
	if (diff == 0 && offset == COFF_ALIGN(offset))
		NEW_VMCMD(vcset, vmcmd_map_pagedvn, *size, *addr, vp,
		    offset, *prot);
	else
		NEW_VMCMD(vcset, vmcmd_map_readvn, sh->s_size, sh->s_vaddr,
		    vp, sh->s_scnptr, *prot);

	/*
	 * Memory size larger than what the file provides: zero-fill the
	 * tail (e.g. a section with trailing bss-like space).
	 */
	if (*size < sh->s_paddr) {
		u_long baddr, bsize;

		baddr = *addr + COFF_ROUND(*size, COFF_LDPGSZ);
		bsize = sh->s_paddr - COFF_ROUND(*size, COFF_LDPGSZ);
		DPRINTF(("additional zero space (addr %lx size %ld)\n",
		    baddr, bsize));
		NEW_VMCMD(vcset, vmcmd_map_zero, bsize, baddr,
		    NULLVP, 0, *prot);
		/* report the grown size back to the caller */
		*size = COFF_ROUND(sh->s_paddr, COFF_LDPGSZ);
	}
	DPRINTF(("section %s loaded. (addr %lx size %ld prot %d)\n",
	    sh->s_name, sh->s_vaddr, sh->s_size, *prot));
}
int exec_linux_aout_prep_nmagic(struct proc *p, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; long bsize, baddr; epp->ep_taddr = LINUX_N_TXTADDR(*execp, NMAGIC); epp->ep_tsize = execp->a_text; epp->ep_daddr = LINUX_N_DATADDR(*execp, NMAGIC); epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; /* set up command for text segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, execp->a_text, epp->ep_taddr, epp->ep_vp, LINUX_N_TXTOFF(*execp, NMAGIC), VM_PROT_READ|VM_PROT_EXECUTE); /* set up command for data segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, execp->a_data, epp->ep_daddr, epp->ep_vp, LINUX_N_DATOFF(*execp, NMAGIC), VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ baddr = round_page(epp->ep_daddr + execp->a_data); bsize = epp->ep_daddr + epp->ep_dsize - baddr; if (bsize > 0) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bsize, baddr, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (exec_setup_stack(p, epp)); }
/* * exec_aout_prep_oldnmagic(): * Prepare the vmcmds to build a vmspace for an old NMAGIC * binary. [BSDI] * * Cloned from exec_aout_prep_nmagic() in kern/exec_aout.c; with text starting * at 0. * XXX: There must be a better way to share this code. */ int exec_aout_prep_oldnmagic(struct lwp *l, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; long bsize, baddr; epp->ep_taddr = 0; epp->ep_tsize = execp->a_text; epp->ep_daddr = roundup(epp->ep_taddr + execp->a_text, AOUT_LDPGSZ); epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; /* set up command for text segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, execp->a_text, epp->ep_taddr, epp->ep_vp, sizeof(struct exec), VM_PROT_READ|VM_PROT_EXECUTE); /* set up command for data segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, execp->a_data, epp->ep_daddr, epp->ep_vp, execp->a_text + sizeof(struct exec), VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ baddr = roundup(epp->ep_daddr + execp->a_data, PAGE_SIZE); bsize = epp->ep_daddr + epp->ep_dsize - baddr; if (bsize > 0) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bsize, baddr, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (*epp->ep_esch->es_setup_stack)(l, epp); }
/* * exec_aout_prep_oldzmagic(): * Prepare the vmcmds to build a vmspace for an old ZMAGIC * binary. [386BSD/BSDI/4.4BSD/NetBSD0.8] * * Cloned from exec_aout_prep_zmagic() in kern/exec_aout.c; a more verbose * description of operation is there. * There were copies of this in the mac68k, hp300, and i386 ports. */ int exec_aout_prep_oldzmagic(struct lwp *l, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; int error; epp->ep_taddr = 0; epp->ep_tsize = execp->a_text; epp->ep_daddr = epp->ep_taddr + execp->a_text; epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; error = vn_marktext(epp->ep_vp); if (error) return (error); /* set up command for text segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text, epp->ep_taddr, epp->ep_vp, PAGE_SIZE, /* XXX CLBYTES? */ VM_PROT_READ|VM_PROT_EXECUTE); /* set up command for data segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_data, epp->ep_daddr, epp->ep_vp, execp->a_text + PAGE_SIZE, /* XXX CLBYTES? */ VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ if (execp->a_bss) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss, epp->ep_daddr + execp->a_data, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (*epp->ep_esch->es_setup_stack)(l, epp); }
/*
 * exec_setup_stack(): Set up the stack region of an exec package.
 *
 * Computes the stack bounds (direction depends on
 * MACHINE_STACK_GROWS_UP), optionally shifts them by a random,
 * page-truncated gap when stackgap_random is enabled, and queues the
 * two stack vmcmds.  Always returns 0.
 */
int
exec_setup_stack(struct proc *p, struct exec_package *epp)
{
	vaddr_t sgap;

#ifdef MACHINE_STACK_GROWS_UP
	epp->ep_maxsaddr = USRSTACK;
	epp->ep_minsaddr = USRSTACK + MAXSSIZ;
#else
	epp->ep_maxsaddr = USRSTACK - MAXSSIZ;
	epp->ep_minsaddr = USRSTACK;
#endif
	/* initial accessible stack size comes from the process's rlimit */
	epp->ep_ssize = round_page(p->p_rlimit[RLIMIT_STACK].rlim_cur);

	/*
	 * Randomize the stack base by a page-aligned amount below
	 * stackgap_random (assumed to be a power of two, given the mask).
	 */
	if (stackgap_random != 0) {
		sgap = arc4random() & (stackgap_random - 1);
		sgap = trunc_page(sgap);

#ifdef MACHINE_STACK_GROWS_UP
		epp->ep_maxsaddr += sgap;
		epp->ep_minsaddr += sgap;
#else
		epp->ep_maxsaddr -= sgap;
		epp->ep_minsaddr -= sgap;
#endif
	}

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 *
	 * note that in memory, things assumed to be: 0 ....... ep_maxsaddr
	 * <stack> ep_minsaddr
	 */
#ifdef MACHINE_STACK_GROWS_UP
	/* inaccessible guard region above the usable stack */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr + epp->ep_ssize, NULLVP, 0, PROT_NONE);
	/* usable (read/write) stack region */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    epp->ep_maxsaddr, NULLVP, 0, PROT_READ | PROT_WRITE);
#else
	/* inaccessible guard region below the usable stack */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr, NULLVP, 0, PROT_NONE);
	/* usable (read/write) stack region */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    (epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
	    PROT_READ | PROT_WRITE);
#endif

	return (0);
}
/*
 * exec_linux_aout_prep_qmagic(): Prepare a Linux QMAGIC a.out binary's
 * exec package.
 *
 * QMAGIC text/data are demand-paged straight from the file, so the
 * vnode must not be open for writing; bss is zero-filled after data.
 */
int
exec_linux_aout_prep_qmagic(struct proc *p, struct exec_package *epp)
{
	struct exec *execp = epp->ep_hdr;

	epp->ep_taddr = LINUX_N_TXTADDR(*execp, QMAGIC);
	epp->ep_tsize = execp->a_text;
	epp->ep_daddr = LINUX_N_DATADDR(*execp, QMAGIC);
	epp->ep_dsize = execp->a_data + execp->a_bss;
	epp->ep_entry = execp->a_entry;

	/*
	 * check if vnode is in open for writing, because we want to
	 * demand-page out of it.  if it is, don't do it, for various
	 * reasons
	 */
	if ((execp->a_text != 0 || execp->a_data != 0) &&
	    epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0");
#endif
		return (ETXTBSY);
	}
	/*
	 * NOTE(review): the return value of vn_marktext() is ignored here,
	 * while sunos_exec_aout_prep_zmagic() above checks it — confirm
	 * whether vn_marktext() can fail in this tree and whether the
	 * manual v_writecount check above makes the result redundant.
	 */
	vn_marktext(epp->ep_vp);

	/* set up command for text segment (demand-paged) */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text,
	    epp->ep_taddr, epp->ep_vp, LINUX_N_TXTOFF(*execp, QMAGIC),
	    VM_PROT_READ|VM_PROT_EXECUTE);

	/* set up command for data segment (demand-paged, writable) */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_data,
	    epp->ep_daddr, epp->ep_vp, LINUX_N_DATOFF(*execp, QMAGIC),
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);

	/* set up command for bss segment (zero-filled, unconditionally) */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss,
	    epp->ep_daddr + execp->a_data, NULLVP, 0,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);

	return (exec_setup_stack(p, epp));
}
/* * exec_aout_prep_oldomagic(): * Prepare the vmcmds to build a vmspace for an old OMAGIC * binary. [BSDI] * * Cloned from exec_aout_prep_omagic() in kern/exec_aout.c; with text starting * at 0. * XXX: There must be a better way to share this code. */ int exec_aout_prep_oldomagic(struct lwp *l, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; long dsize, bsize, baddr; epp->ep_taddr = 0; epp->ep_tsize = execp->a_text; epp->ep_daddr = epp->ep_taddr + execp->a_text; epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; /* set up command for text and data segments */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, execp->a_text + execp->a_data, epp->ep_taddr, epp->ep_vp, sizeof(struct exec), VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ baddr = roundup(epp->ep_daddr + execp->a_data, PAGE_SIZE); bsize = epp->ep_daddr + epp->ep_dsize - baddr; if (bsize > 0) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, bsize, baddr, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* * Make sure (# of pages) mapped above equals (vm_tsize + vm_dsize); * obreak(2) relies on this fact. Both `vm_tsize' and `vm_dsize' are * computed (in execve(2)) by rounding *up* `ep_tsize' and `ep_dsize' * respectively to page boundaries. * Compensate `ep_dsize' for the amount of data covered by the last * text page. */ dsize = epp->ep_dsize + execp->a_text - roundup(execp->a_text, PAGE_SIZE); epp->ep_dsize = (dsize > 0) ? dsize : 0; return (*epp->ep_esch->es_setup_stack)(l, epp); }
/*
 * exec_pecoff_prep_zmagic(): Prepare a PECOFF ZMAGIC binary's exec package.
 *
 * Reads the section header table (peofs + PECOFF_HDR_SIZE into the
 * file), maps each non-discarded section (text via
 * pecoff_load_section(); bss via a zero-fill vmcmd; data/read sections
 * via pecoff_load_section()), tracking the lowest data address and the
 * overall data-segment extent in ep_daddr/ep_dsize.  Then allocates the
 * pecoff_args emulation argument block (ownership transfers to the exec
 * package via ep_emul_arg) and loads the dynamic linker.
 */
int
exec_pecoff_prep_zmagic(struct lwp *l, struct exec_package *epp,
    struct coff_filehdr *fp, struct coff_aouthdr *ap, int peofs)
{
	int error, i;
	struct pecoff_opthdr *wp;
	long daddr, baddr, bsize;
	u_long tsize, dsize;
	struct coff_scnhdr *sh;
	struct pecoff_args *argp;
	int scnsiz = sizeof(struct coff_scnhdr) * fp->f_nscns;

	/* the PE optional header immediately follows the a.out header */
	wp = (void *)((char *)ap + sizeof(struct coff_aouthdr));
	epp->ep_tsize = ap->a_tsize;
	/* start with an empty data segment at the top of user VA;
	 * shrunk/grown by the min/max bookkeeping below */
	epp->ep_daddr = VM_MAXUSER_ADDRESS;
	epp->ep_dsize = 0;

	/* read section header */
	sh = malloc(scnsiz, M_TEMP, M_WAITOK);
	error = exec_read_from(l, epp->ep_vp, peofs + PECOFF_HDR_SIZE, sh,
	    scnsiz);
	if (error) {
		free(sh, M_TEMP);
		return error;
	}

	/*
	 * map section
	 */
	for (i = 0; i < fp->f_nscns; i++) {
		int prot = /*0*/VM_PROT_WRITE;
		long s_flags = sh[i].s_flags;

		if ((s_flags & COFF_STYP_DISCARD) != 0)
			continue;
		sh[i].s_vaddr += wp->w_base; /* RVA --> VA */

		if ((s_flags & COFF_STYP_TEXT) != 0) {
			/* set up command for text segment */
			/* DPRINTF(("COFF text addr %lx size %ld offset %ld\n",
			    sh[i].s_vaddr, sh[i].s_size, sh[i].s_scnptr)); */
			pecoff_load_section(&epp->ep_vmcmds, epp->ep_vp,
			    &sh[i], (long *)&epp->ep_taddr, &tsize, &prot);
		} else if ((s_flags & COFF_STYP_BSS) != 0) {
			/* set up command for bss segment */
			baddr = sh[i].s_vaddr;
			bsize = sh[i].s_paddr;
			if (bsize)
				NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
				    bsize, baddr, NULLVP, 0,
				    VM_PROT_READ|VM_PROT_WRITE|
				    VM_PROT_EXECUTE);
			/* extend the data segment to cover this bss */
			epp->ep_daddr = min(epp->ep_daddr, baddr);
			bsize = baddr + bsize - epp->ep_daddr;
			epp->ep_dsize = max(epp->ep_dsize, bsize);
		} else if ((s_flags & (COFF_STYP_DATA|COFF_STYP_READ)) != 0) {
			/* set up command for data segment */
			/* DPRINTF(("COFF data addr %lx size %ld offset %ld\n",
			    sh[i].s_vaddr, sh[i].s_size, sh[i].s_scnptr));*/
			pecoff_load_section(&epp->ep_vmcmds, epp->ep_vp,
			    &sh[i], &daddr, &dsize, &prot);
			/* extend the data segment to cover this section */
			epp->ep_daddr = min(epp->ep_daddr, daddr);
			dsize = daddr + dsize - epp->ep_daddr;
			epp->ep_dsize = max(epp->ep_dsize, dsize);
		}
	}

	/* set up ep_emul_arg (freed by the exec machinery, not here —
	 * presumably via ep_emul_arg ownership; TODO confirm against the
	 * exec cleanup path) */
	argp = malloc(sizeof(struct pecoff_args), M_TEMP, M_WAITOK);
	epp->ep_emul_arg = argp;
	argp->a_abiversion = NETBSDPE_ABI_VERSION;
	argp->a_zero = 0;
	argp->a_entry = wp->w_base + ap->a_entry;
	argp->a_end = epp->ep_daddr + epp->ep_dsize;
	argp->a_opthdr = *wp;

	/*
	 * load dynamic linker
	 */
	error = pecoff_load_file(l, epp, "/usr/libexec/ld.so.dll",
	    &epp->ep_vmcmds, &epp->ep_entry, argp);
	if (error) {
		free(sh, M_TEMP);
		return error;
	}

#if 0
	DPRINTF(("text addr: %lx size: %ld data addr: %lx size: %ld entry: %lx\n",
	    epp->ep_taddr, epp->ep_tsize, epp->ep_daddr, epp->ep_dsize,
	    epp->ep_entry));
#endif

	free(sh, M_TEMP);
	return (*epp->ep_esch->es_setup_stack)(l, epp);
}
/*
 * linux_sys_uselib(): Linux uselib(2) — map a Linux a.out shared
 * library into the current process.
 *
 * Looks up the library path (with /emul alternate-path translation),
 * reads and validates its a.out header, builds a temporary vmcmd set
 * mapping text+data and zero-filling bss, and executes the vmcmds
 * directly into the current vmspace.  Returns 0 on success or an errno.
 */
int
linux_sys_uselib(struct proc *p, void *v, register_t *retval)
{
	struct linux_sys_uselib_args /* {
		syscallarg(char *) path;
	} */ *uap = v;
	caddr_t sg;
	long bsize, dsize, tsize, taddr, baddr, daddr;
	struct nameidata ni;
	struct vnode *vp;
	struct exec hdr;
	struct exec_vmcmd_set vcset;
	int i, magic, error;
	size_t rem;

	sg = stackgap_init(p->p_emul);
	LINUX_CHECK_ALT_EXIST(p, &sg, SCARG(uap, path));

	NDINIT(&ni, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);

	if ((error = namei(&ni)))
		return (error);

	vp = ni.ni_vp;

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t) &hdr,
	    LINUX_AOUT_HDR_SIZE, 0, UIO_SYSSPACE, IO_NODELOCKED,
	    p->p_ucred, &rem, p))) {
		vrele(vp);
		return (error);
	}

	/* a short read means this is not a full a.out header */
	if (rem != 0) {
		vrele(vp);
		return (ENOEXEC);
	}

	if (LINUX_N_MACHTYPE(&hdr) != LINUX_MID_MACHINE) {
		/*
		 * FIX: previously this path returned without vrele(),
		 * leaking the vnode reference taken by namei().
		 */
		vrele(vp);
		return (ENOEXEC);
	}

	magic = LINUX_N_MAGIC(&hdr);
	taddr = trunc_page(hdr.a_entry);
	tsize = hdr.a_text;
	daddr = taddr + tsize;
	dsize = hdr.a_data + hdr.a_bss;

	/* refuse to map a library that is currently open for writing */
	if ((hdr.a_text != 0 || hdr.a_data != 0) && vp->v_writecount != 0) {
		vrele(vp);
		return (ETXTBSY);
	}
	vn_marktext(vp);

	VMCMDSET_INIT(&vcset);

	/* text + data in a single vmcmd */
	NEW_VMCMD(&vcset,
	    magic == ZMAGIC ? vmcmd_map_readvn : vmcmd_map_pagedvn,
	    hdr.a_text + hdr.a_data, taddr, vp,
	    LINUX_N_TXTOFF(hdr, magic),
	    VM_PROT_READ|VM_PROT_EXECUTE|VM_PROT_WRITE);

	/* zero-fill whatever of the data segment lies past the last page */
	baddr = round_page(daddr + hdr.a_data);
	bsize = daddr + dsize - baddr;
	if (bsize > 0) {
		NEW_VMCMD(&vcset, vmcmd_map_zero, bsize, baddr,
		    NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	}

	/* run the vmcmds directly against the current process's vmspace;
	 * stop at the first failure */
	for (i = 0; i < vcset.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &vcset.evs_cmds[i];
		error = (*vcp->ev_proc)(p, vcp);
	}

	kill_vmcmds(&vcset);

	vrele(vp);

	return (error);
}
/*
 * Prepare an Elf binary's exec package
 *
 * First, set of the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error out if
 * this is not possible.  Finally, set up vmcmds for the text, data, bss, and
 * stack segments.
 */
int
ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0;
	int error, i;
	char interp[MAXPATHLEN];
	u_long pos = 0, phsize;
	u_int8_t os = OOS_NULL;

	/* need at least a full ELF header in the exec package */
	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return (ENOEXEC);

	/* accept either a plain ELF or an OLF executable header */
	if (ELFNAME(check_header)(eh, ET_EXEC) &&
	    ELFNAME(olf_check_header)(eh, ET_EXEC, &os))
		return (ENOEXEC);

	/*
	 * check if vnode is in open for writing, because we want to demand-
	 * page out of it.  if it is, don't do it, for various reasons.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0");
#endif
		return (ETXTBSY);
	}

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = ELFNAME(read_from)(p, epp->ep_vp, eh->e_phoff,
	    (caddr_t)ph, phsize)) != 0)
		goto bad;

	/* NO_ADDR marks the text/data segments as not-yet-seen below */
	epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	/* find the interpreter path (dynamic linker), if any */
	interp[0] = '\0';

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			/* path must fit in the buffer, NUL included */
			if (pp->p_filesz >= sizeof(interp))
				goto bad;
			if ((error = ELFNAME(read_from)(p, epp->ep_vp,
			    pp->p_offset, (caddr_t)interp,
			    pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * OK, we want a slightly different twist of the
	 * standard emulation package for "real" elf.
	 */
	epp->ep_emul = &ELFNAMEEND(emul);
	pos = ELFDEFNNAME(NO_ADDR);

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists.  Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>), and also
	 * set the ep_emul field in the exec package structure.
	 */
	error = ENOEXEC;
	p->p_os = OOS_OPENBSD;
#ifdef NATIVE_EXEC_ELF
	/* native binaries carry an "OpenBSD" PT_NOTE; no probing needed */
	if (ELFNAME(os_pt_note)(p, epp, epp->ep_hdr, "OpenBSD", 8, 4) == 0) {
		goto native;
	}
#endif
	/* try each registered emulation probe until one accepts the file */
	for (i = 0;
	    i < sizeof(ELFNAME(probes)) / sizeof(ELFNAME(probes)[0]) && error;
	    i++) {
		if (os == OOS_NULL || ((1 << os) & ELFNAME(probes)[i].os_mask))
			error = ELFNAME(probes)[i].func ?
			    (*ELFNAME(probes)[i].func)(p, epp, interp,
				&pos, &os) :
			    0;
	}
	if (!error)
		p->p_os = os;
#ifndef NATIVE_EXEC_ELF
	else
		goto bad;
#else
native:
#endif /* NATIVE_EXEC_ELF */

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Addr addr = ELFDEFNNAME(NO_ADDR), size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * Calcuates size of text and data segments
			 * by starting at first and going to end of last.
			 * 'rwx' sections are treated as data.
			 * this is correct for BSS_PLT, but may not be
			 * for DATA_PLT, is fine for TEXT_PLT.
			 */
			ELFNAME(load_psection)(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, 0);
			/*
			 * Decide whether it's text or data by looking
			 * at the protection of the section
			 */
			if (prot & VM_PROT_WRITE) {
				/* data section: extend [ep_daddr, +ep_dsize)
				 * to cover this segment */
				if (epp->ep_dsize == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				} else {
					if (addr < epp->ep_daddr) {
						epp->ep_dsize =
						    epp->ep_dsize +
						    epp->ep_daddr -
						    addr;
						epp->ep_daddr = addr;
					} else
						epp->ep_dsize = addr+size -
						    epp->ep_daddr;
				}
			} else if (prot & VM_PROT_EXECUTE) {
				/* text section: extend [ep_taddr, +ep_tsize)
				 * to cover this segment */
				if (epp->ep_tsize == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_taddr = addr;
					epp->ep_tsize = size;
				} else {
					if (addr < epp->ep_taddr) {
						epp->ep_tsize =
						    epp->ep_tsize +
						    epp->ep_taddr -
						    addr;
						epp->ep_taddr = addr;
					} else
						epp->ep_tsize = addr+size -
						    epp->ep_taddr;
				}
			}
			break;

		case PT_SHLIB:
			error = ENOEXEC;
			goto bad;

		case PT_INTERP:
			/* Already did this one */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * it's interpreter when the exec file is released.
	 */
	if (interp[0]) {
		char *ip;
		struct elf_args *ap;

		ip = (char *)malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		ap = (struct elf_args *)
		    malloc(sizeof(struct elf_args), M_TEMP, M_WAITOK);

		bcopy(interp, ip, MAXPATHLEN);
		epp->ep_interp = ip;
		epp->ep_interp_pos = pos;

		/* stash what the interpreter needs in ep_emul_arg */
		ap->arg_phaddr = phdr;
		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;
		ap->arg_os = os;

		epp->ep_emul_arg = ap;
		epp->ep_entry = eh->e_entry; /* keep check_exec() happy */
	} else {
		epp->ep_interp = NULL;
		epp->ep_entry = eh->e_entry;
	}

#if defined(COMPAT_SVR4) && defined(i386)
#ifndef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	if (p->p_os == OOS_DELL)
#endif
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
		    epp->ep_vp, 0, VM_PROT_READ);
#endif

	free((char *)ph, M_TEMP);
	vn_marktext(epp->ep_vp);
	return (exec_setup_stack(p, epp));

bad:
	/* error path: release the program headers and any queued vmcmds */
	free((char *)ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return (ENOEXEC);
}