/* * sunos_exec_aout_prep_zmagic(): Prepare a SunOS ZMAGIC binary's exec package * * First, set of the various offsets/lengths in the exec package. * * Then, mark the text image busy (so it can be demand paged) or error * out if this is not possible. Finally, set up vmcmds for the * text, data, bss, and stack segments. */ int sunos_exec_aout_prep_zmagic(struct lwp *l, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; int error; epp->ep_taddr = SUNOS_N_TXTADDR(*execp, ZMAGIC); epp->ep_tsize = execp->a_text; epp->ep_daddr = SUNOS_N_DATADDR(*execp, ZMAGIC); epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; error = vn_marktext(epp->ep_vp); if (error) return (error); /* set up command for text segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text, epp->ep_taddr, epp->ep_vp, SUNOS_N_TXTOFF(*execp, ZMAGIC), VM_PROT_READ|VM_PROT_EXECUTE); /* set up command for data segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_data, epp->ep_daddr, epp->ep_vp, SUNOS_N_DATOFF(*execp, ZMAGIC), VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ if (execp->a_bss) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss, epp->ep_daddr + execp->a_data, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (*epp->ep_esch->es_setup_stack)(l, epp); }
/* * exec_aout_prep_oldzmagic(): * Prepare the vmcmds to build a vmspace for an old ZMAGIC * binary. [386BSD/BSDI/4.4BSD/NetBSD0.8] * * Cloned from exec_aout_prep_zmagic() in kern/exec_aout.c; a more verbose * description of operation is there. * There were copies of this in the mac68k, hp300, and i386 ports. */ int exec_aout_prep_oldzmagic(struct lwp *l, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; int error; epp->ep_taddr = 0; epp->ep_tsize = execp->a_text; epp->ep_daddr = epp->ep_taddr + execp->a_text; epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; error = vn_marktext(epp->ep_vp); if (error) return (error); /* set up command for text segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text, epp->ep_taddr, epp->ep_vp, PAGE_SIZE, /* XXX CLBYTES? */ VM_PROT_READ|VM_PROT_EXECUTE); /* set up command for data segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_data, epp->ep_daddr, epp->ep_vp, execp->a_text + PAGE_SIZE, /* XXX CLBYTES? */ VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ if (execp->a_bss) NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss, epp->ep_daddr + execp->a_data, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (*epp->ep_esch->es_setup_stack)(l, epp); }
int exec_linux_aout_prep_qmagic(struct proc *p, struct exec_package *epp) { struct exec *execp = epp->ep_hdr; epp->ep_taddr = LINUX_N_TXTADDR(*execp, QMAGIC); epp->ep_tsize = execp->a_text; epp->ep_daddr = LINUX_N_DATADDR(*execp, QMAGIC); epp->ep_dsize = execp->a_data + execp->a_bss; epp->ep_entry = execp->a_entry; /* * check if vnode is in open for writing, because we want to * demand-page out of it. if it is, don't do it, for various * reasons */ if ((execp->a_text != 0 || execp->a_data != 0) && epp->ep_vp->v_writecount != 0) { #ifdef DIAGNOSTIC if (epp->ep_vp->v_flag & VTEXT) panic("exec: a VTEXT vnode has writecount != 0"); #endif return (ETXTBSY); } vn_marktext(epp->ep_vp); /* set up command for text segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text, epp->ep_taddr, epp->ep_vp, LINUX_N_TXTOFF(*execp, QMAGIC), VM_PROT_READ|VM_PROT_EXECUTE); /* set up command for data segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_data, epp->ep_daddr, epp->ep_vp, LINUX_N_DATOFF(*execp, QMAGIC), VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); /* set up command for bss segment */ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss, epp->ep_daddr + execp->a_data, NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); return (exec_setup_stack(p, epp)); }
int exec_pecoff_makecmds(struct lwp *l, struct exec_package *epp) { int error, peofs; struct pecoff_dos_filehdr *dp = epp->ep_hdr; struct coff_filehdr *fp; struct proc *p; p = l->l_proc; /* * mmap EXE file (PE format) * 1. read header (DOS,PE) * 2. mmap code section (READ|EXEC) * 3. mmap other section, such as data (READ|WRITE|EXEC) */ if (epp->ep_hdrvalid < PECOFF_DOS_HDR_SIZE) { return ENOEXEC; } if ((error = pecoff_signature(l, epp->ep_vp, dp)) != 0) return error; if ((error = vn_marktext(epp->ep_vp)) != 0) return error; peofs = dp->d_peofs + sizeof(signature) - 1; fp = malloc(PECOFF_HDR_SIZE, M_TEMP, M_WAITOK); error = exec_read_from(l, epp->ep_vp, peofs, fp, PECOFF_HDR_SIZE); if (error) { free(fp, M_TEMP); return error; } error = exec_pecoff_coff_makecmds(l, epp, fp, peofs); if (error != 0) kill_vmcmds(&epp->ep_vmcmds); free(fp, M_TEMP); return error; }
/*
 * pecoff_load_file():
 *	load(mmap) file.  for dynamic linker (ld.so.dll)
 *
 * Resolves 'path' through the emulation interpreter lookup, validates
 * the resulting vnode (regular file, executable, mount allows exec),
 * marks it as a text image, reads the DOS/PE/COFF headers, and queues
 * vmcmds in 'vcset' for every loadable section, relocated by the image
 * base 'w_base'.  On success, *entry and *argp describe the image.
 * The vnode reference obtained from emul_find_interp() is released on
 * every path.
 */
int
pecoff_load_file(struct lwp *l, struct exec_package *epp, const char *path,
    struct exec_vmcmd_set *vcset, u_long *entry, struct pecoff_args *argp)
{
	int error, peofs, scnsiz, i;
	struct vnode *vp;
	struct vattr attr;
	struct pecoff_dos_filehdr dh;
	struct coff_filehdr *fp = 0;	/* 0 => not yet allocated ('bad:') */
	struct coff_aouthdr *ap;
	struct pecoff_opthdr *wp;
	struct coff_scnhdr *sh = 0;	/* likewise */

	/* Find the interpreter; on success the vnode is in ep_interp. */
	error = emul_find_interp(l, epp, path);
	if (error != 0)
		return error;

	/* Take ownership of the interpreter vnode from the package. */
	vp = epp->ep_interp;
	epp->ep_interp = NULL;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * If it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}

	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}

	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

	/* Mark as busy text so it can be demand paged. */
	if ((error = vn_marktext(vp)))
		goto badunlock;

	VOP_UNLOCK(vp, 0);

	/*
	 * Read header.
	 */
	error = exec_read_from(l, vp, 0, &dh, sizeof(dh));
	if (error != 0)
		goto bad;
	if ((error = pecoff_signature(l, vp, &dh)) != 0)
		goto bad;
	fp = malloc(PECOFF_HDR_SIZE, M_TEMP, M_WAITOK);
	peofs = dh.d_peofs + sizeof(signature) - 1;
	error = exec_read_from(l, vp, peofs, fp, PECOFF_HDR_SIZE);
	if (error != 0)
		goto bad;
	if (COFF_BADMAG(fp)) {
		error = ENOEXEC;
		goto bad;
	}

	/* a.out header and PE optional header follow the file header. */
	ap = (void *)((char *)fp + sizeof(struct coff_filehdr));
	wp = (void *)((char *)ap + sizeof(struct coff_aouthdr));

	/* read section header */
	scnsiz = sizeof(struct coff_scnhdr) * fp->f_nscns;
	sh = malloc(scnsiz, M_TEMP, M_WAITOK);
	if ((error = exec_read_from(l, vp, peofs + PECOFF_HDR_SIZE, sh,
	    scnsiz)) != 0)
		goto bad;

	/*
	 * Read section header, and mmap.
	 */
	for (i = 0; i < fp->f_nscns; i++) {
		int prot = 0;
		long addr;
		u_long size;

		if (sh[i].s_flags & COFF_STYP_DISCARD)
			continue;
		/* XXX ?  Skip non-executable "text" sections. */
		if ((sh[i].s_flags & COFF_STYP_TEXT) &&
		    (sh[i].s_flags & COFF_STYP_EXEC) == 0)
			continue;
		/* Only map sections that carry text/data/bss/read bits. */
		if ((sh[i].s_flags & (COFF_STYP_TEXT|COFF_STYP_DATA|
		    COFF_STYP_BSS|COFF_STYP_READ)) == 0)
			continue;
		sh[i].s_vaddr += wp->w_base; /* RVA --> VA */
		pecoff_load_section(vcset, vp, &sh[i], &addr, &size, &prot);
	}

	/* Report entry point, load base and export table to the caller. */
	*entry = wp->w_base + ap->a_entry;
	argp->a_ldbase = wp->w_base;
	argp->a_ldexport = wp->w_imghdr[0].i_vaddr + wp->w_base;

	free(fp, M_TEMP);
	free(sh, M_TEMP);	/*XXXUNCONST*/
	vrele(vp);
	return 0;

badunlock:
	/* Errors taken while the vnode is still locked come through here. */
	VOP_UNLOCK(vp, 0);

bad:
	if (fp != 0)
		free(fp, M_TEMP);
	if (sh != 0)
		free(sh, M_TEMP);	/*XXXUNCONST*/
	vrele(vp);
	return error;
}
/*
 * linux_sys_uselib():
 *	Implement the Linux uselib(2) syscall: map a Linux a.out shared
 *	library into the calling process's address space.
 *
 * Reads the a.out header from the named file, validates machine type
 * and writability, then builds and immediately executes a vmcmd set
 * mapping text+data (and zero-filling bss).  Returns 0 on success or
 * an errno value.
 *
 * Fix: the machine-type mismatch path previously returned ENOEXEC
 * without releasing the vnode reference obtained from namei(), leaking
 * it; add the missing vrele() like every other early-return path.
 */
int
linux_sys_uselib(struct proc *p, void *v, register_t *retval)
{
	struct linux_sys_uselib_args /* {
		syscallarg(char *) path;
	} */ *uap = v;
	caddr_t sg;
	long bsize, dsize, tsize, taddr, baddr, daddr;
	struct nameidata ni;
	struct vnode *vp;
	struct exec hdr;
	struct exec_vmcmd_set vcset;
	int i, magic, error;
	size_t rem;

	sg = stackgap_init(p->p_emul);
	LINUX_CHECK_ALT_EXIST(p, &sg, SCARG(uap, path));

	NDINIT(&ni, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), p);
	if ((error = namei(&ni)))
		return (error);

	vp = ni.ni_vp;

	/* Read the a.out header from the start of the file. */
	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t) &hdr, LINUX_AOUT_HDR_SIZE,
	    0, UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &rem, p))) {
		vrele(vp);
		return (error);
	}

	/* A short read means the file cannot be a valid library. */
	if (rem != 0) {
		vrele(vp);
		return (ENOEXEC);
	}

	if (LINUX_N_MACHTYPE(&hdr) != LINUX_MID_MACHINE) {
		vrele(vp);	/* was leaked before this fix */
		return (ENOEXEC);
	}

	magic = LINUX_N_MAGIC(&hdr);
	taddr = trunc_page(hdr.a_entry);
	tsize = hdr.a_text;
	daddr = taddr + tsize;
	dsize = hdr.a_data + hdr.a_bss;

	/* Refuse to demand-page out of a file that is open for writing. */
	if ((hdr.a_text != 0 || hdr.a_data != 0) && vp->v_writecount != 0) {
		vrele(vp);
		return (ETXTBSY);
	}
	vn_marktext(vp);

	VMCMDSET_INIT(&vcset);

	/* Text and data mapped in one command; ZMAGIC cannot be paged. */
	NEW_VMCMD(&vcset,
	    magic == ZMAGIC ? vmcmd_map_readvn : vmcmd_map_pagedvn,
	    hdr.a_text + hdr.a_data, taddr, vp, LINUX_N_TXTOFF(hdr, magic),
	    VM_PROT_READ|VM_PROT_EXECUTE|VM_PROT_WRITE);

	/* Zero-fill whatever bss extends past the last data page. */
	baddr = round_page(daddr + hdr.a_data);
	bsize = daddr + dsize - baddr;
	if (bsize > 0) {
		NEW_VMCMD(&vcset, vmcmd_map_zero, bsize, baddr,
		    NULLVP, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	}

	/* Execute the vmcmds immediately (error is 0 on entry here). */
	for (i = 0; i < vcset.evs_used && !error; i++) {
		struct exec_vmcmd *vcp;

		vcp = &vcset.evs_cmds[i];
		error = (*vcp->ev_proc)(p, vcp);
	}

	kill_vmcmds(&vcset);

	vrele(vp);

	return (error);
}
/*
 * Prepare an Elf binary's exec package
 *
 * First, set of the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error out if
 * this is not possible.  Finally, set up vmcmds for the text, data, bss, and
 * stack segments.
 */
int
ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0;	/* VA of the program headers, if PT_PHDR seen */
	int error, i;
	char interp[MAXPATHLEN];	/* PT_INTERP path, "" if none */
	u_long pos = 0, phsize;
	u_int8_t os = OOS_NULL;

	/* The caller must have read the full ELF header already. */
	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return (ENOEXEC);

	/* Accept either a plain ELF or an OLF ET_EXEC image. */
	if (ELFNAME(check_header)(eh, ET_EXEC) &&
	    ELFNAME(olf_check_header)(eh, ET_EXEC, &os))
		return (ENOEXEC);

	/*
	 * check if vnode is in open for writing, because we want to demand-
	 * page out of it.  if it is, don't do it, for various reasons.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0");
#endif
		return (ETXTBSY);
	}

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = ELFNAME(read_from)(p, epp->ep_vp, eh->e_phoff, (caddr_t)ph,
	    phsize)) != 0)
		goto bad;

	/* NO_ADDR means "no text/data segment seen yet" below. */
	epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	interp[0] = '\0';

	/* Pull out the PT_INTERP path (dynamic linker), if present. */
	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			/* Path must fit (with NUL) in the buffer. */
			if (pp->p_filesz >= sizeof(interp))
				goto bad;
			if ((error = ELFNAME(read_from)(p, epp->ep_vp,
			    pp->p_offset, (caddr_t)interp, pp->p_filesz)) != 0)
				goto bad;
			break;
		}
	}

	/*
	 * OK, we want a slightly different twist of the
	 * standard emulation package for "real" elf.
	 */
	epp->ep_emul = &ELFNAMEEND(emul);
	pos = ELFDEFNNAME(NO_ADDR);

	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists.  Emulation packages may possibly replace the interpreter in
	 * interp[] with a changed path (/emul/xxx/<path>), and also
	 * set the ep_emul field in the exec package structure.
	 */
	error = ENOEXEC;
	p->p_os = OOS_OPENBSD;
#ifdef NATIVE_EXEC_ELF
	if (ELFNAME(os_pt_note)(p, epp, epp->ep_hdr, "OpenBSD", 8, 4) == 0) {
		goto native;
	}
#endif
	/* Try each registered probe until one accepts the binary. */
	for (i = 0;
	    i < sizeof(ELFNAME(probes)) / sizeof(ELFNAME(probes)[0]) && error;
	    i++) {
		if (os == OOS_NULL || ((1 << os) & ELFNAME(probes)[i].os_mask))
			error = ELFNAME(probes)[i].func ?
			    (*ELFNAME(probes)[i].func)(p, epp, interp, &pos,
			    &os) : 0;
	}
	if (!error)
		p->p_os = os;
#ifndef NATIVE_EXEC_ELF
	else
		goto bad;
#else
native:
#endif /* NATIVE_EXEC_ELF */

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Addr addr = ELFDEFNNAME(NO_ADDR), size = 0;
		int prot = 0;

		pp = &ph[i];

		switch (ph[i].p_type) {
		case PT_LOAD:
			/*
			 * Calcuates size of text and data segments
			 * by starting at first and going to end of last.
			 * 'rwx' sections are treated as data.
			 * this is correct for BSS_PLT, but may not be
			 * for DATA_PLT, is fine for TEXT_PLT.
			 */
			ELFNAME(load_psection)(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, &prot, 0);

			/*
			 * Decide whether it's text or data by looking
			 * at the protection of the section
			 */
			if (prot & VM_PROT_WRITE) {
				/* data section: grow the [daddr, dsize) span */
				if (epp->ep_dsize == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				} else {
					if (addr < epp->ep_daddr) {
						epp->ep_dsize =
						    epp->ep_dsize +
						    epp->ep_daddr -
						    addr;
						epp->ep_daddr = addr;
					} else
						epp->ep_dsize = addr+size -
						    epp->ep_daddr;
				}
			} else if (prot & VM_PROT_EXECUTE) {
				/* text section: grow the [taddr, tsize) span */
				if (epp->ep_tsize == ELFDEFNNAME(NO_ADDR)) {
					epp->ep_taddr = addr;
					epp->ep_tsize = size;
				} else {
					if (addr < epp->ep_taddr) {
						epp->ep_tsize =
						    epp->ep_tsize +
						    epp->ep_taddr -
						    addr;
						epp->ep_taddr = addr;
					} else
						epp->ep_tsize = addr+size -
						    epp->ep_taddr;
				}
			}
			break;

		case PT_SHLIB:
			error = ENOEXEC;
			goto bad;

		case PT_INTERP:
			/* Already did this one */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * it's interpreter when the exec file is released.
	 */
	if (interp[0]) {
		char *ip;
		struct elf_args *ap;

		ip = (char *)malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		ap = (struct elf_args *)
		    malloc(sizeof(struct elf_args), M_TEMP, M_WAITOK);

		bcopy(interp, ip, MAXPATHLEN);
		epp->ep_interp = ip;
		epp->ep_interp_pos = pos;

		/* Saved for the interpreter's AUX vector. */
		ap->arg_phaddr = phdr;
		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;
		ap->arg_os = os;

		epp->ep_emul_arg = ap;
		epp->ep_entry = eh->e_entry; /* keep check_exec() happy */
	} else {
		epp->ep_interp = NULL;
		epp->ep_entry = eh->e_entry;
	}

#if defined(COMPAT_SVR4) && defined(i386)
#ifndef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	if (p->p_os == OOS_DELL)
#endif
		NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
		    epp->ep_vp, 0, VM_PROT_READ);
#endif

	free((char *)ph, M_TEMP);
	vn_marktext(epp->ep_vp);
	return (exec_setup_stack(p, epp));

bad:
	/*
	 * NOTE(review): any specific failure code collected in 'error'
	 * (e.g. from read_from) is discarded here; every bad path reports
	 * ENOEXEC.  Confirm this is intentional before changing it.
	 */
	free((char *)ph, M_TEMP);
	kill_vmcmds(&epp->ep_vmcmds);
	return (ENOEXEC);
}
/*
 * Load a file (interpreter/library) pointed to by path [stolen from
 * coff_load_shlib()].  Made slightly generic so it might be used externally.
 *
 * Looks up 'path', validates it (regular file, exec-allowed mount,
 * readable, valid ET_DYN ELF/OLF header), chooses a base address for the
 * image (honoring a position preset in *last, else asking the VM for a
 * hint), then queues vmcmds for every PT_LOAD section and records the
 * entry point and interpreter base in epp/ap.  *last is updated with the
 * address just past the loaded image.
 */
int
ELFNAME(load_file)(struct proc *p, char *path, struct exec_package *epp,
    struct elf_args *ap, Elf_Addr *last)
{
	int error, i;
	struct nameidata nd;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	u_long phsize;
	char *bp = NULL;
	Elf_Addr addr;	/* NOTE(review): read at 'bad:' even on paths that
			 * never assign it — looks uninitialized on early
			 * errors; confirm callers ignore *last on failure. */
	struct vnode *vp;
	u_int8_t os;	/* Just a dummy in this routine */
	Elf_Phdr *base_ph = NULL;
	struct interp_ld_sec {
		Elf_Addr vaddr;
		u_long memsz;
	} loadmap[ELF_MAX_VALID_PHDR];
	int nload, idx = 0;
	Elf_Addr pos = *last;	/* preset load position, or NO_ADDR */
	int file_align;		/* NOTE(review): only set if a PT_LOAD
				 * exists — ELF_ROUND() below would read it
				 * uninitialized otherwise; verify. */

	bp = path;
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	if ((error = namei(&nd)) != 0) {
		return (error);
	}
	vp = nd.ni_vp;

	/* Only regular files on exec-permitting mounts may be mapped. */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad;
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* Read and validate the ELF header; must be a shared object. */
	if ((error = ELFNAME(read_from)(p, nd.ni_vp, 0,
	    (caddr_t)&eh, sizeof(eh))) != 0)
		goto bad1;

	if (ELFNAME(check_header)(&eh, ET_DYN) &&
	    ELFNAME(olf_check_header)(&eh, ET_DYN, &os)) {
		error = ENOEXEC;
		goto bad1;
	}

	/* Read all program headers. */
	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = (Elf_Phdr *)malloc(phsize, M_TEMP, M_WAITOK);

	if ((error = ELFNAME(read_from)(p, nd.ni_vp, eh.e_phoff, (caddr_t)ph,
	    phsize)) != 0)
		goto bad1;

	/* Build a page-rounded map of every PT_LOAD segment. */
	for (i = 0; i < eh.e_phnum; i++) {
		if (ph[i].p_type == PT_LOAD) {
			loadmap[idx].vaddr = trunc_page(ph[i].p_vaddr);
			loadmap[idx].memsz = round_page (ph[i].p_vaddr +
			    ph[i].p_memsz - loadmap[idx].vaddr);
			file_align = ph[i].p_align;
			idx++;
		}
	}
	nload = idx;

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (pos == ELFDEFNNAME(NO_ADDR)) {
		pos = uvm_map_hint(p, VM_PROT_EXECUTE);
	}

	pos = ELF_ROUND(pos, file_align);
	*last = epp->ep_interp_pos = pos;

	/*
	 * Verify each segment fits at pos; if any collides, slide the
	 * whole image to the found space and restart the scan (i = 0).
	 */
	for (i = 0; i < nload;/**/) {
		vaddr_t addr;	/* shadows the function-scope 'addr' */
		struct uvm_object *uobj;
		off_t uoff;
		size_t size;

#ifdef this_needs_fixing
		if (i == 0) {
			uobj = &vp->v_uvm.u_obj;
			/* need to fix uoff */
		} else {
#endif
			uobj = NULL;
			uoff = 0;
#ifdef this_needs_fixing
		}
#endif

		addr = trunc_page(pos + loadmap[i].vaddr);
		size = round_page(addr + loadmap[i].memsz) - addr;

		/* CRAP - map_findspace does not avoid daddr+MAXDSIZ */
		if ((addr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
		    (addr < (vaddr_t)p->p_vmspace->vm_daddr + MAXDSIZ))
			addr = round_page((vaddr_t)p->p_vmspace->vm_daddr +
			    MAXDSIZ);

		/* First try the exact address, then anywhere that fits. */
		if (uvm_map_findspace(&p->p_vmspace->vm_map, addr, size,
		    &addr, uobj, uoff, 0, UVM_FLAG_FIXED) == NULL) {
			if (uvm_map_findspace(&p->p_vmspace->vm_map, addr, size,
			    &addr, uobj, uoff, 0, 0) == NULL) {
				error = ENOMEM; /* XXX */
				goto bad1;
			}
		}

		if (addr != pos + loadmap[i].vaddr) {
			/* base changed. */
			pos = addr - trunc_page(loadmap[i].vaddr);
			pos = ELF_ROUND(pos,file_align);
			epp->ep_interp_pos = *last = pos;
			i = 0;
			continue;
		}

		i++;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		Elf_Addr size = 0;
		int prot = 0;
		int flags;

		switch (ph[i].p_type) {
		case PT_LOAD:
			/* First PT_LOAD is the base; the rest are relative. */
			if (base_ph == NULL) {
				flags = VMCMD_BASE;
				addr = *last;
				base_ph = &ph[i];
			} else {
				flags = VMCMD_RELATIVE;
				addr = ph[i].p_vaddr - base_ph->p_vaddr;
			}
			ELFNAME(load_psection)(&epp->ep_vmcmds, nd.ni_vp,
			    &ph[i], &addr, &size, &prot, flags);
			/* If entry is within this section it must be text */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
				epp->ep_entry = addr + eh.e_entry -
				    ELF_TRUNC(ph[i].p_vaddr,ph[i].p_align);
				ap->arg_interp = addr;
			}
			addr += size;
			break;

		case PT_DYNAMIC:
		case PT_PHDR:
		case PT_NOTE:
			break;

		default:
			break;
		}
	}

	vn_marktext(nd.ni_vp);

bad1:
	/*
	 * NOTE(review): VOP_CLOSE here without a visible VOP_OPEN in this
	 * function — presumably paired with an open elsewhere; verify.
	 */
	VOP_CLOSE(nd.ni_vp, FREAD, p->p_ucred, p);
bad:
	if (ph != NULL)
		free((char *)ph, M_TEMP);

	*last = addr;
	vput(nd.ni_vp);
	return (error);
}