int udf_mount(struct mount *mp, const char *path, void *data, struct nameidata *ndp, struct proc *p) { struct vnode *devvp; /* vnode of the mount device */ struct udf_args args; size_t len; int error; if ((mp->mnt_flag & MNT_RDONLY) == 0) { mp->mnt_flag |= MNT_RDONLY; printf("udf_mount: enforcing read-only mode\n"); } /* * No root filesystem support. Probably not a big deal, since the * bootloader doesn't understand UDF. */ if (mp->mnt_flag & MNT_ROOTFS) return (EOPNOTSUPP); error = copyin(data, &args, sizeof(struct udf_args)); if (error) return (error); if (args.fspec == NULL) return (EINVAL); NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p); if ((error = namei(ndp))) return (error); devvp = ndp->ni_vp; if (devvp->v_type != VBLK) { vrele(devvp); return (ENOTBLK); } if (major(devvp->v_rdev) >= nblkdev) { vrele(devvp); return (ENXIO); } /* Check the access rights on the mount device */ if (p->p_ucred->cr_uid) { vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); error = VOP_ACCESS(devvp, VREAD, p->p_ucred, p); VOP_UNLOCK(devvp, 0, p); if (error) { vrele(devvp); return (error); } } if ((error = udf_mountfs(devvp, mp, args.lastblock, p))) { vrele(devvp); return (error); } /* * Keep a copy of the mount information. */ copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &len); bzero(mp->mnt_stat.f_mntonname + len, MNAMELEN - len); copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &len); bzero(mp->mnt_stat.f_mntfromname + len, MNAMELEN - len); return (0); };
/*
 * Routine:	macx_swapoff
 * Function:
 *	Syscall interface to remove a file from backing store
 *
 * Holds the kernel funnel for the duration of the call.  On success the
 * "long term" vnode reference taken by macx_swapon() is dropped; the
 * namei() reference taken here is always dropped at swapoff_bailout.
 */
int
macx_swapoff(
	struct macx_swapoff_args *args)
{
	__unused int flags = args->flags;
	kern_return_t kr;
	mach_port_t backing_store;

	struct vnode *vp = 0;
	struct nameidata nd, *ndp;
	struct proc *p = current_proc();
	int i;
	int error;
	boolean_t funnel_state;
	vfs_context_t ctx = vfs_context_current();

	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	backing_store = NULL;
	ndp = &nd;

	/* Only the superuser may remove a backing store. */
	if ((error = suser(kauth_cred_get(), 0)))
		goto swapoff_bailout;

	/*
	 * Get the vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapoff_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	/* Swap files must be regular files. */
	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapoff_bailout;
	}
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapoff_bailout;
#endif

	/* Find the slot macx_swapon() registered for this vnode. */
	for(i = 0; i < MAX_BACKING_STORE; i++) {
		if(bs_port_table[i].vp == vp) {
			break;
		}
	}
	if (i == MAX_BACKING_STORE) {
		/* Not currently in use as a swap file. */
		error = EINVAL;
		goto swapoff_bailout;
	}
	backing_store = (mach_port_t)bs_port_table[i].bs;

	kr = default_pager_backing_store_delete(backing_store);
	switch (kr) {
		case KERN_SUCCESS:
			error = 0;
			bs_port_table[i].vp = 0;
			/* This vnode is no longer used for swapfile */
			vnode_lock_spin(vp);
			CLR(vp->v_flag, VSWAP);
			vnode_unlock(vp);

			/* get rid of macx_swapon() "long term" reference */
			vnode_rele(vp);

			break;
		case KERN_FAILURE:
			/* pager is busy; caller may retry */
			error = EAGAIN;
			break;
		default:
			error = EAGAIN;
			break;
	}

swapoff_bailout:
	/* get rid of macx_swapoff() namei() reference */
	if (vp)
		vnode_put(vp);

	(void) thread_funnel_set(kernel_flock, FALSE);

	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
/*
 * Q_QUOTAON - set up a quota file for a particular filesystem.
 *
 * Called with the mount point busied by the caller; every return path
 * below balances that with vfs_unbusy() (or vfs_rel() after the
 * vfs_ref()/vfs_unbusy() dance around vn_open()).  The QTF_OPENING /
 * QTF_CLOSING flags serialize concurrent quotaon/quotaoff on the same
 * quota type.
 */
int
quotaon(struct thread *td, struct mount *mp, int type, void *fname)
{
	struct ufsmount *ump;
	struct vnode *vp, **vpp;
	struct vnode *mvp;
	struct dquot *dq;
	int error, flags;
	struct nameidata nd;

	error = priv_check(td, PRIV_UFS_QUOTAON);
	if (error != 0) {
		vfs_unbusy(mp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_RDONLY) != 0) {
		vfs_unbusy(mp);
		return (EROFS);
	}

	ump = VFSTOUFS(mp);
	dq = NODQUOT;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
	flags = FREAD | FWRITE;
	/*
	 * Drop the busy count around vn_open() (which may sleep in the
	 * lookup) but keep a reference so the mount cannot disappear.
	 */
	vfs_ref(mp);
	vfs_unbusy(mp);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0) {
		vfs_rel(mp);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	error = vfs_busy(mp, MBF_NOWAIT);
	vfs_rel(mp);
	if (error == 0) {
		/* The quota file must be a regular file. */
		if (vp->v_type != VREG) {
			error = EACCES;
			vfs_unbusy(mp);
		}
	}
	if (error != 0) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		return (error);
	}

	UFS_LOCK(ump);
	if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
		/* Another quotaon/quotaoff is already in progress. */
		UFS_UNLOCK(ump);
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		vfs_unbusy(mp);
		return (EALREADY);
	}
	ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
	UFS_UNLOCK(ump);
	if ((error = dqopen(vp, ump, type)) != 0) {
		VOP_UNLOCK(vp, 0);
		UFS_LOCK(ump);
		ump->um_qflags[type] &= ~(QTF_OPENING|QTF_CLOSING);
		UFS_UNLOCK(ump);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		vfs_unbusy(mp);
		return (error);
	}
	VOP_UNLOCK(vp, 0);
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_QUOTA;
	MNT_IUNLOCK(mp);

	vpp = &ump->um_quotas[type];
	/* If a different quota file was already active, turn it off first. */
	if (*vpp != vp)
		quotaoff1(td, mp, type);

	/*
	 * When the directory vnode containing the quota file is
	 * inactivated, due to the shared lookup of the quota file
	 * vput()ing the dvp, the qsyncvp() call for the containing
	 * directory would try to acquire the quota lock exclusive.
	 * At the same time, lookup already locked the quota vnode
	 * shared.  Mark the quota vnode lock as allowing recursion
	 * and automatically converting shared locks to exclusive.
	 *
	 * Also mark quota vnode as system.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_SYSTEM;
	VN_LOCK_AREC(vp);
	VN_LOCK_DSHARE(vp);
	VOP_UNLOCK(vp, 0);
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	ump->um_cred[type] = crhold(td->td_ucred);
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		/* Per-filesystem grace periods stored in the id-0 record. */
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/*
	 * Allow the getdq from getinoquota below to read the quota
	 * from file.
	 */
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_CLOSING;
	UFS_UNLOCK(ump);
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
again:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			/* vget() failed; restart the whole scan. */
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto again;
		}
		if (vp->v_type == VNON || vp->v_writecount == 0) {
			VOP_UNLOCK(vp, 0);
			vrele(vp);
			continue;
		}
		error = getinoquota(VTOI(vp));
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		if (error) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			break;
		}
	}

	if (error)
		quotaoff_inchange(td, mp, type);
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_OPENING;
	KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
		("quotaon: leaking flags"));
	UFS_UNLOCK(ump);
	vfs_unbusy(mp);

	return (error);
}
/* ARGSUSED */ int sys_auditctl(struct thread *td, struct auditctl_args *uap) { struct nameidata nd; struct ucred *cred; struct vnode *vp; int error = 0; int flags; if (jailed(td->td_ucred)) return (ENOSYS); error = priv_check(td, PRIV_AUDIT_CONTROL); if (error) return (error); vp = NULL; cred = NULL; /* * If a path is specified, open the replacement vnode, perform * validity checks, and grab another reference to the current * credential. * * On Darwin, a NULL path argument is also used to disable audit. */ if (uap->path == NULL) return (EINVAL); NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, UIO_USERSPACE, uap->path, td); flags = AUDIT_OPEN_FLAGS; error = vn_open(&nd, &flags, 0, NULL); if (error) return (error); vp = nd.ni_vp; #ifdef MAC error = mac_system_check_auditctl(td->td_ucred, vp); VOP_UNLOCK(vp, 0); if (error) { vn_close(vp, AUDIT_CLOSE_FLAGS, td->td_ucred, td); return (error); } #else VOP_UNLOCK(vp, 0); #endif NDFREE(&nd, NDF_ONLY_PNBUF); if (vp->v_type != VREG) { vn_close(vp, AUDIT_CLOSE_FLAGS, td->td_ucred, td); return (EINVAL); } cred = td->td_ucred; crhold(cred); /* * XXXAUDIT: Should audit_suspended actually be cleared by * audit_worker? */ audit_suspended = 0; audit_rotate_vnode(cred, vp); return (error); }
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Handles both fresh mounts and MNT_UPDATE requests (ro<->rw changes,
 * reload, export).  GEOM access counts on ump->um_cp are adjusted
 * whenever the write permission of the mount changes.
 */
static int
ext2_mount(struct mount *mp)
{
	struct vfsoptlist *opts;
	struct vnode *devvp;
	struct thread *td;
	struct ext2mount *ump = NULL;
	struct m_ext2fs *fs;
	struct nameidata nd, *ndp = &nd;
	accmode_t accmode;
	char *path, *fspec;
	int error, flags, len;

	td = curthread;
	opts = mp->mnt_optnew;

	if (vfs_filteropt(opts, ext2_opts))
		return (EINVAL);

	/*
	 * NOTE(review): the return value of this vfs_getopt() is not
	 * checked; presumably "fspath" is always supplied by vfs_mount() —
	 * confirm, otherwise `path' is used uninitialized below.
	 */
	vfs_getopt(opts, "fspath", (void **)&path, NULL);
	/* Double-check the length of path.. */
	if (strlen(path) >= MAXMNTLEN)
		return (ENAMETOOLONG);

	fspec = NULL;
	error = vfs_getopt(opts, "from", (void **)&fspec, &len);
	if (!error && fspec[len - 1] != '\0')
		return (EINVAL);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOEXT2(mp);
		fs = ump->um_e2fs;
		error = 0;
		if (fs->e2fs_ronly == 0 &&
		    vfs_flagopt(opts, "ro", NULL, 0)) {
			/* Downgrade r/w -> read-only. */
			error = VFS_SYNC(mp, MNT_WAIT);
			if (error)
				return (error);
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ext2_flushfiles(mp, flags, td);
			if ( error == 0 && fs->e2fs_wasvalid &&
			    ext2_cgupdate(ump, MNT_WAIT) == 0) {
				/* Mark the on-disk superblock clean. */
				fs->e2fs->e2fs_state |= E2FS_ISCLEAN;
				ext2_sbupdate(ump, MNT_WAIT);
			}
			fs->e2fs_ronly = 1;
			vfs_flagopt(opts, "ro", &mp->mnt_flag, MNT_RDONLY);
			/* Drop the GEOM write permission. */
			DROP_GIANT();
			g_topology_lock();
			g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ext2_reload(mp, td);
		if (error)
			return (error);
		devvp = ump->um_devvp;
		if (fs->e2fs_ronly && !vfs_flagopt(opts, "ro", NULL, 0)) {
			/* Upgrade read-only -> r/w. */
			if (ext2_check_sb_compat(fs->e2fs, devvp->v_rdev, 0))
				return (EPERM);

			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			/* Acquire GEOM write permission. */
			DROP_GIANT();
			g_topology_lock();
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);

			if ((fs->e2fs->e2fs_state & E2FS_ISCLEAN) == 0 ||
			    (fs->e2fs->e2fs_state & E2FS_ERRORS)) {
				if (mp->mnt_flag & MNT_FORCE) {
					printf(
"WARNING: %s was not properly dismounted\n", fs->e2fs_fsmnt);
				} else {
					printf(
"WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",
					    fs->e2fs_fsmnt);
					return (EPERM);
				}
			}
			fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN;
			(void)ext2_cgupdate(ump, MNT_WAIT);
			fs->e2fs_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		if (vfs_flagopt(opts, "export", NULL, 0)) {
			/* Process export requests in vfs_mount.c. */
			return (error);
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	if (fspec == NULL)
		return (EINVAL);
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(ndp)) != 0)
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);
	devvp = ndp->ni_vp;

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * XXXRW: VOP_ACCESS() enough?
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	/* devvp is still locked here (LOCKLEAF from namei above). */
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = ext2_mountfs(devvp, mp);
	} else {
		/* Updating: the device must not change. */
		if (devvp != ump->um_devvp) {
			vput(devvp);
			return (EINVAL);	/* needs translation */
		} else
			vput(devvp);
	}
	if (error) {
		/*
		 * NOTE(review): assumes ext2_mountfs() unlocked devvp but
		 * left a reference on failure — confirm against its
		 * implementation.
		 */
		vrele(devvp);
		return (error);
	}
	ump = VFSTOEXT2(mp);
	fs = ump->um_e2fs;

	/*
	 * Note that this strncpy() is ok because of a check at the start
	 * of ext2_mount().
	 */
	strncpy(fs->e2fs_fsmnt, path, MAXMNTLEN);
	fs->e2fs_fsmnt[MAXMNTLEN - 1] = '\0';
	vfs_mountedfrom(mp, fspec);
	return (0);
}
/* ARGSUSED */ int sys_ktrace(struct proc *curp, void *v, register_t *retval) { struct sys_ktrace_args /* { syscallarg(const char *) fname; syscallarg(int) ops; syscallarg(int) facs; syscallarg(pid_t) pid; } */ *uap = v; struct vnode *vp = NULL; struct proc *p = NULL; struct pgrp *pg; int facs = SCARG(uap, facs) & ~((unsigned) KTRFAC_ROOT); int ops = KTROP(SCARG(uap, ops)); int descend = SCARG(uap, ops) & KTRFLAG_DESCEND; int ret = 0; int error = 0; struct nameidata nd; curp->p_traceflag |= KTRFAC_ACTIVE; if (ops != KTROP_CLEAR) { /* * an operation which requires a file argument. */ NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname), curp); if ((error = vn_open(&nd, FREAD|FWRITE|O_NOFOLLOW, 0)) != 0) { curp->p_traceflag &= ~KTRFAC_ACTIVE; return (error); } vp = nd.ni_vp; VOP_UNLOCK(vp, 0, curp); if (vp->v_type != VREG) { (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp); curp->p_traceflag &= ~KTRFAC_ACTIVE; return (EACCES); } } /* * Clear all uses of the tracefile */ if (ops == KTROP_CLEARFILE) { for (p = LIST_FIRST(&allproc); p; p = LIST_NEXT(p, p_list)) { if (p->p_tracep == vp) { if (ktrcanset(curp, p)) { p->p_traceflag = 0; ktrsettracevnode(p, NULL); } else error = EPERM; } } goto done; } /* * need something to (un)trace (XXX - why is this here?) */ if (!facs) { error = EINVAL; goto done; } /* * do it */ if (SCARG(uap, pid) < 0) { /* * by process group */ pg = pgfind(-SCARG(uap, pid)); if (pg == NULL) { error = ESRCH; goto done; } LIST_FOREACH(p, &pg->pg_members, p_pglist) if (descend) ret |= ktrsetchildren(curp, p, ops, facs, vp); else ret |= ktrops(curp, p, ops, facs, vp); } else {
/*
 * Old (4.3BSD-compatible) lstat(2).
 *
 * Unlike the modern call, a symlink is reported mostly with the
 * attributes of its containing directory: only mode type bits, size,
 * link count and block count come from the link itself.
 */
/* ARGSUSED */
int
compat_43_sys_lstat(struct lwp *l, const struct compat_43_sys_lstat_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(char *) path;
		syscallarg(struct ostat *) ub;
	} */
	struct vnode *vp, *dvp;
	struct stat sb, sb1;
	struct stat43 osb;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;
	int ndflags;

	error = pathbuf_copyin(SCARG(uap, path), &pb);
	if (error) {
		return error;
	}

	ndflags = NOFOLLOW | LOCKLEAF | LOCKPARENT | TRYEMULROOT;
again:
	NDINIT(&nd, LOOKUP, ndflags, pb);
	if ((error = namei(&nd))) {
		if (error == EISDIR && (ndflags & LOCKPARENT) != 0) {
			/*
			 * Should only happen on '/'. Retry without LOCKPARENT;
			 * this is safe since the vnode won't be a VLNK.
			 */
			ndflags &= ~LOCKPARENT;
			goto again;
		}
		pathbuf_destroy(pb);
		return (error);
	}
	/*
	 * For symbolic links, always return the attributes of its
	 * containing directory, except for mode, size, and links.
	 */
	vp = nd.ni_vp;
	dvp = nd.ni_dvp;	/* only valid while LOCKPARENT is set */
	pathbuf_destroy(pb);
	if (vp->v_type != VLNK) {
		/* Not a symlink: drop the parent, stat the node itself. */
		if ((ndflags & LOCKPARENT) != 0) {
			if (dvp == vp)
				vrele(dvp);	/* same vnode: one lock, extra ref */
			else
				vput(dvp);
		}
		error = vn_stat(vp, &sb);
		vput(vp);
		if (error)
			return (error);
	} else {
		/* Symlink: stat the parent, then merge in link-specific fields. */
		error = vn_stat(dvp, &sb);
		vput(dvp);
		if (error) {
			vput(vp);
			return (error);
		}
		error = vn_stat(vp, &sb1);
		vput(vp);
		if (error)
			return (error);
		sb.st_mode &= ~S_IFDIR;
		sb.st_mode |= S_IFLNK;
		sb.st_nlink = sb1.st_nlink;
		sb.st_size = sb1.st_size;
		sb.st_blocks = sb1.st_blocks;
	}
	/* Convert to the old struct stat43 layout and copy out. */
	cvtstat(&sb, &osb);
	error = copyout((void *)&osb, (void *)SCARG(uap, ub), sizeof (osb));
	return (error);
}
/*
 * Load an ET_REL (object-file) kernel module: read and validate the ELF
 * headers, size and map a contiguous chunk of wired kernel memory for
 * all PROGBITS/NOBITS sections, load symbol/string/reloc tables, pull
 * in dependencies, and relocate.  On any failure the partially built
 * linker file is torn down via linker_file_unload().
 */
static int
link_elf_load_file(linker_class_t cls, const char *filename,
    linker_file_t *result)
{
	struct nameidata nd;
	struct thread *td = curthread;	/* XXX */
	Elf_Ehdr *hdr;
	Elf_Shdr *shdr;
	Elf_Sym *es;
	int nbytes, i, j;
	vm_offset_t mapbase;
	size_t mapsize;
	int error = 0;
	ssize_t resid;
	int flags;
	elf_file_t ef;
	linker_file_t lf;
	int symtabindex;
	int symstrindex;
	int shstrindex;
	int nsym;
	int pb, rl, ra;
	int alignmask;

	shdr = NULL;
	lf = NULL;
	mapsize = 0;
	hdr = NULL;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, filename, td);
	flags = FREAD;
	error = vn_open(&nd, &flags, 0, NULL);
	if (error)
		return error;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG) {
		error = ENOEXEC;
		goto out;
	}
#ifdef MAC
	error = mac_kld_check_load(td->td_ucred, nd.ni_vp);
	if (error) {
		goto out;
	}
#endif

	/* Read the elf header from the file. */
	hdr = malloc(sizeof(*hdr), M_LINKER, M_WAITOK);
	error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)hdr, sizeof(*hdr), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED,
	    &resid, td);
	if (error)
		goto out;
	if (resid != 0){
		error = ENOEXEC;
		goto out;
	}

	if (!IS_ELF(*hdr)) {
		error = ENOEXEC;
		goto out;
	}

	if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS
	    || hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
		link_elf_error(filename, "Unsupported file layout");
		error = ENOEXEC;
		goto out;
	}
	if (hdr->e_ident[EI_VERSION] != EV_CURRENT
	    || hdr->e_version != EV_CURRENT) {
		link_elf_error(filename, "Unsupported file version");
		error = ENOEXEC;
		goto out;
	}
	if (hdr->e_type != ET_REL) {
		error = ENOSYS;
		goto out;
	}
	if (hdr->e_machine != ELF_TARG_MACH) {
		link_elf_error(filename, "Unsupported machine");
		error = ENOEXEC;
		goto out;
	}

	lf = linker_make_file(filename, &link_elf_class);
	if (!lf) {
		error = ENOMEM;
		goto out;
	}
	ef = (elf_file_t) lf;
	ef->nprogtab = 0;
	ef->e_shdr = 0;
	ef->nreltab = 0;
	ef->nrelatab = 0;

	/* Allocate and read in the section header */
	nbytes = hdr->e_shnum * hdr->e_shentsize;
	if (nbytes == 0 || hdr->e_shoff == 0 ||
	    hdr->e_shentsize != sizeof(Elf_Shdr)) {
		error = ENOEXEC;
		goto out;
	}
	shdr = malloc(nbytes, M_LINKER, M_WAITOK);
	ef->e_shdr = shdr;
	error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t)shdr, nbytes,
	    hdr->e_shoff, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
	    NOCRED, &resid, td);
	if (error)
		goto out;
	if (resid) {
		error = ENOEXEC;
		goto out;
	}

	/* Scan the section header for information and table sizing. */
	nsym = 0;
	symtabindex = -1;
	symstrindex = -1;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (shdr[i].sh_size == 0)
			continue;
		switch (shdr[i].sh_type) {
		case SHT_PROGBITS:
		case SHT_NOBITS:
#ifdef __amd64__
		case SHT_X86_64_UNWIND:
#endif
			ef->nprogtab++;
			break;
		case SHT_SYMTAB:
			nsym++;
			symtabindex = i;
			symstrindex = shdr[i].sh_link;
			break;
		case SHT_REL:
			ef->nreltab++;
			break;
		case SHT_RELA:
			ef->nrelatab++;
			break;
		case SHT_STRTAB:
			break;
		}
	}
	if (ef->nprogtab == 0) {
		link_elf_error(filename, "file has no contents");
		error = ENOEXEC;
		goto out;
	}
	if (nsym != 1) {
		/* Only allow one symbol table for now */
		link_elf_error(filename, "file has no valid symbol table");
		error = ENOEXEC;
		goto out;
	}
	/*
	 * BUG FIX: the bound check was `symstrindex > hdr->e_shnum',
	 * allowing symstrindex == e_shnum and an out-of-bounds read of
	 * shdr[] with an attacker-controlled sh_link.  Valid indices are
	 * 0 .. e_shnum-1, so reject with `>='.
	 */
	if (symstrindex < 0 || symstrindex >= hdr->e_shnum ||
	    shdr[symstrindex].sh_type != SHT_STRTAB) {
		link_elf_error(filename, "file has invalid symbol strings");
		error = ENOEXEC;
		goto out;
	}

	/* Allocate space for tracking the load chunks */
	if (ef->nprogtab != 0)
		ef->progtab = malloc(ef->nprogtab * sizeof(*ef->progtab),
		    M_LINKER, M_WAITOK | M_ZERO);
	if (ef->nreltab != 0)
		ef->reltab = malloc(ef->nreltab * sizeof(*ef->reltab),
		    M_LINKER, M_WAITOK | M_ZERO);
	if (ef->nrelatab != 0)
		ef->relatab = malloc(ef->nrelatab * sizeof(*ef->relatab),
		    M_LINKER, M_WAITOK | M_ZERO);

	if (symtabindex == -1) {
		link_elf_error(filename, "lost symbol table index");
		error = ENOEXEC;
		goto out;
	}
	/* Allocate space for and load the symbol table */
	ef->ddbsymcnt = shdr[symtabindex].sh_size / sizeof(Elf_Sym);
	ef->ddbsymtab = malloc(shdr[symtabindex].sh_size, M_LINKER, M_WAITOK);
	error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)ef->ddbsymtab,
	    shdr[symtabindex].sh_size, shdr[symtabindex].sh_offset,
	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED,
	    &resid, td);
	if (error)
		goto out;
	if (resid != 0){
		error = EINVAL;
		goto out;
	}

	if (symstrindex == -1) {
		link_elf_error(filename, "lost symbol string index");
		error = ENOEXEC;
		goto out;
	}
	/* Allocate space for and load the symbol strings */
	ef->ddbstrcnt = shdr[symstrindex].sh_size;
	ef->ddbstrtab = malloc(shdr[symstrindex].sh_size, M_LINKER, M_WAITOK);
	error = vn_rdwr(UIO_READ, nd.ni_vp, ef->ddbstrtab,
	    shdr[symstrindex].sh_size, shdr[symstrindex].sh_offset,
	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED,
	    &resid, td);
	if (error)
		goto out;
	if (resid != 0){
		error = EINVAL;
		goto out;
	}

	/* Do we have a string table for the section names? */
	shstrindex = -1;
	if (hdr->e_shstrndx != 0 &&
	    shdr[hdr->e_shstrndx].sh_type == SHT_STRTAB) {
		shstrindex = hdr->e_shstrndx;
		ef->shstrcnt = shdr[shstrindex].sh_size;
		ef->shstrtab = malloc(shdr[shstrindex].sh_size, M_LINKER,
		    M_WAITOK);
		error = vn_rdwr(UIO_READ, nd.ni_vp, ef->shstrtab,
		    shdr[shstrindex].sh_size, shdr[shstrindex].sh_offset,
		    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED,
		    &resid, td);
		if (error)
			goto out;
		if (resid != 0){
			error = EINVAL;
			goto out;
		}
	}

	/* Size up code/data(progbits) and bss(nobits). */
	alignmask = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (shdr[i].sh_size == 0)
			continue;
		switch (shdr[i].sh_type) {
		case SHT_PROGBITS:
		case SHT_NOBITS:
#ifdef __amd64__
		case SHT_X86_64_UNWIND:
#endif
			alignmask = shdr[i].sh_addralign - 1;
			mapsize += alignmask;
			mapsize &= ~alignmask;
			mapsize += shdr[i].sh_size;
			break;
		}
	}

	/*
	 * We know how much space we need for the text/data/bss/etc.
	 * This stuff needs to be in a single chunk so that profiling etc
	 * can get the bounds and gdb can associate offsets with modules
	 */
	ef->object = vm_object_allocate(OBJT_DEFAULT,
	    round_page(mapsize) >> PAGE_SHIFT);
	if (ef->object == NULL) {
		error = ENOMEM;
		goto out;
	}
	ef->address = (caddr_t) vm_map_min(kernel_map);

	/*
	 * In order to satisfy amd64's architectural requirements on the
	 * location of code and data in the kernel's address space, request a
	 * mapping that is above the kernel.
	 */
#ifdef __amd64__
	mapbase = KERNBASE;
#else
	mapbase = VM_MIN_KERNEL_ADDRESS;
#endif
	error = vm_map_find(kernel_map, ef->object, 0, &mapbase,
	    round_page(mapsize), 0, VMFS_OPTIMAL_SPACE, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	if (error) {
		vm_object_deallocate(ef->object);
		ef->object = 0;
		goto out;
	}

	/* Wire the pages */
	error = vm_map_wire(kernel_map, mapbase,
	    mapbase + round_page(mapsize),
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
	if (error != KERN_SUCCESS) {
		error = ENOMEM;
		goto out;
	}

	/* Inform the kld system about the situation */
	lf->address = ef->address = (caddr_t)mapbase;
	lf->size = mapsize;

	/*
	 * Now load code/data(progbits), zero bss(nobits), allocate space for
	 * and load relocs
	 */
	pb = 0;
	rl = 0;
	ra = 0;
	alignmask = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (shdr[i].sh_size == 0)
			continue;
		switch (shdr[i].sh_type) {
		case SHT_PROGBITS:
		case SHT_NOBITS:
#ifdef __amd64__
		case SHT_X86_64_UNWIND:
#endif
			alignmask = shdr[i].sh_addralign - 1;
			mapbase += alignmask;
			mapbase &= ~alignmask;
			if (ef->shstrtab != NULL && shdr[i].sh_name != 0) {
				ef->progtab[pb].name =
				    ef->shstrtab + shdr[i].sh_name;
				if (!strcmp(ef->progtab[pb].name, ".ctors")) {
					lf->ctors_addr = (caddr_t)mapbase;
					lf->ctors_size = shdr[i].sh_size;
				}
			} else if (shdr[i].sh_type == SHT_PROGBITS)
				ef->progtab[pb].name = "<<PROGBITS>>";
#ifdef __amd64__
			else if (shdr[i].sh_type == SHT_X86_64_UNWIND)
				ef->progtab[pb].name = "<<UNWIND>>";
#endif
			else
				ef->progtab[pb].name = "<<NOBITS>>";
			/* DPCPU/VNET sets live in their own arenas. */
			if (ef->progtab[pb].name != NULL &&
			    !strcmp(ef->progtab[pb].name, DPCPU_SETNAME))
				ef->progtab[pb].addr =
				    dpcpu_alloc(shdr[i].sh_size);
#ifdef VIMAGE
			else if (ef->progtab[pb].name != NULL &&
			    !strcmp(ef->progtab[pb].name, VNET_SETNAME))
				ef->progtab[pb].addr =
				    vnet_data_alloc(shdr[i].sh_size);
#endif
			else
				ef->progtab[pb].addr =
				    (void *)(uintptr_t)mapbase;
			if (ef->progtab[pb].addr == NULL) {
				error = ENOSPC;
				goto out;
			}
			ef->progtab[pb].size = shdr[i].sh_size;
			ef->progtab[pb].sec = i;
			if (shdr[i].sh_type == SHT_PROGBITS
#ifdef __amd64__
			    || shdr[i].sh_type == SHT_X86_64_UNWIND
#endif
			    ) {
				error = vn_rdwr(UIO_READ, nd.ni_vp,
				    ef->progtab[pb].addr,
				    shdr[i].sh_size, shdr[i].sh_offset,
				    UIO_SYSSPACE, IO_NODELOCKED,
				    td->td_ucred, NOCRED, &resid, td);
				if (error)
					goto out;
				if (resid != 0){
					error = EINVAL;
					goto out;
				}
				/* Initialize the per-cpu or vnet area. */
				if (ef->progtab[pb].addr != (void *)mapbase &&
				    !strcmp(ef->progtab[pb].name,
				    DPCPU_SETNAME))
					dpcpu_copy(ef->progtab[pb].addr,
					    shdr[i].sh_size);
#ifdef VIMAGE
				else if (ef->progtab[pb].addr !=
				    (void *)mapbase &&
				    !strcmp(ef->progtab[pb].name,
				    VNET_SETNAME))
					vnet_data_copy(ef->progtab[pb].addr,
					    shdr[i].sh_size);
#endif
			} else
				bzero(ef->progtab[pb].addr, shdr[i].sh_size);

			/* Update all symbol values with the offset. */
			for (j = 0; j < ef->ddbsymcnt; j++) {
				es = &ef->ddbsymtab[j];
				if (es->st_shndx != i)
					continue;
				es->st_value +=
				    (Elf_Addr)ef->progtab[pb].addr;
			}
			mapbase += shdr[i].sh_size;
			pb++;
			break;
		case SHT_REL:
			ef->reltab[rl].rel = malloc(shdr[i].sh_size, M_LINKER,
			    M_WAITOK);
			ef->reltab[rl].nrel =
			    shdr[i].sh_size / sizeof(Elf_Rel);
			ef->reltab[rl].sec = shdr[i].sh_info;
			error = vn_rdwr(UIO_READ, nd.ni_vp,
			    (void *)ef->reltab[rl].rel,
			    shdr[i].sh_size, shdr[i].sh_offset,
			    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
			    NOCRED, &resid, td);
			if (error)
				goto out;
			if (resid != 0){
				error = EINVAL;
				goto out;
			}
			rl++;
			break;
		case SHT_RELA:
			ef->relatab[ra].rela = malloc(shdr[i].sh_size,
			    M_LINKER, M_WAITOK);
			ef->relatab[ra].nrela =
			    shdr[i].sh_size / sizeof(Elf_Rela);
			ef->relatab[ra].sec = shdr[i].sh_info;
			error = vn_rdwr(UIO_READ, nd.ni_vp,
			    (void *)ef->relatab[ra].rela,
			    shdr[i].sh_size, shdr[i].sh_offset,
			    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
			    NOCRED, &resid, td);
			if (error)
				goto out;
			if (resid != 0){
				error = EINVAL;
				goto out;
			}
			ra++;
			break;
		}
	}
	if (pb != ef->nprogtab) {
		link_elf_error(filename, "lost progbits");
		error = ENOEXEC;
		goto out;
	}
	if (rl != ef->nreltab) {
		link_elf_error(filename, "lost reltab");
		error = ENOEXEC;
		goto out;
	}
	if (ra != ef->nrelatab) {
		link_elf_error(filename, "lost relatab");
		error = ENOEXEC;
		goto out;
	}
	if (mapbase != (vm_offset_t)ef->address + mapsize) {
		printf(
		    "%s: mapbase 0x%lx != address %p + mapsize 0x%lx (0x%lx)\n",
		    filename != NULL ? filename : "<none>",
		    (u_long)mapbase, ef->address, (u_long)mapsize,
		    (u_long)(vm_offset_t)ef->address + mapsize);
		error = ENOMEM;
		goto out;
	}

	/* Local intra-module relocations */
	error = link_elf_reloc_local(lf);
	if (error != 0)
		goto out;

	/* Pull in dependencies */
	VOP_UNLOCK(nd.ni_vp, 0);
	error = linker_load_dependencies(lf);
	vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
	if (error)
		goto out;

	/* External relocations */
	error = relocate_file(ef);
	if (error)
		goto out;

	/* Notify MD code that a module is being loaded. */
	error = elf_cpu_load_file(lf);
	if (error)
		goto out;

	/* Invoke .ctors */
	link_elf_invoke_ctors(lf->ctors_addr, lf->ctors_size);

	*result = lf;

out:
	VOP_UNLOCK(nd.ni_vp, 0);
	vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
	if (error && lf)
		linker_file_unload(lf, LINKER_UNLOAD_FORCE);
	free(hdr, M_LINKER);

	return error;
}
/*
 * Lookup and open needed files.
 *
 * For file system internal snapshot initializes sc_mntname, sc_mount,
 * sc_bs_vp and sc_time.
 *
 * Otherwise returns dev and size of the underlying block device.
 * Initializes sc_mntname, sc_mount, sc_bdev, sc_bs_vp and sc_mount
 *
 * NOTE(review): on several error returns below, sc_bs_vp has already
 * been set from a successful vn_open(); presumably the fss teardown
 * path closes it — confirm against the caller before changing.
 */
static int
fss_create_files(struct fss_softc *sc, struct fss_set *fss,
    off_t *bsize, struct lwp *l)
{
	int error, bits, fsbsize;
	uint64_t numsec;
	unsigned int secsize;
	struct timespec ts;
	/* nd -> nd2 to reduce mistakes while updating only some namei calls */
	struct pathbuf *pb2;
	struct nameidata nd2;
	struct vnode *vp;

	/*
	 * Get the mounted file system.
	 */

	error = namei_simple_user(fss->fss_mount,
				NSM_FOLLOW_NOEMULROOT, &vp);
	if (error != 0)
		return error;

	/* fss_mount must name the root of a mounted file system. */
	if ((vp->v_vflag & VV_ROOT) != VV_ROOT) {
		vrele(vp);
		return EINVAL;
	}

	sc->sc_mount = vp->v_mount;
	memcpy(sc->sc_mntname, sc->sc_mount->mnt_stat.f_mntonname, MNAMELEN);

	vrele(vp);

	/*
	 * Check for file system internal snapshot.
	 */

	error = namei_simple_user(fss->fss_bstore,
				NSM_FOLLOW_NOEMULROOT, &vp);
	if (error != 0)
		return error;

	if (vp->v_type == VREG && vp->v_mount == sc->sc_mount) {
		/* Backing store lives on the snapshotted fs itself. */
		sc->sc_flags |= FSS_PERSISTENT;
		sc->sc_bs_vp = vp;

		fsbsize = sc->sc_bs_vp->v_mount->mnt_stat.f_iosize;
		bits = sizeof(sc->sc_bs_bshift)*NBBY;
		/* Find the shift such that FSS_FSBSIZE(sc) == fsbsize. */
		for (sc->sc_bs_bshift = 1; sc->sc_bs_bshift < bits;
		    sc->sc_bs_bshift++)
			if (FSS_FSBSIZE(sc) == fsbsize)
				break;
		if (sc->sc_bs_bshift >= bits)
			return EINVAL;

		sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
		sc->sc_clshift = 0;

		if ((fss->fss_flags & FSS_UNLINK_ON_CREATE) != 0) {
			/* Remove the name so the snapshot file is anonymous. */
			error = do_sys_unlink(fss->fss_bstore, UIO_USERSPACE);
			if (error)
				return error;
		}
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error != 0)
			return error;
		error = VFS_SNAPSHOT(sc->sc_mount, sc->sc_bs_vp, &ts);
		TIMESPEC_TO_TIMEVAL(&sc->sc_time, &ts);

		VOP_UNLOCK(sc->sc_bs_vp);

		return error;
	}
	vrele(vp);

	/*
	 * Get the block device it is mounted on and its size.
	 */

	error = spec_node_lookup_by_mount(sc->sc_mount, &vp);
	if (error)
		return error;
	sc->sc_bdev = vp->v_rdev;

	error = getdisksize(vp, &numsec, &secsize);
	vrele(vp);
	if (error)
		return error;

	*bsize = (off_t)numsec*secsize;

	/*
	 * Get the backing store
	 */

	error = pathbuf_copyin(fss->fss_bstore, &pb2);
	if (error) {
		return error;
	}
	NDINIT(&nd2, LOOKUP, FOLLOW, pb2);
	if ((error = vn_open(&nd2, FREAD|FWRITE, 0)) != 0) {
		pathbuf_destroy(pb2);
		return error;
	}
	VOP_UNLOCK(nd2.ni_vp);

	sc->sc_bs_vp = nd2.ni_vp;

	/* External backing store must be a regular file or char device. */
	if (nd2.ni_vp->v_type != VREG && nd2.ni_vp->v_type != VCHR) {
		pathbuf_destroy(pb2);
		return EINVAL;
	}
	pathbuf_destroy(pb2);

	if ((fss->fss_flags & FSS_UNLINK_ON_CREATE) != 0) {
		error = do_sys_unlink(fss->fss_bstore, UIO_USERSPACE);
		if (error)
			return error;
	}
	if (sc->sc_bs_vp->v_type == VREG) {
		fsbsize = sc->sc_bs_vp->v_mount->mnt_stat.f_iosize;
		if (fsbsize & (fsbsize-1))	/* No power of two */
			return EINVAL;
		for (sc->sc_bs_bshift = 1; sc->sc_bs_bshift < 32;
		    sc->sc_bs_bshift++)
			if (FSS_FSBSIZE(sc) == fsbsize)
				break;
		if (sc->sc_bs_bshift >= 32)
			return EINVAL;
		sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
	} else {
		/* Character device: use the device block size. */
		sc->sc_bs_bshift = DEV_BSHIFT;
		sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
	}
	return 0;
}
/* * Mount null layer */ static int nullfs_mount(struct mount *mp) { int error = 0; struct vnode *lowerrootvp, *vp; struct vnode *nullm_rootvp; struct null_mount *xmp; struct thread *td = curthread; char *target; int isvnunlocked = 0, len; struct nameidata nd, *ndp = &nd; NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp); if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_NULLFS)) return (EPERM); if (mp->mnt_flag & MNT_ROOTFS) return (EOPNOTSUPP); /* * Update is a no-op */ if (mp->mnt_flag & MNT_UPDATE) { /* * Only support update mounts for NFS export. */ if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0)) return (0); else return (EOPNOTSUPP); } /* * Get argument */ error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len); if (error || target[len - 1] != '\0') return (EINVAL); /* * Unlock lower node to avoid possible deadlock. */ if ((mp->mnt_vnodecovered->v_op == &null_vnodeops) && VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) { VOP_UNLOCK(mp->mnt_vnodecovered, 0); isvnunlocked = 1; } /* * Find lower node */ NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, curthread); error = namei(ndp); /* * Re-lock vnode. */ if (isvnunlocked) vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY); if (error) return (error); NDFREE(ndp, NDF_ONLY_PNBUF); /* * Sanity check on lower vnode */ lowerrootvp = ndp->ni_vp; /* * Check multi null mount to avoid `lock against myself' panic. */ if (lowerrootvp == VTONULL(mp->mnt_vnodecovered)->null_lowervp) { NULLFSDEBUG("nullfs_mount: multi null mount?\n"); vput(lowerrootvp); return (EDEADLK); } xmp = (struct null_mount *) malloc(sizeof(struct null_mount), M_NULLFSMNT, M_WAITOK); /* XXX */ /* * Save reference to underlying FS */ xmp->nullm_vfs = lowerrootvp->v_mount; /* * Save reference. Each mount also holds * a reference on the root vnode. 
*/ error = null_nodeget(mp, lowerrootvp, &vp); /* * Make sure the node alias worked */ if (error) { free(xmp, M_NULLFSMNT); return (error); } /* * Keep a held reference to the root vnode. * It is vrele'd in nullfs_unmount. */ nullm_rootvp = vp; nullm_rootvp->v_vflag |= VV_ROOT; xmp->nullm_rootvp = nullm_rootvp; /* * Unlock the node (either the lower or the alias) */ VOP_UNLOCK(vp, 0); if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) { MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; MNT_IUNLOCK(mp); } MNT_ILOCK(mp); mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag & (MNTK_MPSAFE | MNTK_SHARED_WRITES); MNT_IUNLOCK(mp); mp->mnt_data = xmp; vfs_getnewfsid(mp); vfs_mountedfrom(mp, target); NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n", mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname); return (0); }
static int udf_mount(struct mount *mp) { struct vnode *devvp; /* vnode of the mount device */ struct thread *td; struct udf_mnt *imp = 0; struct vfsoptlist *opts; char *fspec, *cs_disk, *cs_local; int error, len, *udf_flags; struct nameidata nd, *ndp = &nd; td = curthread; opts = mp->mnt_optnew; /* * Unconditionally mount as read-only. */ MNT_ILOCK(mp); mp->mnt_flag |= MNT_RDONLY; MNT_IUNLOCK(mp); /* * No root filesystem support. Probably not a big deal, since the * bootloader doesn't understand UDF. */ if (mp->mnt_flag & MNT_ROOTFS) return (ENOTSUP); fspec = NULL; error = vfs_getopt(opts, "from", (void **)&fspec, &len); if (!error && fspec[len - 1] != '\0') return (EINVAL); if (mp->mnt_flag & MNT_UPDATE) { return (0); } /* Check that the mount device exists */ if (fspec == NULL) return (EINVAL); NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); if ((error = namei(ndp))) return (error); NDFREE(ndp, NDF_ONLY_PNBUF); devvp = ndp->ni_vp; if (vn_isdisk(devvp, &error) == 0) { vput(devvp); return (error); } /* Check the access rights on the mount device */ error = VOP_ACCESS(devvp, VREAD, td->td_ucred, td); if (error) error = priv_check(td, PRIV_VFS_MOUNT_PERM); if (error) { vput(devvp); return (error); } if ((error = udf_mountfs(devvp, mp))) { vrele(devvp); return (error); } imp = VFSTOUDFFS(mp); udf_flags = NULL; error = vfs_getopt(opts, "flags", (void **)&udf_flags, &len); if (error || len != sizeof(int)) return (EINVAL); imp->im_flags = *udf_flags; if (imp->im_flags & UDFMNT_KICONV && udf_iconv) { cs_disk = NULL; error = vfs_getopt(opts, "cs_disk", (void **)&cs_disk, &len); if (!error && cs_disk[len - 1] != '\0') return (EINVAL); cs_local = NULL; error = vfs_getopt(opts, "cs_local", (void **)&cs_local, &len); if (!error && cs_local[len - 1] != '\0') return (EINVAL); udf_iconv->open(cs_local, cs_disk, &imp->im_d2l); #if 0 udf_iconv->open(cs_disk, cs_local, &imp->im_l2d); #endif } vfs_mountedfrom(mp, fspec); return 0; };
/*
 * ioctl handler for the vnd(4) vnode-disk pseudo device.
 *
 * VNDIOCSET configures a unit to be backed by a file or block device
 * (optionally Blowfish-encrypted), VNDIOCCLR tears it down, VNDIOCGET
 * reports configuration, and the DIOC* commands service disklabel
 * operations.  All commands require superuser.
 */
/* ARGSUSED */
int
vndioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit = DISKUNIT(dev);
	struct disklabel *lp;
	struct vnd_softc *sc;
	struct vnd_ioctl *vio;
	struct vnd_user *vnu;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask;

	DNPRINTF(VDB_FOLLOW, "vndioctl(%x, %lx, %p, %x, %p): unit %d\n",
	    dev, cmd, addr, flag, p, unit);

	error = suser(p, 0);
	if (error)
		return (error);
	if (unit >= numvnd)
		return (ENXIO);

	sc = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)addr;
	switch (cmd) {

	case VNDIOCSET:
		if (sc->sc_flags & VNF_INITED)
			return (EBUSY);

		/* Geometry eventually has to fit into label fields */
		if (vio->vnd_secsize > UINT_MAX ||
		    vio->vnd_ntracks > UINT_MAX ||
		    vio->vnd_nsectors > UINT_MAX)
			return (EINVAL);

		/* disk_lock serializes configuration of this unit. */
		if ((error = disk_lock(&sc->sc_dk)) != 0)
			return (error);

		if ((error = copyinstr(vio->vnd_file, sc->sc_file,
		    sizeof(sc->sc_file), NULL))) {
			disk_unlock(&sc->sc_dk);
			return (error);
		}

		/* Set geometry for device. */
		sc->sc_secsize = vio->vnd_secsize;
		sc->sc_ntracks = vio->vnd_ntracks;
		sc->sc_nsectors = vio->vnd_nsectors;

		/*
		 * Open for read and write first. This lets vn_open() weed out
		 * directories, sockets, etc. so we don't have to worry about
		 * them.  Fall back to read-only on EROFS and remember the
		 * mode in VNF_READONLY (VNDRW() reports it for vn_close).
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		sc->sc_flags &= ~VNF_READONLY;
		error = vn_open(&nd, FREAD|FWRITE, 0);
		if (error == EROFS) {
			sc->sc_flags |= VNF_READONLY;
			error = vn_open(&nd, FREAD, 0);
		}
		if (error) {
			disk_unlock(&sc->sc_dk);
			return (error);
		}

		/*
		 * Size in sectors: from the underlying block device, or
		 * derived from the backing file's size attribute.
		 */
		if (nd.ni_vp->v_type == VBLK)
			sc->sc_size = vndbdevsize(nd.ni_vp, p);
		else {
			error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
			if (error) {
				VOP_UNLOCK(nd.ni_vp, 0, p);
				vn_close(nd.ni_vp, VNDRW(sc), p->p_ucred, p);
				disk_unlock(&sc->sc_dk);
				return (error);
			}
			sc->sc_size = vattr.va_size / sc->sc_secsize;
		}
		VOP_UNLOCK(nd.ni_vp, 0, p);
		sc->sc_vp = nd.ni_vp;
		if ((error = vndsetcred(sc, p->p_ucred)) != 0) {
			(void) vn_close(nd.ni_vp, VNDRW(sc), p->p_ucred, p);
			disk_unlock(&sc->sc_dk);
			return (error);
		}

		/* Optional Blowfish encryption key for the backing store. */
		if (vio->vnd_keylen > 0) {
			char key[BLF_MAXUTILIZED];

			/* Silently truncate over-long keys to the maximum. */
			if (vio->vnd_keylen > sizeof(key))
				vio->vnd_keylen = sizeof(key);

			if ((error = copyin(vio->vnd_key, key,
			    vio->vnd_keylen)) != 0) {
				(void) vn_close(nd.ni_vp, VNDRW(sc),
				    p->p_ucred, p);
				disk_unlock(&sc->sc_dk);
				return (error);
			}

			sc->sc_keyctx = malloc(sizeof(*sc->sc_keyctx),
			    M_DEVBUF, M_WAITOK);
			blf_key(sc->sc_keyctx, key, vio->vnd_keylen);
			/* Scrub the plaintext key off the stack. */
			explicit_bzero(key, vio->vnd_keylen);
		} else
			sc->sc_keyctx = NULL;

		vio->vnd_size = sc->sc_size * sc->sc_secsize;
		sc->sc_flags |= VNF_INITED;

		DNPRINTF(VDB_INIT, "vndioctl: SET vp %p size %llx\n",
		    sc->sc_vp, (unsigned long long)sc->sc_size);

		/* Attach the disk. */
		sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
		disk_attach(&sc->sc_dev, &sc->sc_dk);

		disk_unlock(&sc->sc_dk);

		break;

	case VNDIOCCLR:
		if ((sc->sc_flags & VNF_INITED) == 0)
			return (ENXIO);

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if ((sc->sc_dk.dk_openmask & ~pmask) ||
		    ((sc->sc_dk.dk_bopenmask & pmask) &&
		    (sc->sc_dk.dk_copenmask & pmask))) {
			disk_unlock(&sc->sc_dk);
			return (EBUSY);
		}

		vndclear(sc);
		DNPRINTF(VDB_INIT, "vndioctl: CLRed\n");

		/* Free crypto key (scrub before free). */
		if (sc->sc_keyctx) {
			explicit_bzero(sc->sc_keyctx, sizeof(*sc->sc_keyctx));
			free(sc->sc_keyctx, M_DEVBUF, sizeof(*sc->sc_keyctx));
		}

		/* Detach the disk. */
		disk_detach(&sc->sc_dk);
		disk_unlock(&sc->sc_dk);
		break;

	case VNDIOCGET:
		vnu = (struct vnd_user *)addr;

		/* -1 means "this unit"; otherwise validate the request. */
		if (vnu->vnu_unit == -1)
			vnu->vnu_unit = unit;
		if (vnu->vnu_unit >= numvnd)
			return (ENXIO);
		if (vnu->vnu_unit < 0)
			return (EINVAL);

		sc = &vnd_softc[vnu->vnu_unit];

		if (sc->sc_flags & VNF_INITED) {
			error = VOP_GETATTR(sc->sc_vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);

			strlcpy(vnu->vnu_file, sc->sc_file,
			    sizeof(vnu->vnu_file));
			vnu->vnu_dev = vattr.va_fsid;
			vnu->vnu_ino = vattr.va_fileid;
		} else {
			/* Unconfigured unit: report zeroed identifiers. */
			vnu->vnu_dev = 0;
			vnu->vnu_ino = 0;
		}

		break;

	case DIOCRLDINFO:
		if ((sc->sc_flags & VNF_HAVELABEL) == 0)
			return (ENOTTY);
		lp = malloc(sizeof(*lp), M_TEMP, M_WAITOK);
		vndgetdisklabel(dev, sc, lp, 0);
		*(sc->sc_dk.dk_label) = *lp;
		free(lp, M_TEMP, sizeof(*lp));
		return (0);

	case DIOCGPDINFO:
		if ((sc->sc_flags & VNF_HAVELABEL) == 0)
			return (ENOTTY);
		vndgetdisklabel(dev, sc, (struct disklabel *)addr, 1);
		return (0);

	case DIOCGDINFO:
		if ((sc->sc_flags & VNF_HAVELABEL) == 0)
			return (ENOTTY);
		*(struct disklabel *)addr = *(sc->sc_dk.dk_label);
		return (0);

	case DIOCGPART:
		if ((sc->sc_flags & VNF_HAVELABEL) == 0)
			return (ENOTTY);
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		return (0);

	case DIOCWDINFO:
	case DIOCSDINFO:
		if ((sc->sc_flags & VNF_HAVELABEL) == 0)
			return (ENOTTY);
		if ((flag & FWRITE) == 0)
			return (EBADF);

		if ((error = disk_lock(&sc->sc_dk)) != 0)
			return (error);

		error = setdisklabel(sc->sc_dk.dk_label,
		    (struct disklabel *)addr, /* sc->sc_dk.dk_openmask */ 0);
		if (error == 0) {
			if (cmd == DIOCWDINFO)
				error = writedisklabel(DISKLABELDEV(dev),
				    vndstrategy, sc->sc_dk.dk_label);
		}

		disk_unlock(&sc->sc_dk);
		return (error);

	default:
		return (ENOTTY);
	}

	return (0);
}
/*
 * Mount unionfs layer.
 *
 * Merges the filesystem named by the "target"/"from" option with the
 * covered directory.  "below" swaps which layer is upper/lower; udir,
 * ufile, uid, gid, copymode and whiteout options tune the shadow
 * directory/file creation policy.
 */
static int
unionfs_domount(struct mount *mp)
{
	int error;
	struct vnode *lowerrootvp;
	struct vnode *upperrootvp;
	struct unionfs_mount *ump;
	struct thread *td;
	char *target;
	char *tmp;
	char *ep;
	int len;
	size_t done;
	int below;
	uid_t uid;
	gid_t gid;
	u_short udir;
	u_short ufile;
	unionfs_copymode copymode;
	unionfs_whitemode whitemode;
	struct componentname fakecn;
	struct nameidata nd, *ndp;
	struct vattr va;

	UNIONFSDEBUG("unionfs_mount(mp = %p)\n", (void *)mp);

	error = 0;
	below = 0;
	uid = 0;
	gid = 0;
	udir = 0;
	ufile = 0;
	copymode = UNIONFS_TRANSPARENT;	/* default */
	whitemode = UNIONFS_WHITE_ALWAYS;
	ndp = &nd;
	td = curthread;

	if (mp->mnt_flag & MNT_ROOTFS) {
		vfs_mount_error(mp, "Cannot union mount root filesystem");
		return (EOPNOTSUPP);
	}

	/*
	 * Update is a no operation.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		vfs_mount_error(mp, "unionfs does not support mount update");
		return (EOPNOTSUPP);
	}

	/*
	 * Get argument: accept either "target" or "from"; must be a
	 * NUL-terminated path.
	 */
	error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
	if (error)
		error = vfs_getopt(mp->mnt_optnew, "from", (void **)&target,
		    &len);
	if (error || target[len - 1] != '\0') {
		vfs_mount_error(mp, "Invalid target");
		return (EINVAL);
	}
	if (vfs_getopt(mp->mnt_optnew, "below", NULL, NULL) == 0)
		below = 1;
	/* udir/ufile are octal permission masks for shadow dirs/files. */
	if (vfs_getopt(mp->mnt_optnew, "udir", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			udir = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid udir");
			return (EINVAL);
		}
		udir &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	if (vfs_getopt(mp->mnt_optnew, "ufile", (void **)&tmp, NULL) == 0) {
		if (tmp != NULL)
			ufile = (mode_t)strtol(tmp, &ep, 8);
		if (tmp == NULL || *ep) {
			vfs_mount_error(mp, "Invalid ufile");
			return (EINVAL);
		}
		ufile &= S_IRWXU | S_IRWXG | S_IRWXO;
	}
	/* check umask, uid and gid: each defaults to the other if only
	 * one was given. */
	if (udir == 0 && ufile != 0)
		udir = ufile;
	if (ufile == 0 && udir != 0)
		ufile = udir;

	/* Fall back to the covered vnode's attributes for unset values. */
	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	if (!error) {
		if (udir == 0)
			udir = va.va_mode;
		if (ufile == 0)
			ufile = va.va_mode;
		uid = va.va_uid;
		gid = va.va_gid;
	}
	VOP_UNLOCK(mp->mnt_vnodecovered, 0);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid == 0) {	/* root only */
		if (vfs_getopt(mp->mnt_optnew, "uid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				uid = (uid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid uid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "gid", (void **)&tmp,
		    NULL) == 0) {
			if (tmp != NULL)
				gid = (gid_t)strtol(tmp, &ep, 10);
			if (tmp == NULL || *ep) {
				vfs_mount_error(mp, "Invalid gid");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "copymode", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "traditional") == 0)
				copymode = UNIONFS_TRADITIONAL;
			else if (strcasecmp(tmp, "transparent") == 0)
				copymode = UNIONFS_TRANSPARENT;
			else if (strcasecmp(tmp, "masquerade") == 0)
				copymode = UNIONFS_MASQUERADE;
			else {
				vfs_mount_error(mp, "Invalid copymode");
				return (EINVAL);
			}
		}
		if (vfs_getopt(mp->mnt_optnew, "whiteout", (void **)&tmp,
		    NULL) == 0) {
			if (tmp == NULL) {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			} else if (strcasecmp(tmp, "always") == 0)
				whitemode = UNIONFS_WHITE_ALWAYS;
			else if (strcasecmp(tmp, "whenneeded") == 0)
				whitemode = UNIONFS_WHITE_WHENNEEDED;
			else {
				vfs_mount_error(mp, "Invalid whiteout mode");
				return (EINVAL);
			}
		}
	}
	/*
	 * If copymode is UNIONFS_TRADITIONAL, uid/gid is mounted user.
	 */
	if (copymode == UNIONFS_TRADITIONAL) {
		uid = mp->mnt_cred->cr_ruid;
		gid = mp->mnt_cred->cr_rgid;
	}

	UNIONFSDEBUG("unionfs_mount: uid=%d, gid=%d\n", uid, gid);
	UNIONFSDEBUG("unionfs_mount: udir=0%03o, ufile=0%03o\n", udir, ufile);
	UNIONFSDEBUG("unionfs_mount: copymode=%d\n", copymode);

	/*
	 * Find upper node (locked + referenced via LOCKLEAF).
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, target, td);
	if ((error = namei(ndp)))
		return (error);

	NDFREE(ndp, NDF_ONLY_PNBUF);

	/* get root vnodes */
	lowerrootvp = mp->mnt_vnodecovered;
	upperrootvp = ndp->ni_vp;

	/* create unionfs_mount */
	ump = (struct unionfs_mount *)malloc(sizeof(struct unionfs_mount),
	    M_UNIONFSMNT, M_WAITOK | M_ZERO);

	/*
	 * Save reference.  With "below" the roles are swapped: the named
	 * target becomes the lower layer and the covered vnode the upper,
	 * so the lock moves from the target to the covered vnode.
	 */
	if (below) {
		VOP_UNLOCK(upperrootvp, 0);
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY);
		ump->um_lowervp = upperrootvp;
		ump->um_uppervp = lowerrootvp;
	} else {
		ump->um_lowervp = lowerrootvp;
		ump->um_uppervp = upperrootvp;
	}
	ump->um_rootvp = NULLVP;
	ump->um_uid = uid;
	ump->um_gid = gid;
	ump->um_udir = udir;
	ump->um_ufile = ufile;
	ump->um_copymode = copymode;
	ump->um_whitemode = whitemode;

	MNT_ILOCK(mp);
	if ((lowerrootvp->v_mount->mnt_kern_flag & MNTK_MPSAFE) &&
	    (upperrootvp->v_mount->mnt_kern_flag & MNTK_MPSAFE))
		mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
	mp->mnt_data = ump;

	/*
	 * Copy upper layer's RDONLY flag.
	 */
	mp->mnt_flag |= ump->um_uppervp->v_mount->mnt_flag & MNT_RDONLY;

	/*
	 * Check whiteout support of the upper layer by probing with a
	 * fake lookup componentname; bail out if unsupported on a
	 * writable mount.
	 */
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		memset(&fakecn, 0, sizeof(fakecn));
		fakecn.cn_nameiop = LOOKUP;
		fakecn.cn_thread = td;
		error = VOP_WHITEOUT(ump->um_uppervp, &fakecn, LOOKUP);
		if (error) {
			if (below) {
				VOP_UNLOCK(ump->um_uppervp, 0);
				vrele(upperrootvp);
			} else
				vput(ump->um_uppervp);
			free(ump, M_UNIONFSMNT);
			mp->mnt_data = NULL;
			return (error);
		}
	}

	/*
	 * Unlock the node
	 */
	VOP_UNLOCK(ump->um_uppervp, 0);

	/*
	 * Get the unionfs root vnode.
	 */
	error = unionfs_nodeget(mp, ump->um_uppervp, ump->um_lowervp,
	    NULLVP, &(ump->um_rootvp), NULL, td);
	vrele(upperrootvp);
	if (error) {
		free(ump, M_UNIONFSMNT);
		mp->mnt_data = NULL;
		return (error);
	}

	/*
	 * Check mnt_flag: local only if both layers are local.
	 */
	if ((ump->um_lowervp->v_mount->mnt_flag & MNT_LOCAL) &&
	    (ump->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
		mp->mnt_flag |= MNT_LOCAL;

	/*
	 * Get new fsid
	 */
	vfs_getnewfsid(mp);

	/* Build f_mntfromname as "<below>:target" or "<above>:target". */
	len = MNAMELEN - 1;
	tmp = mp->mnt_stat.f_mntfromname;
	copystr((below ? "<below>:" : "<above>:"), tmp, len, &done);
	len -= done - 1;
	tmp += done - 1;
	copystr(target, tmp, len, NULL);

	UNIONFSDEBUG("unionfs_mount: from %s, on %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);

	return (0);
}
/*
 * ioctl handler for the pty multiplexor device (/dev/ptm).
 *
 * PTMGET allocates a free master/slave pty pair: two file descriptors
 * are reserved, the master is opened, the slave's ownership/permissions
 * are reset and any previous users are revoked before it is reopened,
 * and the resulting descriptors and device names are copied out in the
 * caller's struct ptmget.  Any failure after descriptor allocation
 * unwinds both descriptors via the "bad" label.
 *
 * Returns 0 on success or an errno.
 */
int
ptmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	dev_t newdev;
	/*
	 * Fix: `error' was previously declared as dev_t alongside newdev;
	 * it carries int errnos (falloc/namei/VOP_SETATTR) and is this
	 * function's int return value, so declare it as int.
	 */
	int error;
	struct pt_softc * pti;
	struct nameidata cnd, snd;
	struct filedesc *fdp = p->p_fd;
	struct file *cfp = NULL, *sfp = NULL;
	int cindx, sindx;
	uid_t uid;
	gid_t gid;
	struct vattr vattr;
	struct ucred *cred;
	struct ptmget *ptm = (struct ptmget *)data;

	switch (cmd) {
	case PTMGET:
		fdplock(fdp);
		/* Grab two filedescriptors. */
		if ((error = falloc(p, &cfp, &cindx)) != 0) {
			fdpunlock(fdp);
			break;
		}
		if ((error = falloc(p, &sfp, &sindx)) != 0) {
			fdremove(fdp, cindx);
			closef(cfp, p);
			fdpunlock(fdp);
			break;
		}
retry:
		/* Find and open a free master pty. */
		newdev = pty_getfree();
		if ((error = check_pty(minor(newdev))))
			goto bad;
		pti = pt_softc[minor(newdev)];
		NDINIT(&cnd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE,
		    pti->pty_pn, p);
		if ((error = ptm_vn_open(&cnd)) != 0) {
			/*
			 * Check if the master open failed because we lost
			 * the race to grab it.
			 */
			if (error == EIO && !pty_isfree(minor(newdev)))
				goto retry;
			goto bad;
		}
		cfp->f_flag = FREAD|FWRITE;
		cfp->f_type = DTYPE_VNODE;
		cfp->f_ops = &vnops;
		cfp->f_data = (caddr_t) cnd.ni_vp;
		VOP_UNLOCK(cnd.ni_vp, 0, p);

		/*
		 * Open the slave.
		 * namei -> setattr -> unlock -> revoke -> vrele ->
		 * namei -> open -> unlock
		 * Three stage rocket:
		 * 1. Change the owner and permissions on the slave.
		 * 2. Revoke all the users of the slave.
		 * 3. open the slave.
		 */
		NDINIT(&snd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE,
		    pti->pty_sn, p);
		if ((error = namei(&snd)) != 0)
			goto bad;
		if ((snd.ni_vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			gid = tty_gid;
			/* get real uid */
			uid = p->p_ucred->cr_ruid;

			VATTR_NULL(&vattr);
			vattr.va_uid = uid;
			vattr.va_gid = gid;
			vattr.va_mode = (S_IRUSR|S_IWUSR|S_IWGRP) & ALLPERMS;
			/* Get a fake cred to pretend we're root. */
			cred = crget();
			error = VOP_SETATTR(snd.ni_vp, &vattr, cred, p);
			crfree(cred);
			if (error) {
				vput(snd.ni_vp);
				goto bad;
			}
		}
		VOP_UNLOCK(snd.ni_vp, 0, p);
		if (snd.ni_vp->v_usecount > 1 ||
		    (snd.ni_vp->v_flag & (VALIASED)))
			VOP_REVOKE(snd.ni_vp, REVOKEALL);

		/*
		 * The vnode is useless after the revoke, we need to
		 * namei again.
		 */
		vrele(snd.ni_vp);
		NDINIT(&snd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE,
		    pti->pty_sn, p);
		/* now open it */
		if ((error = ptm_vn_open(&snd)) != 0)
			goto bad;
		sfp->f_flag = FREAD|FWRITE;
		sfp->f_type = DTYPE_VNODE;
		sfp->f_ops = &vnops;
		sfp->f_data = (caddr_t) snd.ni_vp;
		VOP_UNLOCK(snd.ni_vp, 0, p);

		/* now, put the indexen and names into struct ptmget */
		ptm->cfd = cindx;
		ptm->sfd = sindx;
		memcpy(ptm->cn, pti->pty_pn, sizeof(pti->pty_pn));
		memcpy(ptm->sn, pti->pty_sn, sizeof(pti->pty_sn));

		/* mark the files mature now that we've passed all errors */
		FILE_SET_MATURE(cfp, p);
		FILE_SET_MATURE(sfp, p);

		fdpunlock(fdp);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
bad:
	/* Unwind both reserved descriptors and their file structures. */
	fdremove(fdp, cindx);
	closef(cfp, p);
	fdremove(fdp, sindx);
	closef(sfp, p);
	fdpunlock(fdp);
	return (error);
}
/*
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params including the name of the block
 * special file to treat as a filesystem.
 *
 * Handles both fresh mounts and MNT_UPDATE transitions between
 * read-only and read/write (including GEOM access-count adjustment and
 * volume dirty-flag maintenance).
 */
static int
msdosfs_mount(struct mount *mp)
{
	struct vnode *devvp;	  /* vnode for blk device to mount */
	struct thread *td;
	/* msdosfs specific mount control block */
	struct msdosfsmount *pmp = NULL;
	struct nameidata ndp;
	int error, flags;
	accmode_t accmode;
	char *from;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, msdosfs_opts))
		return (EINVAL);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		pmp = VFSTOMSDOSFS(mp);
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0)) {
			/*
			 * Forbid export requests if filesystem has
			 * MSDOSFS_LARGEFS flag set.
			 */
			if ((pmp->pm_flags & MSDOSFS_LARGEFS) != 0) {
				vfs_mount_error(mp,
				    "MSDOSFS_LARGEFS flag set, cannot export");
				return (EOPNOTSUPP);
			}
		}
		/* rw -> ro downgrade. */
		if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			error = VFS_SYNC(mp, MNT_WAIT);
			if (error)
				return (error);
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = vflush(mp, 0, flags, td);
			if (error)
				return (error);

			/*
			 * Now the volume is clean.  Mark it so while the
			 * device is still rw.
			 */
			error = markvoldirty(pmp, 0);
			if (error) {
				(void)markvoldirty(pmp, 1);
				return (error);
			}

			/* Downgrade the device from rw to ro. */
			DROP_GIANT();
			g_topology_lock();
			error = g_access(pmp->pm_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error) {
				/* Re-dirty: we stay read/write. */
				(void)markvoldirty(pmp, 1);
				return (error);
			}

			/*
			 * Backing out after an error was painful in the
			 * above.  Now we are committed to succeeding.
			 */
			pmp->pm_fmod = 0;
			pmp->pm_flags |= MSDOSFSMNT_RONLY;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
		} else if ((pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = pmp->pm_devvp;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			/* Acquire GEOM write access on the provider. */
			DROP_GIANT();
			g_topology_lock();
			error = g_access(pmp->pm_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);

			pmp->pm_fmod = 1;
			pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);

			/* Now that the volume is modifiable, mark it dirty. */
			error = markvoldirty(pmp, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	if (vfs_getopt(mp->mnt_optnew, "from", (void **)&from, NULL))
		return (EINVAL);
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, from, td);
	error = namei(&ndp);
	if (error)
		return (error);
	devvp = ndp.ni_vp;	/* locked + referenced */
	NDFREE(&ndp, NDF_ONLY_PNBUF);

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = mountmsdosfs(devvp, mp);
#ifdef MSDOSFS_DEBUG		/* only needed for the printf below */
		pmp = VFSTOMSDOSFS(mp);
#endif
	} else {
		/* Update with a device name: it must match the one in use. */
		vput(devvp);
		if (devvp != pmp->pm_devvp)
			return (EINVAL);	/* XXX needs translation */
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	error = update_mp(mp, td);
	if (error) {
		if ((mp->mnt_flag & MNT_UPDATE) == 0)
			msdosfs_unmount(mp, MNT_FORCE);
		return error;
	}
	if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
		devvp->v_rdev->si_mountpt = mp;
	vfs_mountedfrom(mp, from);
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n", mp, pmp,
	    pmp->pm_inusemap);
#endif
	return (0);
}
/*
 * Dump core, into a file named "progname.core" or "core" (depending on the
 * value of shortcorename), unless the process was setuid/setgid.
 *
 * `pattern' overrides the per-process core name template; NULL selects
 * the process limit structure's pl_corename.  Checks the RLIMIT_CORE
 * limit, MNT_NOCOREDUMP on the target filesystem, and refuses to dump
 * through symlinks, to multi-linked files, or to files not owned by the
 * process's effective uid.
 */
static int
coredump(struct lwp *l, const char *pattern)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	kauth_cred_t cred;
	struct pathbuf *pb;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	struct plimit *lim;
	int error, error1;
	char *name, *lastslash;

	name = PNBUF_GET();

	p = l->l_proc;
	vm = p->p_vmspace;

	mutex_enter(proc_lock);		/* p_session */
	mutex_enter(p->p_lock);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur) {
		error = EFBIG;		/* better error code? */
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		goto done;
	}

	/*
	 * It may well not be curproc, so grab a reference to its current
	 * credentials.
	 *
	 * NOTE(review): no matching kauth_cred_free() is visible on any
	 * path in this function — confirm the reference's release against
	 * kauth(9) lifetime rules.
	 */
	kauth_cred_hold(p->p_cred);
	cred = p->p_cred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks,
	 * unless it was specifically requested to allow set-id coredumps.
	 */
	if (p->p_flag & PK_SUGID) {
		if (!security_setidcore_dump) {
			error = EPERM;
			mutex_exit(p->p_lock);
			mutex_exit(proc_lock);
			goto done;
		}
		pattern = security_setidcore_path;
	}

	/* Lock, as p_limit and pl_corename might change. */
	lim = p->p_limit;
	mutex_enter(&lim->pl_lock);
	if (pattern == NULL) {
		pattern = lim->pl_corename;
	}
	error = coredump_buildname(p, name, pattern, MAXPATHLEN);
	mutex_exit(&lim->pl_lock);

	if (error) {
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		goto done;
	}

	/*
	 * On a simple filename, see if the filesystem allow us to write
	 * core dumps there.
	 */
	lastslash = strrchr(name, '/');
	if (!lastslash) {
		vp = p->p_cwdi->cwdi_cdir;
		if (vp->v_mount == NULL ||
		    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
			error = EPERM;
	}

	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	if (error)
		goto done;

	/*
	 * On a complex filename, see if the filesystem allow us to write
	 * core dumps there.
	 *
	 * XXX: We should have an API that avoids double lookups
	 */
	if (lastslash) {
		char c[2];

		if (lastslash - name >= MAXPATHLEN - 2) {
			error = EPERM;
			goto done;
		}

		/*
		 * Temporarily rewrite the component after the last slash
		 * to "." so the lookup resolves the containing directory,
		 * then restore the saved bytes afterwards.
		 */
		c[0] = lastslash[1];
		c[1] = lastslash[2];
		lastslash[1] = '.';
		lastslash[2] = '\0';
		error = namei_simple_kernel(name, NSM_FOLLOW_NOEMULROOT, &vp);
		if (error)
			goto done;
		if (vp->v_mount == NULL ||
		    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
			error = EPERM;
		vrele(vp);
		if (error)
			goto done;
		lastslash[1] = c[0];
		lastslash[2] = c[1];
	}

	pb = pathbuf_create(name);
	if (pb == NULL) {
		error = ENOMEM;
		goto done;
	}
	/* NOFOLLOW + O_NOFOLLOW: never dump through a symlink. */
	NDINIT(&nd, LOOKUP, NOFOLLOW, pb);
	if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE,
	    S_IRUSR | S_IWUSR)) != 0) {
		pathbuf_destroy(pb);
		goto done;
	}
	vp = nd.ni_vp;
	pathbuf_destroy(pb);

	/*
	 * Don't dump to:
	 *	- non-regular files
	 *	- files with links
	 *	- files we don't own
	 */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred) || vattr.va_nlink != 1 ||
	    vattr.va_uid != kauth_cred_geteuid(cred)) {
		error = EACCES;
		goto out;
	}
	/* Truncate any pre-existing file. */
	vattr_null(&vattr);
	vattr.va_size = 0;

	if ((p->p_flag & PK_SUGID) && security_setidcore_dump) {
		vattr.va_uid = security_setidcore_owner;
		vattr.va_gid = security_setidcore_group;
		vattr.va_mode = security_setidcore_mode;
	}

	VOP_SETATTR(vp, &vattr, cred);
	p->p_acflag |= ACORE;

	io.io_lwp = l;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, &io);
 out:
	VOP_UNLOCK(vp);
	error1 = vn_close(vp, FWRITE, cred);
	if (error == 0)
		error = error1;
 done:
	if (name != NULL)
		PNBUF_PUT(name);
	return error;
}
/* ARGSUSED */ int sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap, register_t *retval) { /* { syscallarg(const char *) fname; syscallarg(int) ops; syscallarg(int) facs; syscallarg(int) pid; } */ struct vnode *vp = NULL; file_t *fp = NULL; struct pathbuf *pb; struct nameidata nd; int error = 0; int fd; if (ktrenter(l)) return EAGAIN; if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) { /* * an operation which requires a file argument. */ error = pathbuf_copyin(SCARG(uap, fname), &pb); if (error) { ktrexit(l); return (error); } NDINIT(&nd, LOOKUP, FOLLOW, pb); if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) { pathbuf_destroy(pb); ktrexit(l); return (error); } vp = nd.ni_vp; pathbuf_destroy(pb); VOP_UNLOCK(vp); if (vp->v_type != VREG) { vn_close(vp, FREAD|FWRITE, l->l_cred); ktrexit(l); return (EACCES); } /* * This uses up a file descriptor slot in the * tracing process for the duration of this syscall. * This is not expected to be a problem. */ if ((error = fd_allocfile(&fp, &fd)) != 0) { vn_close(vp, FWRITE, l->l_cred); ktrexit(l); return error; } fp->f_flag = FWRITE; fp->f_type = DTYPE_VNODE; fp->f_ops = &vnops; fp->f_data = (void *)vp; vp = NULL; } error = ktrace_common(l, SCARG(uap, ops), SCARG(uap, facs), SCARG(uap, pid), &fp); if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) fd_abort(curproc, fp, fd); return (error); }
static int ntfs_mount(struct mount *mp) { int err = 0, error; struct vnode *devvp; struct nameidata ndp; struct thread *td; char *from; td = curthread; if (vfs_filteropt(mp->mnt_optnew, ntfs_opts)) return (EINVAL); /* Force mount as read-only. */ MNT_ILOCK(mp); mp->mnt_flag |= MNT_RDONLY; MNT_IUNLOCK(mp); from = vfs_getopts(mp->mnt_optnew, "from", &error); if (error) return (error); /* * If updating, check whether changing from read-only to * read/write. */ if (mp->mnt_flag & MNT_UPDATE) { if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0)) { /* Process export requests in vfs_mount.c */ return (0); } else { printf("ntfs_mount(): MNT_UPDATE not supported\n"); return (EINVAL); } } /* * Not an update, or updating the name: look up the name * and verify that it refers to a sensible block device. */ NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, from, td); err = namei(&ndp); if (err) return (err); NDFREE(&ndp, NDF_ONLY_PNBUF); devvp = ndp.ni_vp; if (!vn_isdisk(devvp, &err)) { vput(devvp); return (err); } /* * If mount by non-root, then verify that user has necessary * permissions on the device. */ err = VOP_ACCESS(devvp, VREAD, td->td_ucred, td); if (err) err = priv_check(td, PRIV_VFS_MOUNT_PERM); if (err) { vput(devvp); return (err); } /* * Since this is a new mount, we want the names for the device and * the mount point copied in. If an error occurs, the mountpoint is * discarded by the upper level code. Note that vfs_mount() handles * copying the mountpoint f_mntonname for us, so we don't have to do * it here unless we want to set it to something other than "path" * for some rason. */ err = ntfs_mountfs(devvp, mp, td); if (err == 0) { /* Save "mounted from" info for mount point. */ vfs_mountedfrom(mp, from); } else vrele(devvp); return (err); }
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Handles fresh FFS mounts, MNT_UPDATE transitions (rw<->ro, reload,
 * softdep enable/disable) and NFS export processing.  `path' is the
 * mount point name, `data' points to a userland struct ufs_args.
 */
int
ffs_mount(struct mount *mp, const char *path, void *data,
    struct nameidata *ndp, struct proc *p)
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	char fname[MNAMELEN];
	char fspec[MNAMELEN];
	int error = 0, flags;
	int ronly;
	mode_t accessmode;

	error = copyin(data, &args, sizeof(struct ufs_args));
	if (error)
		return (error);

#ifndef FFS_SOFTUPDATES
	if (mp->mnt_flag & MNT_SOFTDEP) {
		printf("WARNING: soft updates isn't compiled in\n");
		mp->mnt_flag &= ~MNT_SOFTDEP;
	}
#endif

	/*
	 * Soft updates is incompatible with "async",
	 * so if we are doing softupdates stop the user
	 * from setting the async flag.
	 */
	if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
	    (MNT_SOFTDEP | MNT_ASYNC)) {
		return (EINVAL);
	}

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		error = 0;
		ronly = fs->fs_ronly;

		/* rw -> ro downgrade: flush and close writers. */
		if (ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* Flush any dirty data */
			mp->mnt_flag &= ~MNT_RDONLY;
			VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p);
			mp->mnt_flag |= MNT_RDONLY;

			/*
			 * Get rid of files open for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (fs->fs_flags & FS_DOSOFTDEP) {
				error = softdep_flushfiles(mp, flags, p);
				mp->mnt_flag &= ~MNT_SOFTDEP;
			} else
				error = ffs_flushfiles(mp, flags, p);
			ronly = 1;
		}

		/*
		 * Flush soft dependencies if disabling it via an update
		 * mount. This may leave some items to be processed,
		 * so don't do this yet XXX.
		 */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    !(mp->mnt_flag & MNT_SOFTDEP) &&
		    !(mp->mnt_flag & MNT_RDONLY) && fs->fs_ronly == 0) {
#if 0
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = softdep_flushfiles(mp, flags, p);
#elif FFS_SOFTUPDATES
			mp->mnt_flag |= MNT_SOFTDEP;
#endif
		}
		/*
		 * When upgrading to a softdep mount, we must first flush
		 * all vnodes. (not done yet -- see above)
		 */
		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#if 0
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
#else
			mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
		}

		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
		if (error)
			goto error_1;

		/* ro -> rw upgrade. */
		if (ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (suser(p, 0)) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0, p);
				if (error)
					goto error_1;
			}

			/* Refuse rw on an unclean fs unless forced. */
			if (fs->fs_clean == 0) {
#if 0
				/*
				 * It is safe to mount an unclean file system
				 * if it was previously mounted with softdep
				 * but we may lose space and must
				 * sometimes run fsck manually.
				 */
				if (fs->fs_flags & FS_DOSOFTDEP)
					printf(
"WARNING: %s was not properly unmounted\n",
					    fs->fs_fsmnt);
				else
#endif
				if (mp->mnt_flag & MNT_FORCE) {
					printf(
"WARNING: %s was not properly unmounted\n",
					    fs->fs_fsmnt);
				} else {
					printf(
"WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",
					    fs->fs_fsmnt);
					error = EROFS;
					goto error_1;
				}
			}

			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
				    p->p_ucred);
				if (error)
					goto error_1;
			}
			/* Per-cg dir placement hints, used while rw. */
			fs->fs_contigdirs = malloc((u_long)fs->fs_ncg,
			    M_UFSMNT, M_WAITOK|M_ZERO);

			ronly = 0;
		}
		if (args.fspec == NULL) {
			/*
			 * Process export requests.
			 */
			error = vfs_export(mp, &ump->um_export,
			    &args.export_info);
			if (error)
				goto error_1;
			else
				goto success;
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	error = copyinstr(args.fspec, fspec, sizeof(fspec), NULL);
	if (error)
		goto error_1;

	/* Map DUID-style names to the actual device node if possible. */
	if (disk_map(fspec, fname, MNAMELEN, DM_OPENBLCK) == -1)
		memcpy(fname, fspec, sizeof(fname));

	NDINIT(ndp, LOOKUP, FOLLOW, UIO_SYSSPACE, fname, p);
	if ((error = namei(ndp)) != 0)
		goto error_1;

	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		error = ENOTBLK;
		goto error_2;
	}

	if (major(devvp->v_rdev) >= nblkdev) {
		error = ENXIO;
		goto error_2;
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (suser(p, 0)) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0, p);
		if (error)
			goto error_2;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * UPDATE
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */
		if (devvp != ump->um_devvp) {
			if (devvp->v_rdev == ump->um_devvp->v_rdev) {
				vrele(devvp);
			} else {
				error = EINVAL;	/* needs translation */
			}
		} else
			vrele(devvp);
		/*
		 * Update device name only on success
		 */
		if (!error) {
			/*
			 * Save "mounted from" info for mount point (NULL pad)
			 */
			memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN);
			strlcpy(mp->mnt_stat.f_mntfromname, fname, MNAMELEN);
			memset(mp->mnt_stat.f_mntfromspec, 0, MNAMELEN);
			strlcpy(mp->mnt_stat.f_mntfromspec, fspec, MNAMELEN);
		}
	} else {
		/*
		 * Since this is a new mount, we want the names for
		 * the device and the mount point copied in.  If an
		 * error occurs, the mountpoint is discarded by the
		 * upper level code.
		 */
		memset(mp->mnt_stat.f_mntonname, 0, MNAMELEN);
		strlcpy(mp->mnt_stat.f_mntonname, path, MNAMELEN);
		memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN);
		strlcpy(mp->mnt_stat.f_mntfromname, fname, MNAMELEN);
		memset(mp->mnt_stat.f_mntfromspec, 0, MNAMELEN);
		strlcpy(mp->mnt_stat.f_mntfromspec, fspec, MNAMELEN);

		error = ffs_mountfs(devvp, mp, p);
	}

	if (error)
		goto error_2;

	/*
	 * Initialize FS stat information in mount struct; uses both
	 * mp->mnt_stat.f_mntonname and mp->mnt_stat.f_mntfromname
	 *
	 * This code is common to root and non-root mounts
	 */
	memcpy(&mp->mnt_stat.mount_info.ufs_args, &args, sizeof(args));
	VFS_STATFS(mp, &mp->mnt_stat, p);

success:
	if (path && (mp->mnt_flag & MNT_UPDATE)) {
		/* Update clean flag after changing read-onlyness. */
		fs = ump->um_fs;
		if (ronly != fs->fs_ronly) {
			fs->fs_ronly = ronly;
			fs->fs_clean = ronly &&
			    (fs->fs_flags & FS_UNCLEAN) == 0 ? 1 : 0;
			if (ronly)
				free(fs->fs_contigdirs, M_UFSMNT, 0);
		}
		if (!ronly) {
			if (mp->mnt_flag & MNT_SOFTDEP)
				fs->fs_flags |= FS_DOSOFTDEP;
			else
				fs->fs_flags &= ~FS_DOSOFTDEP;
		}
		ffs_sbupdate(ump, MNT_WAIT);
	}
	return (0);

error_2:	/* error with devvp held */
	vrele (devvp);
error_1:	/* no state to back out */
	return (error);
}
/*
 * Mount the Coda distributed filesystem.  "from" names the cfs control
 * character device through which the venus cache manager communicates.
 * A placeholder root vnode is created; the CODA_ROOT upcall is deferred
 * until the first coda_root() call.
 */
/*ARGSUSED*/
int
coda_mount(struct mount *vfsp)
{
	struct vnode *dvp;
	struct cnode *cp;
	struct cdev *dev;
	struct coda_mntinfo *mi;
	struct vnode *rootvp;
	struct CodaFid rootfid = INVAL_FID;
	struct CodaFid ctlfid = CTL_FID;
	int error;
	struct nameidata ndp;
	ENTRY;
	char *from;

	if (vfs_filteropt(vfsp->mnt_optnew, coda_opts))
		return (EINVAL);
	from = vfs_getopts(vfsp->mnt_optnew, "from", &error);
	if (error)
		return (error);
	coda_vfsopstats_init();
	coda_vnodeopstats_init();
	MARK_ENTRY(CODA_MOUNT_STATS);
	/* Only one Coda mount per vfs is allowed. */
	if (CODA_MOUNTED(vfsp)) {
		MARK_INT_FAIL(CODA_MOUNT_STATS);
		return (EBUSY);
	}

	/*
	 * Validate mount device.  Similar to getmdev().
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW, UIO_SYSSPACE, from, curthread);
	error = namei(&ndp);
	dvp = ndp.ni_vp;
	if (error) {
		MARK_INT_FAIL(CODA_MOUNT_STATS);
		return (error);
	}
	if (dvp->v_type != VCHR) {
		MARK_INT_FAIL(CODA_MOUNT_STATS);
		vrele(dvp);
		NDFREE(&ndp, NDF_ONLY_PNBUF);
		return (ENXIO);
	}
	/* Only the cdev identity is needed; drop the vnode immediately. */
	dev = dvp->v_rdev;
	vrele(dvp);
	NDFREE(&ndp, NDF_ONLY_PNBUF);

	/*
	 * Initialize the mount record and link it to the vfs struct.
	 */
	mi = dev2coda_mntinfo(dev);
	if (!mi) {
		MARK_INT_FAIL(CODA_MOUNT_STATS);
		printf("Coda mount: %s is not a cfs device\n", from);
		return (ENXIO);
	}
	if (!VC_OPEN(&mi->mi_vcomm)) {
		MARK_INT_FAIL(CODA_MOUNT_STATS);
		return (ENODEV);
	}

	/*
	 * No initialization (here) of mi_vcomm!
	 */
	vfsp->mnt_data = mi;
	vfs_getnewfsid (vfsp);
	mi->mi_vfsp = vfsp;
	mi->mi_started = 0;	/* XXX See coda_root() */

	/*
	 * Make a root vnode to placate the Vnode interface, but don't
	 * actually make the CODA_ROOT call to venus until the first call to
	 * coda_root in case a server is down while venus is starting.
	 */
	cp = make_coda_node(&rootfid, vfsp, VDIR);
	rootvp = CTOV(cp);
	rootvp->v_vflag |= VV_ROOT;
	cp = make_coda_node(&ctlfid, vfsp, VREG);
	coda_ctlvp = CTOV(cp);

	/*
	 * Add vfs and rootvp to chain of vfs hanging off mntinfo.
	 */
	mi->mi_vfsp = vfsp;
	mi->mi_rootvp = rootvp;
	vfs_mountedfrom(vfsp, from);

	/*
	 * Error is currently guaranteed to be zero, but in case some code
	 * changes...
	 */
	CODADEBUG(1, myprintf(("coda_mount returned %d\n", error)););
	/*
	 * NOTE(review): the function's final return statement and closing
	 * brace are not visible in this chunk — confirm nothing was lost
	 * when the file was split.
	 */
/*
 * cd9660_mount: VFS mount entry point for the ISO9660 (CD-ROM) filesystem.
 *
 * Forces the mount read-only, resolves the "from" option to a disk
 * device vnode, checks read access (or the mount privilege), and either
 * performs the initial mount via iso_mountfs() or, on MNT_UPDATE,
 * verifies the device matches the one already mounted.
 */
static int
cd9660_mount(struct mount *mp)
{
	struct vnode *devvp;
	struct thread *td;
	char *fspec;
	int error;
	accmode_t accmode;
	struct nameidata ndp;
	struct iso_mnt *imp = NULL;

	td = curthread;

	/*
	 * Unconditionally mount as read-only.
	 */
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	imp = VFSTOISOFS(mp);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Export-option-only updates need no device work. */
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0))
			return (0);
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(&ndp)))
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * Verify that user has necessary permissions on the device,
	 * or has superuser abilities
	 */
	accmode = VREAD;		/* read-only fs: only VREAD needed */
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/*
		 * Fresh mount: iso_mountfs() consumes the vnode on
		 * success; drop our reference only on failure.
		 * NOTE(review): devvp is still locked (LOCKLEAF) here —
		 * presumably iso_mountfs() handles/unlocks it; confirm.
		 */
		error = iso_mountfs(devvp, mp);
		if (error)
			vrele(devvp);
	} else {
		/* Update: the named device must be the mounted one. */
		if (devvp != imp->im_devvp)
			error = EINVAL;	/* needs translation */
		vput(devvp);
	}
	if (error)
		return (error);
	vfs_mountedfrom(mp, fspec);
	return (0);
}
/*
 * VMBlockVFSMount: mount a VMBlock layered filesystem on top of a lower
 * ("staging area") filesystem.
 *
 * Rejects root mounts, remounts, and VMBlock-on-VMBlock stacking, looks
 * up the "target" option (the lower path), grabs a VMBlockNode aliasing
 * the lower root, and records it as this mount's root vnode.
 *
 * NOTE(review): the return type / #if header of this function lies
 * before the visible region (the stray #endif below closes it).
 */
VMBlockVFSMount(struct mount *mp,     // IN: mount(2) parameters
                struct thread *td)    // IN: caller's thread context
#endif
{
   struct VMBlockMount *xmp;
   struct nameidata nd, *ndp = &nd;
   struct vnode *lowerrootvp, *vp;
   char *target;
   char *pathname;
   int len, error = 0;

   VMBLOCKDEBUG("VMBlockVFSMount(mp = %p)\n", (void *)mp);

   /*
    * TODO: Strip out extraneous export & other misc cruft.
    */

   /*
    * Disallow the following:
    *   1.  Mounting over the system root.
    *   2.  Mount updates/remounts.  (Reconsider for rw->ro, ro->rw?)
    *   3.  Mounting VMBlock on top of a VMBlock.
    */
   if ((mp->mnt_flag & MNT_ROOTFS) ||
       (mp->mnt_flag & MNT_UPDATE) ||
       (mp->mnt_vnodecovered->v_op == &VMBlockVnodeOps)) {
      return EOPNOTSUPP;
   }

   /*
    * XXX Should only be unlocked if mnt_flag & MNT_UPDATE.
    */
   ASSERT_VOP_UNLOCKED(mp->mnt_vnodecovered,
                       "Covered vnode already locked!");

   /*
    * Look up path to lower layer (VMBlock source / DnD staging area).
    * (E.g., in the command "mount /tmp/VMwareDnD /var/run/vmblock",
    * /tmp/VMwareDnD is the staging area.)
    */
   error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
   /* Option must be present and NUL-terminated. */
   if (error || target[len - 1] != '\0') {
      return EINVAL;
   }

   pathname = uma_zalloc(VMBlockPathnameZone, M_WAITOK);
   if (pathname == NULL) {
      return ENOMEM;
   }

   if (strlcpy(pathname, target, MAXPATHLEN) >= MAXPATHLEN) {
      uma_zfree(VMBlockPathnameZone, pathname);
      return ENAMETOOLONG;
   }

   /*
    * Find lower node and lock if not already locked.
    */
   NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, compat_td);
   error = namei(ndp);
   if (error) {
      NDFREE(ndp, 0);
      uma_zfree(VMBlockPathnameZone, pathname);
      return error;
   }
   NDFREE(ndp, NDF_ONLY_PNBUF);

   /*
    * Check multi VMBlock mount to avoid `lock against myself' panic.
    */
   lowerrootvp = ndp->ni_vp;
   if (lowerrootvp == VPTOVMB(mp->mnt_vnodecovered)->lowerVnode) {
      VMBLOCKDEBUG("VMBlockVFSMount: multi vmblock mount?\n");
      vput(lowerrootvp);
      uma_zfree(VMBlockPathnameZone, pathname);
      return EDEADLK;
   }

   xmp = malloc(sizeof *xmp, M_VMBLOCKFSMNT, M_WAITOK);

   /*
    * Record pointer (mountVFS) to the staging area's file system.  Follow up
    * by grabbing a VMBlockNode for our layer's root.
    */
   xmp->mountVFS = lowerrootvp->v_mount;
   error = VMBlockNodeGet(mp, lowerrootvp, &vp, pathname);

   /*
    * Make sure the node alias worked
    */
   if (error) {
      COMPAT_VOP_UNLOCK(vp, 0, compat_td);
      vrele(lowerrootvp);
      free(xmp, M_VMBLOCKFSMNT);	/* XXX */
      uma_zfree(VMBlockPathnameZone, pathname);
      return error;
   }

   /*
    * Record a reference to the new filesystem's root vnode & mark it as such.
    */
   xmp->rootVnode = vp;
   xmp->rootVnode->v_vflag |= VV_ROOT;

   /*
    * Unlock the node (either the lower or the alias)
    */
   COMPAT_VOP_UNLOCK(vp, 0, compat_td);

   /*
    * If the staging area is a local filesystem, reflect that here, too.  (We
    * could potentially allow NFS staging areas.)
    */
   MNT_ILOCK(mp);
   mp->mnt_flag |= lowerrootvp->v_mount->mnt_flag & MNT_LOCAL;
#if __FreeBSD_version >= 600000 && __FreeBSD_version < 1000000
   mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag & MNTK_MPSAFE;
#endif
   MNT_IUNLOCK(mp);

   mp->mnt_data = (qaddr_t) xmp;
   vfs_getnewfsid(mp);

   vfs_mountedfrom(mp, target);

   VMBLOCKDEBUG("VMBlockVFSMount: lower %s, alias at %s\n",
      mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
   return 0;
}
/* ARGSUSED */
/*
 * auditctl: syscall to switch the audit subsystem to a new log vnode.
 *
 * Requires superuser.  Opens the path given in uap->path, verifies it
 * is a regular file (and passes the MAC swap-file policy check if
 * configured), clears audit_suspended, and hands the vnode plus a fresh
 * credential reference to audit_rotate_vnode() for the rotation.
 */
int
auditctl(proc_t p, struct auditctl_args *uap, __unused int32_t *retval)
{
	struct nameidata nd;
	kauth_cred_t cred;
	struct vnode *vp;
	int error = 0;

	error = suser(kauth_cred_get(), &p->p_acflag);
	if (error)
		return (error);

	vp = NULL;
	cred = NULL;

	/*
	 * If a path is specified, open the replacement vnode, perform
	 * validity checks, and grab another reference to the current
	 * credential.
	 *
	 * XXX Changes API slightly.  NULL path no longer disables audit but
	 * returns EINVAL.
	 */
	if (uap->path == USER_ADDR_NULL)
		return (EINVAL);
	NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	    uap->path, vfs_context_current());
	error = vn_open(&nd, AUDIT_OPEN_FLAGS, 0);
	if (error)
		return (error);
	vp = nd.ni_vp;
#if CONFIG_MACF
	/*
	 * Accessibility of the vnode was determined in vn_open; the
	 * mac_system_check_auditctl should only determine whether that vnode
	 * is appropriate for storing audit data, or that the caller was
	 * permitted to control the auditing system at all.  For example, a
	 * confidentiality policy may want to ensure that audit files are
	 * always high sensitivity.
	 */
	error = mac_system_check_auditctl(kauth_cred_get(), vp);
	if (error) {
		vn_close(vp, AUDIT_CLOSE_FLAGS, vfs_context_current());
		vnode_put(vp);
		return (error);
	}
#endif
	if (vp->v_type != VREG) {
		/* Audit logs may only live in regular files. */
		vn_close(vp, AUDIT_CLOSE_FLAGS, vfs_context_current());
		vnode_put(vp);
		return (EINVAL);
	}
	mtx_lock(&audit_mtx);
	/*
	 * XXXAUDIT: Should audit_suspended actually be cleared by
	 * audit_worker?
	 */
	audit_suspended = 0;
	mtx_unlock(&audit_mtx);

	/*
	 * The following gets unreferenced in audit_rotate_vnode()
	 * after the rotation and it is no longer needed.
	 */
	cred = kauth_cred_get_with_ref();
	audit_rotate_vnode(cred, vp);
	vnode_put(vp);

	return (error);
}
/*
 * ptyfs_getinfo: populate a ptyfs node's ownership, mode and timestamps.
 *
 * The root node gets fixed dr-xr-xr-x-style permissions.  For pty nodes,
 * if a foreign (non-ptyfs) ptm backend is active, the attributes are
 * copied from the traditional /dev pty node it names; otherwise (or on
 * any failure along the way) the node falls back to root-owned defaults
 * with freshly stamped times.
 */
static void
ptyfs_getinfo(struct ptyfsnode *ptyfs, struct lwp *l)
{
	extern struct ptm_pty *ptyfs_save_ptm, ptm_ptyfspty;

	if (ptyfs->ptyfs_type == PTYFSroot) {
		ptyfs->ptyfs_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|
		    S_IROTH|S_IXOTH;
		goto out;
	} else
		ptyfs->ptyfs_mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|
		    S_IROTH|S_IWOTH;

	if (ptyfs_save_ptm != NULL && ptyfs_save_ptm != &ptm_ptyfspty) {
		int error;
		struct pathbuf *pb;
		struct nameidata nd;
		char ttyname[64];
		kauth_cred_t cred;
		struct vattr va;

		/*
		 * We support traditional ptys, so we copy the info
		 * from the inode
		 */
		if ((error = (*ptyfs_save_ptm->makename)(
			ptyfs_save_ptm, l, ttyname, sizeof(ttyname),
			ptyfs->ptyfs_pty,
			ptyfs->ptyfs_type == PTYFSpts ? 't' : 'p')) != 0)
			goto out;
		pb = pathbuf_create(ttyname);
		if (pb == NULL) {
			error = ENOMEM;
			goto out;
		}
		NDINIT(&nd, LOOKUP, NOFOLLOW|LOCKLEAF, pb);
		if ((error = namei(&nd)) != 0) {
			pathbuf_destroy(pb);
			goto out;
		}
		/* Throwaway credential just for the GETATTR call. */
		cred = kauth_cred_alloc();
		error = VOP_GETATTR(nd.ni_vp, &va, cred);
		kauth_cred_free(cred);
		VOP_UNLOCK(nd.ni_vp);
		vrele(nd.ni_vp);
		pathbuf_destroy(pb);
		if (error)
			goto out;
		/* Mirror the traditional pty node's attributes. */
		ptyfs->ptyfs_uid = va.va_uid;
		ptyfs->ptyfs_gid = va.va_gid;
		ptyfs->ptyfs_mode = va.va_mode;
		ptyfs->ptyfs_flags = va.va_flags;
		ptyfs->ptyfs_birthtime = va.va_birthtime;
		ptyfs->ptyfs_ctime = va.va_ctime;
		ptyfs->ptyfs_mtime = va.va_mtime;
		ptyfs->ptyfs_atime = va.va_atime;
		return;
	}
out:
	/* Fallback: root-owned, times stamped now. */
	ptyfs->ptyfs_uid = ptyfs->ptyfs_gid = 0;
	ptyfs->ptyfs_status |= PTYFS_CHANGE;
	PTYFS_ITIMES(ptyfs, NULL, NULL, NULL);
	ptyfs->ptyfs_birthtime = ptyfs->ptyfs_mtime =
	    ptyfs->ptyfs_atime = ptyfs->ptyfs_ctime;
	ptyfs->ptyfs_flags = 0;
}
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 *
 *	Requires superuser.  Resolves the filename to a regular-file
 *	vnode, optionally grows it to the requested size, claims a slot
 *	in bs_port_table[], creates a default-pager backing store for it,
 *	and marks the vnode VSWAP with a long-term reference so it cannot
 *	be reclaimed while used for paging.
 */
int
macx_swapon(
	struct macx_swapon_args *args)
{
	int			size = args->size;
	vnode_t			vp = (vnode_t)NULL;
	struct nameidata	nd, *ndp;
	register int		error;
	kern_return_t		kr;
	mach_port_t		backing_store;
	memory_object_default_t	default_pager;
	int			i;
	boolean_t		funnel_state;
	off_t			file_size;
	vfs_context_t		ctx = vfs_context_current();
	struct proc		*p = current_proc();

	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
	AUDIT_ARG(value32, args->priority);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	ndp = &nd;

	if ((error = suser(kauth_cred_get(), 0)))
		goto swapon_bailout;

	/* Lazily start the default pager on first use. */
	if(default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	/*
	 * Get a vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapon_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		/* Swap files must be regular files. */
		error = EINVAL;
		goto swapon_bailout;
	}

	/* get file size */
	if ((error = vnode_size(vp, &file_size, ctx)) != 0)
		goto swapon_bailout;
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapon_bailout;
#endif

	/* resize to desired size if it's too small */
	if ((file_size < (off_t)size) &&
	    ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
		goto swapon_bailout;

	/* add new backing store to list */
	i = 0;
	while(bs_port_table[i].vp != 0) {
		if(i == MAX_BACKING_STORE)
			break;
		i++;
	}
	if(i == MAX_BACKING_STORE) {
		/* Table full: no more swap files allowed. */
		error = ENOMEM;
		goto swapon_bailout;
	}

	/* remember the vnode. This vnode has namei() reference */
	bs_port_table[i].vp = vp;

	/*
	 * Look to see if we are already paging to this file.
	 */
	/* make certain the copy send of kernel call will work */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
	if(kr != KERN_SUCCESS) {
		error = EAGAIN;
		bs_port_table[i].vp = 0;	/* release the claimed slot */
		goto swapon_bailout;
	}

	kr = default_pager_backing_store_create(default_pager,
					-1, /* default priority */
					0, /* default cluster size */
					&backing_store);
	memory_object_default_deallocate(default_pager);

	if(kr != KERN_SUCCESS) {
		error = ENOMEM;
		bs_port_table[i].vp = 0;	/* release the claimed slot */
		goto swapon_bailout;
	}

	/* Mark this vnode as being used for swapfile */
	vnode_lock_spin(vp);
	SET(vp->v_flag, VSWAP);
	vnode_unlock(vp);

	/*
	 * NOTE: we are able to supply PAGE_SIZE here instead of
	 *	an actual record size or block number because:
	 *	a: we do not support offsets from the beginning of the
	 *		file (allowing for non page size/record modulo offsets.
	 *	b: because allow paging will be done modulo page size
	 */
	kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
				PAGE_SIZE, (int)(file_size/PAGE_SIZE));
	if(kr != KERN_SUCCESS) {
		bs_port_table[i].vp = 0;
		if(kr == KERN_INVALID_ARGUMENT)
			error = EINVAL;
		else
			error = ENOMEM;

		/* This vnode is not to be used for swapfile */
		vnode_lock_spin(vp);
		CLR(vp->v_flag, VSWAP);
		vnode_unlock(vp);

		goto swapon_bailout;
	}
	bs_port_table[i].bs = (void *)backing_store;
	error = 0;

	ubc_setthreadcred(vp, p, current_thread());

	/*
	 * take a long term reference on the vnode to keep
	 * vnreclaim() away from this vnode.
	 */
	vnode_ref(vp);

swapon_bailout:
	/* Drop the short-term namei() iocount; the long-term ref
	 * (vnode_ref) taken on success keeps the vnode alive. */
	if (vp) {
		vnode_put(vp);
	}
	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
/*
 * fuse_getdevice: resolve a path to a referenced fuse character device.
 *
 * Looks up fspec, requires it to be a character device whose devsw name
 * is "fuse", and returns it in *fdevp holding a dev_ref() reference
 * (the caller owns that reference).  With fuse_enforce_dev_perms set,
 * the calling thread must also have read/write access to the device
 * node.  Returns 0 on success or an errno.
 */
static int
fuse_getdevice(const char *fspec, struct thread *td, struct cdev **fdevp)
{
	struct nameidata nd, *ndp = &nd;
	struct vnode *devvp;
	struct cdev *fdev;
	int err;

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_SYSSPACE, fspec, td);
	if ((err = namei(ndp)) != 0)
		return err;
	NDFREE(ndp, NDF_ONLY_PNBUF);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VCHR) {
		vrele(devvp);
		return ENXIO;
	}

	fdev = devvp->v_rdev;
	/* Hold the cdev independently of the vnode. */
	dev_ref(fdev);

	if (fuse_enforce_dev_perms) {
		/*
		 * Check if mounter can open the fuse device.
		 *
		 * This has significance only if we are doing a secondary mount
		 * which doesn't involve actually opening fuse devices, but we
		 * still want to enforce the permissions of the device (in
		 * order to keep control over the circle of fuse users).
		 *
		 * (In case of primary mounts, we are either the superuser so
		 * we can do anything anyway, or we can mount only if the
		 * device is already opened by us, ie. we are permitted to open
		 * the device.)
		 */
#if 0
#ifdef MAC
		err = mac_check_vnode_open(td->td_ucred, devvp, VREAD | VWRITE);
		if (!err)
#endif
#endif /* 0 */
			err = VOP_ACCESS(devvp, VREAD | VWRITE, td->td_ucred, td);
		if (err) {
			vrele(devvp);
			dev_rel(fdev);
			return err;
		}
	}

	/*
	 * according to coda code, no extra lock is needed --
	 * although in sys/vnode.h this field is marked "v"
	 */
	vrele(devvp);

	if (!fdev->si_devsw ||
	    strcmp("fuse", fdev->si_devsw->d_name)) {
		/* Not actually a fuse device node. */
		dev_rel(fdev);
		return ENXIO;
	}

	*fdevp = fdev;

	return 0;
}
/*
 * vfs_mountroot_shuffle: promote the newly mounted root filesystem to
 * the head of the mountlist and re-cover the old root and devfs.
 *
 * After the real root is mounted, this reorders the mountlist so the
 * new root is first and devfs is last, detaches the temporary root's
 * covered-vnode linkage, installs the new rootvnode, remounts the old
 * root under /.mount or /mnt (best effort), remounts devfs under /dev,
 * and removes the bootstrap /dev/dev symlink when devfs itself was the
 * temporary root.  Always returns 0; sub-failures are only logged.
 */
static int
vfs_mountroot_shuffle(struct thread *td, struct mount *mpdevfs)
{
	struct nameidata nd;
	struct mount *mporoot, *mpnroot;
	struct vnode *vp, *vporoot, *vpdevfs;
	char *fspath;
	int error;

	mpnroot = TAILQ_NEXT(mpdevfs, mnt_list);

	/* Shuffle the mountlist. */
	mtx_lock(&mountlist_mtx);
	mporoot = TAILQ_FIRST(&mountlist);
	TAILQ_REMOVE(&mountlist, mpdevfs, mnt_list);
	if (mporoot != mpdevfs) {
		TAILQ_REMOVE(&mountlist, mpnroot, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, mpnroot, mnt_list);
	}
	TAILQ_INSERT_TAIL(&mountlist, mpdevfs, mnt_list);
	mtx_unlock(&mountlist_mtx);

	/* Purge stale name-cache entries for the shuffled mounts. */
	cache_purgevfs(mporoot);
	if (mporoot != mpdevfs)
		cache_purgevfs(mpdevfs);

	/* Detach the old root from its rootfs role. */
	VFS_ROOT(mporoot, LK_EXCLUSIVE, &vporoot);

	VI_LOCK(vporoot);
	vporoot->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vporoot);
	vporoot->v_mountedhere = NULL;
	mporoot->mnt_flag &= ~MNT_ROOTFS;
	mporoot->mnt_vnodecovered = NULL;
	vput(vporoot);

	/* Set up the new rootvnode, and purge the cache */
	mpnroot->mnt_vnodecovered = NULL;
	set_rootvnode();
	cache_purgevfs(rootvnode->v_mount);

	if (mporoot != mpdevfs) {
		/* Remount old root under /.mount or /mnt */
		fspath = "/.mount";
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
		    fspath, td);
		error = namei(&nd);
		if (error) {
			NDFREE(&nd, NDF_ONLY_PNBUF);
			fspath = "/mnt";	/* fallback mount point */
			NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
			    fspath, td);
			error = namei(&nd);
		}
		if (!error) {
			vp = nd.ni_vp;
			error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
			if (!error)
				error = vinvalbuf(vp, V_SAVE, 0, 0);
			if (!error) {
				/* Cover vp with the old root mount. */
				cache_purge(vp);
				mporoot->mnt_vnodecovered = vp;
				vp->v_mountedhere = mporoot;
				strlcpy(mporoot->mnt_stat.f_mntonname,
				    fspath, MNAMELEN);
				VOP_UNLOCK(vp, 0);
			} else
				vput(vp);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);

		if (error && bootverbose)
			printf("mountroot: unable to remount previous root "
			    "under /.mount or /mnt (error %d).\n", error);
	}

	/* Remount devfs under /dev */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, "/dev", td);
	error = namei(&nd);
	if (!error) {
		vp = nd.ni_vp;
		error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
		if (!error)
			error = vinvalbuf(vp, V_SAVE, 0, 0);
		if (!error) {
			/* Drop devfs's previous covered vnode, if any. */
			vpdevfs = mpdevfs->mnt_vnodecovered;
			if (vpdevfs != NULL) {
				cache_purge(vpdevfs);
				vpdevfs->v_mountedhere = NULL;
				vrele(vpdevfs);
			}
			mpdevfs->mnt_vnodecovered = vp;
			vp->v_mountedhere = mpdevfs;
			VOP_UNLOCK(vp, 0);
		} else
			vput(vp);
	}
	if (error && bootverbose)
		printf("mountroot: unable to remount devfs under /dev "
		    "(error %d).\n", error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (mporoot == mpdevfs) {
		vfs_unbusy(mpdevfs);
		/* Unlink the no longer needed /dev/dev -> / symlink */
		error = kern_unlinkat(td, AT_FDCWD, "/dev/dev",
		    UIO_SYSSPACE, 0);
		if (error && bootverbose)
			printf("mountroot: unable to unlink /dev/dev "
			    "(error %d)\n", error);
	}

	return (0);
}
/*
 * link_elf_ctf_get: load (and cache) the .SUNW_ctf debug section of a
 * kernel linker file for DDB/DTrace CTF consumers.
 *
 * On first call it re-opens the module's backing file, parses the ELF
 * section headers, locates .SUNW_ctf by name, validates the CTF header
 * (magic 0xcff1, version 2), inflates the payload with zlib when the
 * compressed flag is set, and stashes the result in the elf_file so
 * subsequent calls are satisfied from cache.  ef->ctfcnt == -1 records
 * a previous failed attempt so it is not retried.  Without DDB_CTF the
 * function returns EOPNOTSUPP.
 */
static int
link_elf_ctf_get(linker_file_t lf, linker_ctf_t *lc)
{
#ifdef DDB_CTF
	Elf_Ehdr *hdr = NULL;
	Elf_Shdr *shdr = NULL;
	caddr_t ctftab = NULL;
	caddr_t raw = NULL;
	caddr_t shstrtab = NULL;
	elf_file_t ef = (elf_file_t) lf;
	int flags;
	int i;
	int nbytes;
	ssize_t resid;
	int vfslocked;
	size_t sz;
	struct nameidata nd;
	struct thread *td = curthread;
	uint8_t ctf_hdr[CTF_HDR_SIZE];
#endif
	int error = 0;

	if (lf == NULL || lc == NULL)
		return (EINVAL);

	/* Set the defaults for no CTF present. That's not a crime! */
	bzero(lc, sizeof(*lc));

#ifdef DDB_CTF
	/*
	 * First check if we've tried to load CTF data previously and the
	 * CTF ELF section wasn't found. We flag that condition by setting
	 * ctfcnt to -1. See below.
	 */
	if (ef->ctfcnt < 0)
		return (EFTYPE);

	/* Now check if we've already loaded the CTF data.. */
	if (ef->ctfcnt > 0) {
		/* We only need to load once. */
		lc->ctftab = ef->ctftab;
		lc->ctfcnt = ef->ctfcnt;
		lc->symtab = ef->ddbsymtab;
		lc->strtab = ef->ddbstrtab;
		lc->strcnt = ef->ddbstrcnt;
		lc->nsym   = ef->ddbsymcnt;
		lc->ctfoffp = (uint32_t **) &ef->ctfoff;
		lc->typoffp = (uint32_t **) &ef->typoff;
		lc->typlenp = &ef->typlen;
		return (0);
	}

	/*
	 * We need to try reading the CTF data. Flag no CTF data present
	 * by default and if we actually succeed in reading it, we'll
	 * update ctfcnt to the number of bytes read.
	 */
	ef->ctfcnt = -1;

	/* Re-open the module's backing file to read the section. */
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, lf->pathname, td);
	flags = FREAD;
	error = vn_open(&nd, &flags, 0, NULL);
	if (error)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	/* Allocate memory for the FLF header. */
	if ((hdr = malloc(sizeof(*hdr), M_LINKER, M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Read the ELF header. */
	if ((error = vn_rdwr(UIO_READ, nd.ni_vp, hdr, sizeof(*hdr),
	    0, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, &resid,
	    td)) != 0)
		goto out;

	/* Sanity check. */
	if (!IS_ELF(*hdr)) {
		error = ENOEXEC;
		goto out;
	}

	nbytes = hdr->e_shnum * hdr->e_shentsize;
	if (nbytes == 0 || hdr->e_shoff == 0 ||
	    hdr->e_shentsize != sizeof(Elf_Shdr)) {
		error = ENOEXEC;
		goto out;
	}

	/* Allocate memory for all the section headers */
	if ((shdr = malloc(nbytes, M_LINKER, M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Read all the section headers */
	if ((error = vn_rdwr(UIO_READ, nd.ni_vp, (caddr_t)shdr, nbytes,
	    hdr->e_shoff, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED,
	    &resid, td)) != 0)
		goto out;

	/*
	 * We need to search for the CTF section by name, so if the
	 * section names aren't present, then we can't locate the
	 * .SUNW_ctf section containing the CTF data.
	 */
	if (hdr->e_shstrndx == 0 ||
	    shdr[hdr->e_shstrndx].sh_type != SHT_STRTAB) {
		printf("%s(%d): module %s e_shstrndx is %d, sh_type is %d\n",
		    __func__, __LINE__, lf->pathname, hdr->e_shstrndx,
		    shdr[hdr->e_shstrndx].sh_type);
		error = EFTYPE;
		goto out;
	}

	/* Allocate memory to buffer the section header strings. */
	if ((shstrtab = malloc(shdr[hdr->e_shstrndx].sh_size, M_LINKER,
	    M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}

	/* Read the section header strings. */
	if ((error = vn_rdwr(UIO_READ, nd.ni_vp, shstrtab,
	    shdr[hdr->e_shstrndx].sh_size, shdr[hdr->e_shstrndx].sh_offset,
	    UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred, NOCRED, &resid,
	    td)) != 0)
		goto out;

	/* Search for the section containing the CTF data. */
	for (i = 0; i < hdr->e_shnum; i++)
		if (strcmp(".SUNW_ctf", shstrtab + shdr[i].sh_name) == 0)
			break;

	/* Check if the CTF section wasn't found. */
	if (i >= hdr->e_shnum) {
		printf("%s(%d): module %s has no .SUNW_ctf section\n",
		    __func__, __LINE__, lf->pathname);
		error = EFTYPE;
		goto out;
	}

	/* Read the CTF header. */
	if ((error = vn_rdwr(UIO_READ, nd.ni_vp, ctf_hdr, sizeof(ctf_hdr),
	    shdr[i].sh_offset, UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
	    NOCRED, &resid, td)) != 0)
		goto out;

	/* Check the CTF magic number. (XXX check for big endian!) */
	if (ctf_hdr[0] != 0xf1 || ctf_hdr[1] != 0xcf) {
		printf("%s(%d): module %s has invalid format\n",
		    __func__, __LINE__, lf->pathname);
		error = EFTYPE;
		goto out;
	}

	/* Check if version 2. */
	if (ctf_hdr[2] != 2) {
		printf("%s(%d): module %s CTF format version is %d "
		    "(2 expected)\n",
		    __func__, __LINE__, lf->pathname, ctf_hdr[2]);
		error = EFTYPE;
		goto out;
	}

	/* Check if the data is compressed. */
	if ((ctf_hdr[3] & 0x1) != 0) {
		uint32_t *u32 = (uint32_t *) ctf_hdr;

		/*
		 * The last two fields in the CTF header are the offset
		 * from the end of the header to the start of the string
		 * data and the length of that string data. se this
		 * information to determine the decompressed CTF data
		 * buffer required.
		 */
		sz = u32[CTF_HDR_STRTAB_U32] + u32[CTF_HDR_STRLEN_U32] +
		    sizeof(ctf_hdr);

		/*
		 * Allocate memory for the compressed CTF data, including
		 * the header (which isn't compressed).
		 */
		if ((raw = malloc(shdr[i].sh_size, M_LINKER,
		    M_WAITOK)) == NULL) {
			error = ENOMEM;
			goto out;
		}
	} else {
		/*
		 * The CTF data is not compressed, so the ELF section
		 * size is the same as the buffer size required.
		 */
		sz = shdr[i].sh_size;
	}

	/*
	 * Allocate memory to buffer the CTF data in it's decompressed
	 * form.
	 */
	if ((ctftab = malloc(sz, M_LINKER, M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}

	/*
	 * Read the CTF data into the raw buffer if compressed, or
	 * directly into the CTF buffer otherwise.
	 */
	if ((error = vn_rdwr(UIO_READ, nd.ni_vp, raw == NULL ? ctftab : raw,
	    shdr[i].sh_size, shdr[i].sh_offset, UIO_SYSSPACE, IO_NODELOCKED,
	    td->td_ucred, NOCRED, &resid, td)) != 0)
		goto out;

	/* Check if decompression is required. */
	if (raw != NULL) {
		z_stream zs;
		int ret;

		/*
		 * The header isn't compressed, so copy that into the
		 * CTF buffer first.
		 */
		bcopy(ctf_hdr, ctftab, sizeof(ctf_hdr));

		/* Initialise the zlib structure. */
		bzero(&zs, sizeof(zs));
		zs.zalloc = z_alloc;
		zs.zfree = z_free;

		if (inflateInit(&zs) != Z_OK) {
			error = EIO;
			goto out;
		}

		/* Inflate the payload that follows the (plain) header. */
		zs.avail_in = shdr[i].sh_size - sizeof(ctf_hdr);
		zs.next_in = ((uint8_t *) raw) + sizeof(ctf_hdr);
		zs.avail_out = sz - sizeof(ctf_hdr);
		zs.next_out = ((uint8_t *) ctftab) + sizeof(ctf_hdr);
		if ((ret = inflate(&zs, Z_FINISH)) != Z_STREAM_END) {
			printf("%s(%d): zlib inflate returned %d\n",
			    __func__, __LINE__, ret);
			error = EIO;
			goto out;
		}
	}

	/* Got the CTF data! */
	ef->ctftab = ctftab;
	ef->ctfcnt = shdr[i].sh_size;

	/* We'll retain the memory allocated for the CTF data. */
	ctftab = NULL;

	/* Let the caller use the CTF data read. */
	lc->ctftab = ef->ctftab;
	lc->ctfcnt = ef->ctfcnt;
	lc->symtab = ef->ddbsymtab;
	lc->strtab = ef->ddbstrtab;
	lc->strcnt = ef->ddbstrcnt;
	lc->nsym   = ef->ddbsymcnt;
	lc->ctfoffp = (uint32_t **) &ef->ctfoff;
	lc->typoffp = (uint32_t **) &ef->typoff;
	lc->typlenp = &ef->typlen;

out:
	/* Common cleanup: close the file and free scratch buffers
	 * (ctftab was NULLed above on success so it survives). */
	VOP_UNLOCK(nd.ni_vp, 0);
	vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
	VFS_UNLOCK_GIANT(vfslocked);

	if (hdr != NULL)
		free(hdr, M_LINKER);
	if (shdr != NULL)
		free(shdr, M_LINKER);
	if (shstrtab != NULL)
		free(shstrtab, M_LINKER);
	if (ctftab != NULL)
		free(ctftab, M_LINKER);
	if (raw != NULL)
		free(raw, M_LINKER);
#else
	error = EOPNOTSUPP;
#endif

	return (error);
}
/*
 * coff_load_file: load an i386 COFF shared library/image into the
 * current process's address space.
 *
 * Resolves and opens the named file, validates it is an executable
 * regular COFF file, maps the first page into the kernel map to read
 * the file/section headers, then loads the text and data (+bss)
 * sections into the process's vmspace via load_coff_section().  The
 * vnode lock is dropped before mapping so pagefault paging works.
 */
static int
coff_load_file(struct thread *td, char *name)
{
	struct proc *p = td->td_proc;
	struct vmspace *vmspace = p->p_vmspace;
	int error;
	struct nameidata nd;
	struct vnode *vp;
	struct vattr attr;
	struct filehdr *fhdr;
	struct aouthdr *ahdr;
	struct scnhdr *scns;
	char *ptr = 0;
	int nscns;
	unsigned long text_offset = 0, text_address = 0, text_size = 0;
	unsigned long data_offset = 0, data_address = 0, data_size = 0;
	unsigned long bss_size = 0;
	int i;

	NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
	    UIO_SYSSPACE, name, td);

	error = namei(&nd);
	if (error)
		return error;

	vp = nd.ni_vp;
	if (vp == NULL)
		return ENOEXEC;

	/* Refuse files currently open for writing (text busy). */
	if (vp->v_writecount) {
		error = ETXTBSY;
		goto fail;
	}

	if ((error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) != 0)
		goto fail;

	/* Must be an executable regular file on an exec-permitted mount. */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
	    || ((attr.va_mode & 0111) == 0)
	    || (attr.va_type != VREG))
		goto fail;

	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto fail;
	}

	if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
		goto fail;

	if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td)) != 0)
		goto fail;

	/*
	 * Lose the lock on the vnode. It's no longer needed, and must not
	 * exist for the pagefault paging to work below.
	 */
	VOP_UNLOCK(vp, 0, td);

	/* Map the first page to read the COFF headers. */
	if ((error = vm_mmap(kernel_map,
			    (vm_offset_t *) &ptr,
			    PAGE_SIZE,
			    VM_PROT_READ,
		       	    VM_PROT_READ,
			    0,
			    (caddr_t) vp,
			    0)) != 0)
		goto unlocked_fail;

	fhdr = (struct filehdr *)ptr;

	if (fhdr->f_magic != I386_COFF) {
		error = ENOEXEC;
		goto dealloc_and_fail;
	}

	nscns = fhdr->f_nscns;

	if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
		/*
		 * XXX -- just fail.  I'm so lazy.
		 */
		error = ENOEXEC;
		goto dealloc_and_fail;
	}

	ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));

	scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
			  + sizeof(struct aouthdr));

	/* Collect the load parameters of the text/data/bss sections. */
	for (i = 0; i < nscns; i++) {
		if (scns[i].s_flags & STYP_NOLOAD)
			continue;
		else if (scns[i].s_flags & STYP_TEXT) {
			text_address = scns[i].s_vaddr;
			text_size = scns[i].s_size;
			text_offset = scns[i].s_scnptr;
		}
		else if (scns[i].s_flags & STYP_DATA) {
			data_address = scns[i].s_vaddr;
			data_size = scns[i].s_size;
			data_offset = scns[i].s_scnptr;
		} else if (scns[i].s_flags & STYP_BSS) {
			bss_size = scns[i].s_size;
		}
	}

	if ((error = load_coff_section(vmspace, vp, text_offset,
				      (caddr_t)(void *)(uintptr_t)text_address,
				      text_size, text_size,
				      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
		goto dealloc_and_fail;
	}
	/* bss is appended to the data section's mapping. */
	if ((error = load_coff_section(vmspace, vp, data_offset,
				      (caddr_t)(void *)(uintptr_t)data_address,
				      data_size + bss_size, data_size,
				      VM_PROT_ALL)) != 0) {
		goto dealloc_and_fail;
	}

	error = 0;

 dealloc_and_fail:
	/* Unmap the header page regardless of outcome. */
	if (vm_map_remove(kernel_map,
			  (vm_offset_t) ptr,
			  (vm_offset_t) ptr + PAGE_SIZE))
		panic("%s vm_map_remove failed", __func__);

 fail:
	VOP_UNLOCK(vp, 0, td);

 unlocked_fail:
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vrele(nd.ni_vp);
	return error;
}
/*
 * module_load_plist_vfs:
 *
 *	Load a plist located in the file system into memory.
 *
 *	Derives the .plist path from the module path (replacing a
 *	trailing ".kmod" or appending ".plist"), reads the file into a
 *	fixed 8 KiB buffer, and internalizes it into a property
 *	dictionary returned via *filedictp.  Files that do not fit the
 *	buffer fail with EFBIG.  The caller owns the returned dictionary.
 */
static int
module_load_plist_vfs(const char *modpath, const bool nochroot,
    prop_dictionary_t *filedictp)
{
	struct pathbuf *pb;
	struct nameidata nd;
	struct stat sb;
	void *base;
	char *proppath;
	const size_t plistsize = 8192;	/* max supported plist size */
	size_t resid;
	int error, pathlen;

	KASSERT(filedictp != NULL);
	base = NULL;

	/* Build the companion .plist pathname from the module path. */
	proppath = PNBUF_GET();
	strcpy(proppath, modpath);
	pathlen = strlen(proppath);
	if ((pathlen >= 6) && (strcmp(&proppath[pathlen - 5], ".kmod") == 0)) {
		strcpy(&proppath[pathlen - 5], ".plist");
	} else if (pathlen < MAXPATHLEN - 6) {
		strcat(proppath, ".plist");
	} else {
		error = ENOENT;
		goto out1;
	}

	/* XXX this makes an unnecessary extra copy of the path */
	pb = pathbuf_create(proppath);
	if (pb == NULL) {
		error = ENOMEM;
		goto out1;
	}

	NDINIT(&nd, LOOKUP, FOLLOW | (nochroot ? NOCHROOT : 0), pb);

	error = vn_open(&nd, FREAD, 0);
	if (error != 0) {
	 	goto out2;
	}

	error = vn_stat(nd.ni_vp, &sb);
	if (error != 0) {
		goto out3;
	}
	if (sb.st_size >= (plistsize - 1)) {	/* leave space for term \0 */
		error = EFBIG;
		goto out3;
	}

	base = kmem_alloc(plistsize, KM_SLEEP);
	if (base == NULL) {
		error = ENOMEM;
		goto out3;
	}

	error = vn_rdwr(UIO_READ, nd.ni_vp, base, sb.st_size, 0,
	    UIO_SYSSPACE, IO_NODELOCKED, curlwp->l_cred, &resid, curlwp);
	/* NUL-terminate so the internalizer sees a proper C string. */
	*((uint8_t *)base + sb.st_size) = '\0';
	if (error == 0 && resid != 0) {
		/* Short read: file changed underneath us. */
		error = EFBIG;
	}
	if (error != 0) {
		kmem_free(base, plistsize);
		base = NULL;
		goto out3;
	}

	*filedictp = prop_dictionary_internalize(base);
	if (*filedictp == NULL) {
		error = EINVAL;
	}
	kmem_free(base, plistsize);
	base = NULL;
	KASSERT(error == 0);

out3:
	VOP_UNLOCK(nd.ni_vp);
	vn_close(nd.ni_vp, FREAD, kauth_cred_get());

out2:
	pathbuf_destroy(pb);

out1:
	PNBUF_PUT(proppath);
	return error;
}