/*
 * pty_allocvp:
 *
 *	Resolve the vnode of the pty device node identified by (dev, ms)
 *	and hand it back locked in *vp (the lookup uses LOCKLEAF).
 *	The mount argument is unused.
 */
static int /*ARGSUSED*/
pty_allocvp(struct mount *mp, struct lwp *l, struct vnode **vp, dev_t dev,
    char ms)
{
	struct pathbuf *pb;
	struct nameidata nd;
	char ptyname[TTY_NAMESIZE];
	int rv;

	/* Build the device node's path name for this pty. */
	rv = pty_makename(NULL, l, ptyname, sizeof(ptyname), dev, ms);
	if (rv != 0)
		return rv;

	pb = pathbuf_create(ptyname);
	if (pb == NULL)
		return ENOMEM;

	/* Look it up without following symlinks; keep the leaf locked. */
	NDINIT(&nd, LOOKUP, NOFOLLOW|LOCKLEAF, pb);
	rv = namei(&nd);
	if (rv != 0) {
		pathbuf_destroy(pb);
		return rv;
	}

	*vp = nd.ni_vp;
	pathbuf_destroy(pb);
	return 0;
}
static int linux_unlink_dircheck(const char *path) { struct nameidata nd; struct pathbuf *pb; int error; /* * Linux returns EISDIR if unlink(2) is called on a directory. * We return EPERM in such cases. To emulate correct behaviour, * check if the path points to directory and return EISDIR if this * is the case. * * XXX this should really not copy in the path buffer twice... */ error = pathbuf_copyin(path, &pb); if (error) { return error; } NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb); if (namei(&nd) == 0) { struct stat sb; if (vn_stat(nd.ni_vp, &sb) == 0 && S_ISDIR(sb.st_mode)) error = EISDIR; vput(nd.ni_vp); } pathbuf_destroy(pb); return error ? error : EPERM; }
/*
 * kobj_load_vfs:
 *
 *	Load an object located in the file system.
 *
 *	The path must contain a '/' (absolute or relative path, not a bare
 *	module name).  On success *kop is set and the opened vnode remains
 *	locked; it is consumed by the object's ko_read/ko_close callbacks.
 *	Returns 0 or an errno.
 */
int
kobj_load_vfs(kobj_t *kop, const char *path, const bool nochroot)
{
	struct nameidata nd;
	struct pathbuf *pb;
	kauth_cred_t cred;
	int error;
	kobj_t ko;

	KASSERT(path != NULL);
	if (strchr(path, '/') == NULL)
		return ENOENT;

	/* NOTE(review): cred is fetched but not used below in this view. */
	cred = kauth_cred_get();

	ko = kmem_zalloc(sizeof(*ko), KM_SLEEP);
	if (ko == NULL) {
		/* Unreachable with KM_SLEEP; kept for safety. */
		return ENOMEM;
	}

	pb = pathbuf_create(path);
	if (pb == NULL) {
		kmem_free(ko, sizeof(*ko));
		return ENOMEM;
	}

	NDINIT(&nd, LOOKUP, FOLLOW | (nochroot ? NOCHROOT : 0), pb);
	error = vn_open(&nd, FREAD, 0);
	if (error != 0) {
		pathbuf_destroy(pb);
		kmem_free(ko, sizeof(*ko));
		return error;
	}

	/*
	 * The vnode stays locked here on purpose: reads and the final
	 * close are performed through the callbacks installed below.
	 */
	ko->ko_type = KT_VNODE;
	kobj_setname(ko, path);
	ko->ko_source = nd.ni_vp;
	ko->ko_read = kobj_read_vfs;
	ko->ko_close = kobj_close_vfs;
	pathbuf_destroy(pb);

	*kop = ko;
	return kobj_load(ko);
}
/* * Search the alternate path for dynamic binary interpreter. If not found * there, check if the interpreter exists in within 'proper' tree. */ int emul_find_interp(struct lwp *l, struct exec_package *epp, const char *itp) { int error; struct pathbuf *pb; struct nameidata nd; unsigned int flags; pb = pathbuf_create(itp); if (pb == NULL) { return ENOMEM; } /* If we haven't found the emulation root already, do so now */ /* Maybe we should remember failures somehow ? */ if (epp->ep_esch->es_emul->e_path != 0 && epp->ep_emul_root == NULL) emul_find_root(l, epp); if (epp->ep_interp != NULL) vrele(epp->ep_interp); /* We need to use the emulation root for the new program, * not the one for the current process. */ if (epp->ep_emul_root == NULL) flags = FOLLOW; else { nd.ni_erootdir = epp->ep_emul_root; /* hack: Pass in the emulation path for ktrace calls */ nd.ni_next = epp->ep_esch->es_emul->e_path; flags = FOLLOW | TRYEMULROOT | EMULROOTSET; } NDINIT(&nd, LOOKUP, flags, pb); error = namei(&nd); if (error != 0) { epp->ep_interp = NULL; return error; } /* Save interpreter in case we actually need to load it */ epp->ep_interp = nd.ni_vp; pathbuf_destroy(pb); return 0; }
/*
 * Mount umap layer
 *
 * Stacks a uid/gid-remapping layer on top of the directory named by
 * args->umap_target.  The mapping tables are copied in from userland.
 * Returns 0 on success or an errno; on failure no resources are left
 * allocated.
 */
int
umapfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct pathbuf *pb;
	struct nameidata nd;
	struct umap_args *args = data;
	struct vnode *lowerrootvp, *vp;
	struct umap_mount *amp;
	int error;
#ifdef UMAPFS_DIAGNOSTIC
	int i;
#endif

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		amp = MOUNTTOUMAPMOUNT(mp);
		if (amp == NULL)
			return EIO;
		args->la.target = NULL;
		args->nentries = amp->info_nentries;
		args->gnentries = amp->info_gnentries;
		*data_len = sizeof *args;
		return 0;
	}

	/* only for root */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_UMAP, NULL, NULL, NULL);
	if (error)
		return error;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount(mp = %p)\n", mp);
#endif

	/*
	 * Update is not supported
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * Find lower node
	 */
	error = pathbuf_copyin(args->umap_target, &pb);
	if (error) {
		return error;
	}
	NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, pb);
	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		return error;
	}

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = nd.ni_vp;
	pathbuf_destroy(pb);

#ifdef UMAPFS_DIAGNOSTIC
	printf("vp = %p, check for VDIR...\n", lowerrootvp);
#endif

	if (lowerrootvp->v_type != VDIR) {
		vput(lowerrootvp);
		return (EINVAL);
	}

	/*
	 * Validate the mapping table sizes before allocating anything.
	 * Fix: this check used to run after the umap_mount allocation and
	 * returned the stale value of 'error' (0), making a rejected mount
	 * appear to succeed with a half-initialized mount structure.
	 */
	if (args->nentries > MAPFILEENTRIES ||
	    args->gnentries > GMAPFILEENTRIES) {
		vput(lowerrootvp);
		return (EINVAL);
	}

#ifdef UMAPFS_DIAGNOSTIC
	printf("mp = %p\n", mp);
#endif

	amp = kmem_zalloc(sizeof(struct umap_mount), KM_SLEEP);
	mp->mnt_data = amp;
	amp->umapm_vfs = lowerrootvp->v_mount;
	if (amp->umapm_vfs->mnt_flag & MNT_LOCAL)
		mp->mnt_flag |= MNT_LOCAL;

	/*
	 * Now copy in the number of entries and maps for umap mapping.
	 */
	amp->info_nentries = args->nentries;
	amp->info_gnentries = args->gnentries;
	error = copyin(args->mapdata, amp->info_mapdata,
	    2*sizeof(u_long)*args->nentries);
	if (error)
		goto bad;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:nentries %d\n",args->nentries);
	for (i = 0; i < args->nentries; i++)
		printf("	%ld maps to %ld\n", amp->info_mapdata[i][0],
		    amp->info_mapdata[i][1]);
#endif

	error = copyin(args->gmapdata, amp->info_gmapdata,
	    2*sizeof(u_long)*args->gnentries);
	if (error)
		goto bad;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:gnentries %d\n",args->gnentries);
	for (i = 0; i < args->gnentries; i++)
		printf("\tgroup %ld maps to %ld\n",
		    amp->info_gmapdata[i][0],
		    amp->info_gmapdata[i][1]);
#endif

	/*
	 * Make sure the mount point's sufficiently initialized
	 * that the node create call will work.
	 */
	vfs_getnewfsid(mp);
	amp->umapm_size = sizeof(struct umap_node);
	amp->umapm_tag = VT_UMAP;
	amp->umapm_bypass = umap_bypass;
	amp->umapm_vnodeop_p = umap_vnodeop_p;

	/*
	 * fix up umap node for root vnode.
	 */
	VOP_UNLOCK(lowerrootvp);
	error = layer_node_create(mp, lowerrootvp, &vp);
	/*
	 * Make sure the node alias worked
	 */
	if (error) {
		vrele(lowerrootvp);
		mp->mnt_data = NULL;
		kmem_free(amp, sizeof(struct umap_mount));
		return error;
	}

	/*
	 * Keep a held reference to the root vnode.
	 * It is vrele'd in umapfs_unmount.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_ROOT;
	amp->umapm_rootvp = vp;
	VOP_UNLOCK(vp);

	error = set_statvfs_info(path, UIO_USERSPACE, args->umap_target,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount: lower %s, alias at %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif
	return error;

bad:
	/*
	 * Fix: undo the partial setup instead of leaking the umap_mount
	 * and leaving mnt_data pointing at freed memory.
	 */
	vput(lowerrootvp);
	mp->mnt_data = NULL;
	kmem_free(amp, sizeof(struct umap_mount));
	return (error);
}
/*
 * set up a quota file for a particular file system.
 *
 * Opens 'fname' for read/write, installs it as the quota file for the
 * given quota type, seeds the time limits from the file, and walks the
 * mount's vnodes attaching dquots to inodes that are being modified.
 * Returns 0 or an errno.
 */
int
lfsquota1_handle_cmd_quotaon(struct lwp *l, struct ulfsmount *ump, int type,
    const char *fname)
{
	struct mount *mp = ump->um_mountp;
	struct lfs *fs = ump->um_lfs;
	struct vnode *vp, **vpp;
	struct vnode_iterator *marker;
	struct dquot *dq;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;

	/* v1 and v2 quotas are mutually exclusive on a mount. */
	if (fs->um_flags & ULFS_QUOTA2) {
		uprintf("%s: quotas v2 already enabled\n",
		    mp->mnt_stat.f_mntonname);
		return (EBUSY);
	}

	vpp = &ump->um_quotas[type];

	pb = pathbuf_create(fname);
	if (pb == NULL) {
		return ENOMEM;
	}
	NDINIT(&nd, LOOKUP, FOLLOW, pb);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
		pathbuf_destroy(pb);
		return error;
	}
	vp = nd.ni_vp;
	pathbuf_destroy(pb);
	/* vn_open returned the vnode locked; we keep only the reference. */
	VOP_UNLOCK(vp);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, l->l_cred);
		return (EACCES);
	}
	/* Replacing an already-open quota file: turn the old one off. */
	if (*vpp != vp)
		lfsquota1_handle_cmd_quotaoff(l, ump, type);

	/* Serialize against concurrent quotaon/quotaoff on this type. */
	mutex_enter(&lfs_dqlock);
	while ((ump->umq1_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&lfs_dqcv, &lfs_dqlock);
	ump->umq1_qflags[type] |= QTF_OPENING;
	mutex_exit(&lfs_dqlock);
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	kauth_cred_hold(l->l_cred);
	ump->um_cred[type] = l->l_cred;
	ump->umq1_btime[type] = MAX_DQ_TIME;
	ump->umq1_itime[type] = MAX_IQ_TIME;
	/* Entry 0 of the quota file may override the default grace times. */
	if (lfs_dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->umq1_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->umq1_itime[type] = dq->dq_itime;
		lfs_dqrele(NULLVP, dq);
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			vrele(vp);
			continue;
		}
		/* Skip vnodes that are not live inodes being written. */
		mutex_enter(vp->v_interlock);
		if (VTOI(vp) == NULL || vp->v_type == VNON ||
		    vp->v_writecount == 0) {
			mutex_exit(vp->v_interlock);
			vput(vp);
			continue;
		}
		mutex_exit(vp->v_interlock);
		if ((error = lfs_getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			break;
		}
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);

	/* Clear the OPENING gate and publish the result. */
	mutex_enter(&lfs_dqlock);
	ump->umq1_qflags[type] &= ~QTF_OPENING;
	cv_broadcast(&lfs_dqcv);
	if (error == 0)
		fs->um_flags |= ULFS_QUOTA;
	mutex_exit(&lfs_dqlock);
	if (error)
		lfsquota1_handle_cmd_quotaoff(l, ump, type);
	return (error);
}
/* ARGSUSED */
/*
 * Old (4.3BSD) lstat(2): for symbolic links, return the attributes of the
 * containing directory merged with the link's own mode, size and link
 * count, converted to the old struct stat43 layout.
 */
int
compat_43_sys_lstat(struct lwp *l, const struct compat_43_sys_lstat_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(char *) path;
		syscallarg(struct ostat *) ub;
	} */
	struct vnode *vp, *dvp;
	struct stat sb, sb1;
	struct stat43 osb;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;
	int ndflags;

	error = pathbuf_copyin(SCARG(uap, path), &pb);
	if (error) {
		return error;
	}

	/* Lock both the leaf and its parent; the parent is needed below. */
	ndflags = NOFOLLOW | LOCKLEAF | LOCKPARENT | TRYEMULROOT;
again:
	NDINIT(&nd, LOOKUP, ndflags, pb);
	if ((error = namei(&nd))) {
		if (error == EISDIR && (ndflags & LOCKPARENT) != 0) {
			/*
			 * Should only happen on '/'. Retry without LOCKPARENT;
			 * this is safe since the vnode won't be a VLNK.
			 */
			ndflags &= ~LOCKPARENT;
			goto again;
		}
		pathbuf_destroy(pb);
		return (error);
	}
	/*
	 * For symbolic links, always return the attributes of its
	 * containing directory, except for mode, size, and links.
	 */
	vp = nd.ni_vp;
	dvp = nd.ni_dvp;
	pathbuf_destroy(pb);
	if (vp->v_type != VLNK) {
		/*
		 * Parent not needed; release it (vrele if it is the same
		 * vnode as the leaf, since only one lock is held then).
		 */
		if ((ndflags & LOCKPARENT) != 0) {
			if (dvp == vp)
				vrele(dvp);
			else
				vput(dvp);
		}
		error = vn_stat(vp, &sb);
		vput(vp);
		if (error)
			return (error);
	} else {
		/* Symlink: stat the parent, then overlay link-specific
		 * fields from the link itself. (LOCKPARENT is still set
		 * here: the retry path above only triggers for '/'.) */
		error = vn_stat(dvp, &sb);
		vput(dvp);
		if (error) {
			vput(vp);
			return (error);
		}
		error = vn_stat(vp, &sb1);
		vput(vp);
		if (error)
			return (error);
		sb.st_mode &= ~S_IFDIR;
		sb.st_mode |= S_IFLNK;
		sb.st_nlink = sb1.st_nlink;
		sb.st_size = sb1.st_size;
		sb.st_blocks = sb1.st_blocks;
	}
	/* Convert to the 4.3BSD stat layout and copy out. */
	cvtstat(&sb, &osb);
	error = copyout((void *)&osb, (void *)SCARG(uap, ub), sizeof (osb));
	return (error);
}
/* ARGSUSED */
/*
 * ktrace(2): enable/disable kernel tracing.  For operations other than
 * KTROP_CLEAR, opens the trace file and wraps it in a temporary file_t
 * that ktrace_common() consumes; the descriptor slot is always aborted
 * afterwards since the file reference lives with the ktrace state.
 */
int
sys_ktrace(struct lwp *l, const struct sys_ktrace_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */
	struct vnode *vp = NULL;
	file_t *fp = NULL;
	struct pathbuf *pb;
	struct nameidata nd;
	int error = 0;
	int fd;

	/* Guard against tracing recursion while we set things up. */
	if (ktrenter(l))
		return EAGAIN;

	if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		error = pathbuf_copyin(SCARG(uap, fname), &pb);
		if (error) {
			ktrexit(l);
			return (error);
		}
		NDINIT(&nd, LOOKUP, FOLLOW, pb);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			pathbuf_destroy(pb);
			ktrexit(l);
			return (error);
		}
		vp = nd.ni_vp;
		pathbuf_destroy(pb);
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			vn_close(vp, FREAD|FWRITE, l->l_cred);
			ktrexit(l);
			return (EACCES);
		}
		/*
		 * This uses up a file descriptor slot in the
		 * tracing process for the duration of this syscall.
		 * This is not expected to be a problem.
		 */
		if ((error = fd_allocfile(&fp, &fd)) != 0) {
			vn_close(vp, FWRITE, l->l_cred);
			ktrexit(l);
			return error;
		}
		/* Hand the vnode reference over to the file_t. */
		fp->f_flag = FWRITE;
		fp->f_type = DTYPE_VNODE;
		fp->f_ops = &vnops;
		fp->f_data = (void *)vp;
		vp = NULL;
	}
	error = ktrace_common(l, SCARG(uap, ops), SCARG(uap, facs),
	    SCARG(uap, pid), &fp);
	/* Release the temporary descriptor slot allocated above. */
	if (KTROP(SCARG(uap, ops)) != KTROP_CLEAR)
		fd_abort(curproc, fp, fd);
	return (error);
}
/* ARGSUSED */
/*
 * Configure a cgd device: look up and open the underlying disk, copy in
 * the cipher name, IV method and key from userland, initialize the
 * crypto state and attach the disk.  On any failure the key buffer is
 * freed and the underlying vnode is closed ('bail' path).
 */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct cgd_ioctl *ci = data;
	struct vnode *vp;
	int ret;
	size_t i;
	size_t keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	/* dk_lookup returns the underlying device vnode, opened. */
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	/* Scratch buffer reused for the alg name, IV method and key. */
	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	/* Match the IV method name against the supported table. */
	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);

	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING,
		    "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	/* Wipe the key material; explicit_memset resists optimization. */
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
/*
 * Fill in the ownership/mode/timestamp fields of a ptyfs node.  When a
 * traditional (non-ptyfs) pty subsystem is active, the attributes are
 * copied from the underlying /dev node; otherwise (and on any failure)
 * defaults are applied at 'out'.
 */
static void
ptyfs_getinfo(struct ptyfsnode *ptyfs, struct lwp *l)
{
	extern struct ptm_pty *ptyfs_save_ptm, ptm_ptyfspty;

	if (ptyfs->ptyfs_type == PTYFSroot) {
		ptyfs->ptyfs_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|
		    S_IROTH|S_IXOTH;
		goto out;
	} else
		ptyfs->ptyfs_mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|
		    S_IROTH|S_IWOTH;

	if (ptyfs_save_ptm != NULL && ptyfs_save_ptm != &ptm_ptyfspty) {
		int error;
		struct pathbuf *pb;
		struct nameidata nd;
		char ttyname[64];
		kauth_cred_t cred;
		struct vattr va;

		/*
		 * We support traditional ptys, so we copy the info
		 * from the inode
		 */
		if ((error = (*ptyfs_save_ptm->makename)(
			ptyfs_save_ptm, l, ttyname, sizeof(ttyname),
			ptyfs->ptyfs_pty, ptyfs->ptyfs_type == PTYFSpts ? 't'
			: 'p')) != 0)
				goto out;
		pb = pathbuf_create(ttyname);
		if (pb == NULL) {
			error = ENOMEM;
			goto out;
		}
		NDINIT(&nd, LOOKUP, NOFOLLOW|LOCKLEAF, pb);
		if ((error = namei(&nd)) != 0) {
			pathbuf_destroy(pb);
			goto out;
		}
		/* Use a fresh (root) credential for the attribute read. */
		cred = kauth_cred_alloc();
		error = VOP_GETATTR(nd.ni_vp, &va, cred);
		kauth_cred_free(cred);
		/* Release lock and reference taken by LOCKLEAF lookup. */
		VOP_UNLOCK(nd.ni_vp);
		vrele(nd.ni_vp);
		pathbuf_destroy(pb);
		if (error)
			goto out;
		ptyfs->ptyfs_uid = va.va_uid;
		ptyfs->ptyfs_gid = va.va_gid;
		ptyfs->ptyfs_mode = va.va_mode;
		ptyfs->ptyfs_flags = va.va_flags;
		ptyfs->ptyfs_birthtime = va.va_birthtime;
		ptyfs->ptyfs_ctime = va.va_ctime;
		ptyfs->ptyfs_mtime = va.va_mtime;
		ptyfs->ptyfs_atime = va.va_atime;
		return;
	}
out:
	/* Default attributes: root-owned, timestamps set to "now". */
	ptyfs->ptyfs_uid = ptyfs->ptyfs_gid = 0;
	ptyfs->ptyfs_status |= PTYFS_CHANGE;
	PTYFS_ITIMES(ptyfs, NULL, NULL, NULL);
	ptyfs->ptyfs_birthtime = ptyfs->ptyfs_mtime =
	    ptyfs->ptyfs_atime = ptyfs->ptyfs_ctime;
	ptyfs->ptyfs_flags = 0;
}
/*
 * Dump core, into a file named "progname.core" or "core" (depending on the
 * value of shortcorename), unless the process was setuid/setgid.
 *
 * Builds the core file name from 'pattern' (or the process/limit default),
 * checks that the target filesystem allows core dumps, opens/creates the
 * file and hands it to the executable format's coredump routine.
 */
static int
coredump(struct lwp *l, const char *pattern)
{
	struct vnode *vp;
	struct proc *p;
	struct vmspace *vm;
	kauth_cred_t cred;
	struct pathbuf *pb;
	struct nameidata nd;
	struct vattr vattr;
	struct coredump_iostate io;
	struct plimit *lim;
	int error, error1;
	char *name, *lastslash;

	name = PNBUF_GET();

	p = l->l_proc;
	vm = p->p_vmspace;

	mutex_enter(proc_lock);		/* p_session */
	mutex_enter(p->p_lock);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur) {
		error = EFBIG;		/* better error code? */
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		goto done;
	}

	/*
	 * It may well not be curproc, so grab a reference to its current
	 * credentials.
	 *
	 * NOTE(review): the reference taken here does not appear to be
	 * released anywhere in this function — confirm the credential
	 * lifecycle against the caller.
	 */
	kauth_cred_hold(p->p_cred);
	cred = p->p_cred;

	/*
	 * Make sure the process has not set-id, to prevent data leaks,
	 * unless it was specifically requested to allow set-id coredumps.
	 */
	if (p->p_flag & PK_SUGID) {
		if (!security_setidcore_dump) {
			error = EPERM;
			mutex_exit(p->p_lock);
			mutex_exit(proc_lock);
			goto done;
		}
		pattern = security_setidcore_path;
	}

	/* Lock, as p_limit and pl_corename might change. */
	lim = p->p_limit;
	mutex_enter(&lim->pl_lock);
	if (pattern == NULL) {
		pattern = lim->pl_corename;
	}
	error = coredump_buildname(p, name, pattern, MAXPATHLEN);
	mutex_exit(&lim->pl_lock);

	if (error) {
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		goto done;
	}

	/*
	 * On a simple filename, see if the filesystem allow us to write
	 * core dumps there.
	 */
	lastslash = strrchr(name, '/');
	if (!lastslash) {
		vp = p->p_cwdi->cwdi_cdir;
		if (vp->v_mount == NULL ||
		    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
			error = EPERM;
	}

	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	if (error)
		goto done;

	/*
	 * On a complex filename, see if the filesystem allow us to write
	 * core dumps there.
	 *
	 * XXX: We should have an API that avoids double lookups
	 */
	if (lastslash) {
		char c[2];

		if (lastslash - name >= MAXPATHLEN - 2) {
			error = EPERM;
			goto done;
		}
		/*
		 * Temporarily truncate the path to its directory part
		 * ("dir/." ) to look up the directory's mount, then
		 * restore the saved characters afterwards.
		 */
		c[0] = lastslash[1];
		c[1] = lastslash[2];
		lastslash[1] = '.';
		lastslash[2] = '\0';
		error = namei_simple_kernel(name, NSM_FOLLOW_NOEMULROOT, &vp);
		if (error)
			goto done;
		if (vp->v_mount == NULL ||
		    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0)
			error = EPERM;
		vrele(vp);
		if (error)
			goto done;
		lastslash[1] = c[0];
		lastslash[2] = c[1];
	}

	pb = pathbuf_create(name);
	if (pb == NULL) {
		error = ENOMEM;
		goto done;
	}
	NDINIT(&nd, LOOKUP, NOFOLLOW, pb);
	if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE,
	    S_IRUSR | S_IWUSR)) != 0) {
		pathbuf_destroy(pb);
		goto done;
	}
	vp = nd.ni_vp;
	pathbuf_destroy(pb);

	/*
	 * Don't dump to:
	 *	- non-regular files
	 *	- files with links
	 *	- files we don't own
	 */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred) || vattr.va_nlink != 1 ||
	    vattr.va_uid != kauth_cred_geteuid(cred)) {
		error = EACCES;
		goto out;
	}
	/* Truncate the file before writing the dump. */
	vattr_null(&vattr);
	vattr.va_size = 0;

	if ((p->p_flag & PK_SUGID) && security_setidcore_dump) {
		vattr.va_uid = security_setidcore_owner;
		vattr.va_gid = security_setidcore_group;
		vattr.va_mode = security_setidcore_mode;
	}

	VOP_SETATTR(vp, &vattr, cred);
	p->p_acflag |= ACORE;

	io.io_lwp = l;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, &io);
out:
	VOP_UNLOCK(vp);
	error1 = vn_close(vp, FWRITE, cred);
	if (error == 0)
		error = error1;
done:
	if (name != NULL)
		PNBUF_PUT(name);
	return error;
}
/*
 * Lookup and open needed files.
 *
 * For file system internal snapshot initializes sc_mntname, sc_mount,
 * sc_bs_vp and sc_time.
 *
 * Otherwise returns dev and size of the underlying block device.
 * Initializes sc_mntname, sc_mount, sc_bdev, sc_bs_vp and sc_mount
 */
static int
fss_create_files(struct fss_softc *sc, struct fss_set *fss,
    off_t *bsize, struct lwp *l)
{
	int error, bits, fsbsize;
	uint64_t numsec;
	unsigned int secsize;
	struct timespec ts;
	/* nd -> nd2 to reduce mistakes while updating only some namei calls */
	struct pathbuf *pb2;
	struct nameidata nd2;
	struct vnode *vp;

	/*
	 * Get the mounted file system.
	 */

	error = namei_simple_user(fss->fss_mount,
				NSM_FOLLOW_NOEMULROOT, &vp);
	if (error != 0)
		return error;

	/* fss_mount must name the root of a mounted file system. */
	if ((vp->v_vflag & VV_ROOT) != VV_ROOT) {
		vrele(vp);
		return EINVAL;
	}

	sc->sc_mount = vp->v_mount;
	memcpy(sc->sc_mntname, sc->sc_mount->mnt_stat.f_mntonname, MNAMELEN);

	vrele(vp);

	/*
	 * Check for file system internal snapshot.
	 */

	error = namei_simple_user(fss->fss_bstore,
				NSM_FOLLOW_NOEMULROOT, &vp);
	if (error != 0)
		return error;

	if (vp->v_type == VREG && vp->v_mount == sc->sc_mount) {
		/* Backing store lives on the snapshotted fs itself. */
		sc->sc_flags |= FSS_PERSISTENT;
		sc->sc_bs_vp = vp;

		/* Find the shift matching the fs block size. */
		fsbsize = sc->sc_bs_vp->v_mount->mnt_stat.f_iosize;
		bits = sizeof(sc->sc_bs_bshift)*NBBY;
		for (sc->sc_bs_bshift = 1; sc->sc_bs_bshift < bits;
		    sc->sc_bs_bshift++)
			if (FSS_FSBSIZE(sc) == fsbsize)
				break;
		if (sc->sc_bs_bshift >= bits)
			return EINVAL;

		sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
		sc->sc_clshift = 0;

		if ((fss->fss_flags & FSS_UNLINK_ON_CREATE) != 0) {
			error = do_sys_unlink(fss->fss_bstore, UIO_USERSPACE);
			if (error)
				return error;
		}
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error != 0)
			return error;
		/* Let the file system take the snapshot itself. */
		error = VFS_SNAPSHOT(sc->sc_mount, sc->sc_bs_vp, &ts);
		TIMESPEC_TO_TIMEVAL(&sc->sc_time, &ts);

		VOP_UNLOCK(sc->sc_bs_vp);

		return error;
	}
	vrele(vp);

	/*
	 * Get the block device it is mounted on and its size.
	 */

	error = spec_node_lookup_by_mount(sc->sc_mount, &vp);
	if (error)
		return error;
	sc->sc_bdev = vp->v_rdev;

	error = getdisksize(vp, &numsec, &secsize);
	vrele(vp);
	if (error)
		return error;

	*bsize = (off_t)numsec*secsize;

	/*
	 * Get the backing store
	 */

	error = pathbuf_copyin(fss->fss_bstore, &pb2);
	if (error) {
		return error;
	}
	NDINIT(&nd2, LOOKUP, FOLLOW, pb2);
	if ((error = vn_open(&nd2, FREAD|FWRITE, 0)) != 0) {
		pathbuf_destroy(pb2);
		return error;
	}
	VOP_UNLOCK(nd2.ni_vp);

	/* Keep the open vnode; it is closed on snapshot teardown. */
	sc->sc_bs_vp = nd2.ni_vp;

	if (nd2.ni_vp->v_type != VREG && nd2.ni_vp->v_type != VCHR) {
		pathbuf_destroy(pb2);
		return EINVAL;
	}
	pathbuf_destroy(pb2);

	if ((fss->fss_flags & FSS_UNLINK_ON_CREATE) != 0) {
		error = do_sys_unlink(fss->fss_bstore, UIO_USERSPACE);
		if (error)
			return error;
	}
	if (sc->sc_bs_vp->v_type == VREG) {
		/* Regular file: block size must be a power of two. */
		fsbsize = sc->sc_bs_vp->v_mount->mnt_stat.f_iosize;
		if (fsbsize & (fsbsize-1))	/* No power of two */
			return EINVAL;
		for (sc->sc_bs_bshift = 1; sc->sc_bs_bshift < 32;
		    sc->sc_bs_bshift++)
			if (FSS_FSBSIZE(sc) == fsbsize)
				break;
		if (sc->sc_bs_bshift >= 32)
			return EINVAL;
		sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
	} else {
		/* Character device backing store: use device block size. */
		sc->sc_bs_bshift = DEV_BSHIFT;
		sc->sc_bs_bmask = FSS_FSBSIZE(sc)-1;
	}
	return 0;
}
/*
 * module_load_plist_vfs:
 *
 *	Load a plist located in the file system into memory.
 *
 *	The plist path is derived from the module path: "foo.kmod" becomes
 *	"foo.plist", anything else gets ".plist" appended.  On success
 *	*filedictp holds the internalized property dictionary.  Returns 0
 *	or an errno (ENOENT, ENOMEM, EFBIG, EINVAL, or a VFS error).
 */
static int
module_load_plist_vfs(const char *modpath, const bool nochroot,
    prop_dictionary_t *filedictp)
{
	struct pathbuf *pb;
	struct nameidata nd;
	struct stat sb;
	void *base;
	char *proppath;
	const size_t plistsize = 8192;
	size_t resid;
	int error, pathlen;

	KASSERT(filedictp != NULL);
	base = NULL;

	/* Derive the plist path from the module path. */
	proppath = PNBUF_GET();
	strcpy(proppath, modpath);
	pathlen = strlen(proppath);
	if ((pathlen >= 6) && (strcmp(&proppath[pathlen - 5], ".kmod") == 0)) {
		strcpy(&proppath[pathlen - 5], ".plist");
	} else if (pathlen < MAXPATHLEN - 6) {
		strcat(proppath, ".plist");
	} else {
		error = ENOENT;
		goto out1;
	}

	/* XXX this makes an unnecessary extra copy of the path */
	pb = pathbuf_create(proppath);
	if (pb == NULL) {
		error = ENOMEM;
		goto out1;
	}

	NDINIT(&nd, LOOKUP, FOLLOW | (nochroot ? NOCHROOT : 0), pb);

	error = vn_open(&nd, FREAD, 0);
	if (error != 0) {
		goto out2;
	}

	error = vn_stat(nd.ni_vp, &sb);
	if (error != 0) {
		goto out3;
	}
	if (sb.st_size >= (plistsize - 1)) {	/* leave space for term \0 */
		error = EFBIG;
		goto out3;
	}

	/* KM_SLEEP allocations cannot fail; no NULL check needed. */
	base = kmem_alloc(plistsize, KM_SLEEP);

	error = vn_rdwr(UIO_READ, nd.ni_vp, base, sb.st_size, 0,
	    UIO_SYSSPACE, IO_NODELOCKED, curlwp->l_cred, &resid, curlwp);
	if (error == 0 && resid != 0) {
		error = EFBIG;
	}
	if (error != 0) {
		kmem_free(base, plistsize);
		base = NULL;
		goto out3;
	}
	/* NUL-terminate the plist text before internalizing it. */
	*((uint8_t *)base + sb.st_size) = '\0';

	*filedictp = prop_dictionary_internalize(base);
	if (*filedictp == NULL) {
		/*
		 * Fix: malformed plist content is a runtime error, not a
		 * kernel invariant.  The former KASSERT(error == 0) right
		 * after this assignment would panic a DIAGNOSTIC kernel
		 * whenever the plist file was corrupt; report EINVAL
		 * instead.
		 */
		error = EINVAL;
	}
	kmem_free(base, plistsize);
	base = NULL;

out3:
	VOP_UNLOCK(nd.ni_vp);
	vn_close(nd.ni_vp, FREAD, kauth_cred_get());

out2:
	pathbuf_destroy(pb);

out1:
	PNBUF_PUT(proppath);
	return error;
}
/* ARGSUSED */ static int vndioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) { bool force; int unit = vndunit(dev); struct vnd_softc *vnd; struct vnd_ioctl *vio; struct vattr vattr; struct pathbuf *pb; struct nameidata nd; int error, part, pmask; uint64_t geomsize; int fflags; #ifdef __HAVE_OLD_DISKLABEL struct disklabel newlabel; #endif struct dkwedge_info *dkw; struct dkwedge_list *dkwl; #ifdef DEBUG if (vnddebug & VDB_FOLLOW) printf("vndioctl(0x%"PRIx64", 0x%lx, %p, 0x%x, %p): unit %d\n", dev, cmd, data, flag, l->l_proc, unit); #endif vnd = device_lookup_private(&vnd_cd, unit); if (vnd == NULL && #ifdef COMPAT_30 cmd != VNDIOCGET30 && #endif #ifdef COMPAT_50 cmd != VNDIOCGET50 && #endif cmd != VNDIOCGET) return ENXIO; vio = (struct vnd_ioctl *)data; /* Must be open for writes for these commands... */ switch (cmd) { case VNDIOCSET: case VNDIOCCLR: #ifdef COMPAT_50 case VNDIOCSET50: case VNDIOCCLR50: #endif case DIOCSDINFO: case DIOCWDINFO: #ifdef __HAVE_OLD_DISKLABEL case ODIOCSDINFO: case ODIOCWDINFO: #endif case DIOCKLABEL: case DIOCWLABEL: if ((flag & FWRITE) == 0) return EBADF; } /* Must be initialized for these... 
*/ switch (cmd) { case VNDIOCCLR: #ifdef VNDIOCCLR50 case VNDIOCCLR50: #endif case DIOCGDINFO: case DIOCSDINFO: case DIOCWDINFO: case DIOCGPART: case DIOCKLABEL: case DIOCWLABEL: case DIOCGDEFLABEL: case DIOCCACHESYNC: #ifdef __HAVE_OLD_DISKLABEL case ODIOCGDINFO: case ODIOCSDINFO: case ODIOCWDINFO: case ODIOCGDEFLABEL: #endif if ((vnd->sc_flags & VNF_INITED) == 0) return ENXIO; } switch (cmd) { #ifdef VNDIOCSET50 case VNDIOCSET50: #endif case VNDIOCSET: if (vnd->sc_flags & VNF_INITED) return EBUSY; if ((error = vndlock(vnd)) != 0) return error; fflags = FREAD; if ((vio->vnd_flags & VNDIOF_READONLY) == 0) fflags |= FWRITE; error = pathbuf_copyin(vio->vnd_file, &pb); if (error) { goto unlock_and_exit; } NDINIT(&nd, LOOKUP, FOLLOW, pb); if ((error = vn_open(&nd, fflags, 0)) != 0) { pathbuf_destroy(pb); goto unlock_and_exit; } KASSERT(l); error = VOP_GETATTR(nd.ni_vp, &vattr, l->l_cred); if (!error && nd.ni_vp->v_type != VREG) error = EOPNOTSUPP; if (!error && vattr.va_bytes < vattr.va_size) /* File is definitely sparse, use vn_rdwr() */ vnd->sc_flags |= VNF_USE_VN_RDWR; if (error) { VOP_UNLOCK(nd.ni_vp); goto close_and_exit; } /* If using a compressed file, initialize its info */ /* (or abort with an error if kernel has no compression) */ if (vio->vnd_flags & VNF_COMP) { #ifdef VND_COMPRESSION struct vnd_comp_header *ch; int i; u_int32_t comp_size; u_int32_t comp_maxsize; /* allocate space for compresed file header */ ch = malloc(sizeof(struct vnd_comp_header), M_TEMP, M_WAITOK); /* read compressed file header */ error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)ch, sizeof(struct vnd_comp_header), 0, UIO_SYSSPACE, IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL); if (error) { free(ch, M_TEMP); VOP_UNLOCK(nd.ni_vp); goto close_and_exit; } /* save some header info */ vnd->sc_comp_blksz = ntohl(ch->block_size); /* note last offset is the file byte size */ vnd->sc_comp_numoffs = ntohl(ch->num_blocks)+1; free(ch, M_TEMP); if (vnd->sc_comp_blksz == 0 || vnd->sc_comp_blksz % 
DEV_BSIZE !=0) { VOP_UNLOCK(nd.ni_vp); error = EINVAL; goto close_and_exit; } if (sizeof(struct vnd_comp_header) + sizeof(u_int64_t) * vnd->sc_comp_numoffs > vattr.va_size) { VOP_UNLOCK(nd.ni_vp); error = EINVAL; goto close_and_exit; } /* set decompressed file size */ vattr.va_size = ((u_quad_t)vnd->sc_comp_numoffs - 1) * (u_quad_t)vnd->sc_comp_blksz; /* allocate space for all the compressed offsets */ vnd->sc_comp_offsets = malloc(sizeof(u_int64_t) * vnd->sc_comp_numoffs, M_DEVBUF, M_WAITOK); /* read in the offsets */ error = vn_rdwr(UIO_READ, nd.ni_vp, (void *)vnd->sc_comp_offsets, sizeof(u_int64_t) * vnd->sc_comp_numoffs, sizeof(struct vnd_comp_header), UIO_SYSSPACE, IO_UNIT|IO_NODELOCKED, l->l_cred, NULL, NULL); if (error) { VOP_UNLOCK(nd.ni_vp); goto close_and_exit; } /* * find largest block size (used for allocation limit). * Also convert offset to native byte order. */ comp_maxsize = 0; for (i = 0; i < vnd->sc_comp_numoffs - 1; i++) { vnd->sc_comp_offsets[i] = be64toh(vnd->sc_comp_offsets[i]); comp_size = be64toh(vnd->sc_comp_offsets[i + 1]) - vnd->sc_comp_offsets[i]; if (comp_size > comp_maxsize) comp_maxsize = comp_size; } vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1] = be64toh(vnd->sc_comp_offsets[vnd->sc_comp_numoffs - 1]); /* create compressed data buffer */ vnd->sc_comp_buff = malloc(comp_maxsize, M_DEVBUF, M_WAITOK); /* create decompressed buffer */ vnd->sc_comp_decombuf = malloc(vnd->sc_comp_blksz, M_DEVBUF, M_WAITOK); vnd->sc_comp_buffblk = -1; /* Initialize decompress stream */ memset(&vnd->sc_comp_stream, 0, sizeof(z_stream)); vnd->sc_comp_stream.zalloc = vnd_alloc; vnd->sc_comp_stream.zfree = vnd_free; error = inflateInit2(&vnd->sc_comp_stream, MAX_WBITS); if (error) { if (vnd->sc_comp_stream.msg) printf("vnd%d: compressed file, %s\n", unit, vnd->sc_comp_stream.msg); VOP_UNLOCK(nd.ni_vp); error = EINVAL; goto close_and_exit; } vnd->sc_flags |= VNF_COMP | VNF_READONLY; #else /* !VND_COMPRESSION */ VOP_UNLOCK(nd.ni_vp); error = EOPNOTSUPP; goto 
close_and_exit; #endif /* VND_COMPRESSION */ } VOP_UNLOCK(nd.ni_vp); vnd->sc_vp = nd.ni_vp; vnd->sc_size = btodb(vattr.va_size); /* note truncation */ /* * Use pseudo-geometry specified. If none was provided, * use "standard" Adaptec fictitious geometry. */ if (vio->vnd_flags & VNDIOF_HASGEOM) { memcpy(&vnd->sc_geom, &vio->vnd_geom, sizeof(vio->vnd_geom)); /* * Sanity-check the sector size. * XXX Don't allow secsize < DEV_BSIZE. Should * XXX we? */ if (vnd->sc_geom.vng_secsize < DEV_BSIZE || (vnd->sc_geom.vng_secsize % DEV_BSIZE) != 0 || vnd->sc_geom.vng_ncylinders == 0 || (vnd->sc_geom.vng_ntracks * vnd->sc_geom.vng_nsectors) == 0) { error = EINVAL; goto close_and_exit; } /* * Compute the size (in DEV_BSIZE blocks) specified * by the geometry. */ geomsize = (vnd->sc_geom.vng_nsectors * vnd->sc_geom.vng_ntracks * vnd->sc_geom.vng_ncylinders) * (vnd->sc_geom.vng_secsize / DEV_BSIZE); /* * Sanity-check the size against the specified * geometry. */ if (vnd->sc_size < geomsize) { error = EINVAL; goto close_and_exit; } } else if (vnd->sc_size >= (32 * 64)) { /* * Size must be at least 2048 DEV_BSIZE blocks * (1M) in order to use this geometry. 
*/ vnd->sc_geom.vng_secsize = DEV_BSIZE; vnd->sc_geom.vng_nsectors = 32; vnd->sc_geom.vng_ntracks = 64; vnd->sc_geom.vng_ncylinders = vnd->sc_size / (64 * 32); } else { vnd->sc_geom.vng_secsize = DEV_BSIZE; vnd->sc_geom.vng_nsectors = 1; vnd->sc_geom.vng_ntracks = 1; vnd->sc_geom.vng_ncylinders = vnd->sc_size; } vnd_set_geometry(vnd); if (vio->vnd_flags & VNDIOF_READONLY) { vnd->sc_flags |= VNF_READONLY; } if ((error = vndsetcred(vnd, l->l_cred)) != 0) goto close_and_exit; vndthrottle(vnd, vnd->sc_vp); vio->vnd_osize = dbtob(vnd->sc_size); #ifdef VNDIOCSET50 if (cmd != VNDIOCSET50) #endif vio->vnd_size = dbtob(vnd->sc_size); vnd->sc_flags |= VNF_INITED; /* create the kernel thread, wait for it to be up */ error = kthread_create(PRI_NONE, 0, NULL, vndthread, vnd, &vnd->sc_kthread, "%s", device_xname(vnd->sc_dev)); if (error) goto close_and_exit; while ((vnd->sc_flags & VNF_KTHREAD) == 0) { tsleep(&vnd->sc_kthread, PRIBIO, "vndthr", 0); } #ifdef DEBUG if (vnddebug & VDB_INIT) printf("vndioctl: SET vp %p size 0x%lx %d/%d/%d/%d\n", vnd->sc_vp, (unsigned long) vnd->sc_size, vnd->sc_geom.vng_secsize, vnd->sc_geom.vng_nsectors, vnd->sc_geom.vng_ntracks, vnd->sc_geom.vng_ncylinders); #endif /* Attach the disk. */ disk_attach(&vnd->sc_dkdev); disk_blocksize(&vnd->sc_dkdev, vnd->sc_geom.vng_secsize); /* Initialize the xfer and buffer pools. 
*/ pool_init(&vnd->sc_vxpool, sizeof(struct vndxfer), 0, 0, 0, "vndxpl", NULL, IPL_BIO); vndunlock(vnd); pathbuf_destroy(pb); /* Discover wedges on this disk */ dkwedge_discover(&vnd->sc_dkdev); break; close_and_exit: (void) vn_close(nd.ni_vp, fflags, l->l_cred); pathbuf_destroy(pb); unlock_and_exit: #ifdef VND_COMPRESSION /* free any allocated memory (for compressed file) */ if (vnd->sc_comp_offsets) { free(vnd->sc_comp_offsets, M_DEVBUF); vnd->sc_comp_offsets = NULL; } if (vnd->sc_comp_buff) { free(vnd->sc_comp_buff, M_DEVBUF); vnd->sc_comp_buff = NULL; } if (vnd->sc_comp_decombuf) { free(vnd->sc_comp_decombuf, M_DEVBUF); vnd->sc_comp_decombuf = NULL; } #endif /* VND_COMPRESSION */ vndunlock(vnd); return error; #ifdef VNDIOCCLR50 case VNDIOCCLR50: #endif case VNDIOCCLR: part = DISKPART(dev); pmask = (1 << part); force = (vio->vnd_flags & VNDIOF_FORCE) != 0; if ((error = vnddoclear(vnd, pmask, minor(dev), force)) != 0) return error; break; #ifdef COMPAT_30 case VNDIOCGET30: { struct vnd_user30 *vnu; struct vattr va; vnu = (struct vnd_user30 *)data; KASSERT(l); switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) { case 0: vnu->vnu_dev = va.va_fsid; vnu->vnu_ino = va.va_fileid; break; case -1: /* unused is not an error */ vnu->vnu_dev = 0; vnu->vnu_ino = 0; break; default: return error; } break; } #endif #ifdef COMPAT_50 case VNDIOCGET50: { struct vnd_user50 *vnu; struct vattr va; vnu = (struct vnd_user50 *)data; KASSERT(l); switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) { case 0: vnu->vnu_dev = va.va_fsid; vnu->vnu_ino = va.va_fileid; break; case -1: /* unused is not an error */ vnu->vnu_dev = 0; vnu->vnu_ino = 0; break; default: return error; } break; } #endif case VNDIOCGET: { struct vnd_user *vnu; struct vattr va; vnu = (struct vnd_user *)data; KASSERT(l); switch (error = vnd_cget(l, unit, &vnu->vnu_unit, &va)) { case 0: vnu->vnu_dev = va.va_fsid; vnu->vnu_ino = va.va_fileid; break; case -1: /* unused is not an error */ vnu->vnu_dev = 0; 
vnu->vnu_ino = 0; break; default: return error; } break; } case DIOCGDINFO: *(struct disklabel *)data = *(vnd->sc_dkdev.dk_label); break; #ifdef __HAVE_OLD_DISKLABEL case ODIOCGDINFO: newlabel = *(vnd->sc_dkdev.dk_label); if (newlabel.d_npartitions > OLDMAXPARTITIONS) return ENOTTY; memcpy(data, &newlabel, sizeof (struct olddisklabel)); break; #endif case DIOCGPART: ((struct partinfo *)data)->disklab = vnd->sc_dkdev.dk_label; ((struct partinfo *)data)->part = &vnd->sc_dkdev.dk_label->d_partitions[DISKPART(dev)]; break; case DIOCWDINFO: case DIOCSDINFO: #ifdef __HAVE_OLD_DISKLABEL case ODIOCWDINFO: case ODIOCSDINFO: #endif { struct disklabel *lp; if ((error = vndlock(vnd)) != 0) return error; vnd->sc_flags |= VNF_LABELLING; #ifdef __HAVE_OLD_DISKLABEL if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) { memset(&newlabel, 0, sizeof newlabel); memcpy(&newlabel, data, sizeof (struct olddisklabel)); lp = &newlabel; } else #endif lp = (struct disklabel *)data; error = setdisklabel(vnd->sc_dkdev.dk_label, lp, 0, vnd->sc_dkdev.dk_cpulabel); if (error == 0) { if (cmd == DIOCWDINFO #ifdef __HAVE_OLD_DISKLABEL || cmd == ODIOCWDINFO #endif ) error = writedisklabel(VNDLABELDEV(dev), vndstrategy, vnd->sc_dkdev.dk_label, vnd->sc_dkdev.dk_cpulabel); } vnd->sc_flags &= ~VNF_LABELLING; vndunlock(vnd); if (error) return error; break; } case DIOCKLABEL: if (*(int *)data != 0) vnd->sc_flags |= VNF_KLABEL; else vnd->sc_flags &= ~VNF_KLABEL; break; case DIOCWLABEL: if (*(int *)data != 0) vnd->sc_flags |= VNF_WLABEL; else vnd->sc_flags &= ~VNF_WLABEL; break; case DIOCGDEFLABEL: vndgetdefaultlabel(vnd, (struct disklabel *)data); break; #ifdef __HAVE_OLD_DISKLABEL case ODIOCGDEFLABEL: vndgetdefaultlabel(vnd, &newlabel); if (newlabel.d_npartitions > OLDMAXPARTITIONS) return ENOTTY; memcpy(data, &newlabel, sizeof (struct olddisklabel)); break; #endif case DIOCCACHESYNC: vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(vnd->sc_vp, vnd->sc_cred, FSYNC_WAIT | FSYNC_DATAONLY | 
FSYNC_CACHE, 0, 0); VOP_UNLOCK(vnd->sc_vp); return error; case DIOCAWEDGE: dkw = (void *) data; if ((flag & FWRITE) == 0) return EBADF; /* If the ioctl happens here, the parent is us. */ strlcpy(dkw->dkw_parent, device_xname(vnd->sc_dev), sizeof(dkw->dkw_parent)); return dkwedge_add(dkw); case DIOCDWEDGE: dkw = (void *) data; if ((flag & FWRITE) == 0) return EBADF; /* If the ioctl happens here, the parent is us. */ strlcpy(dkw->dkw_parent, device_xname(vnd->sc_dev), sizeof(dkw->dkw_parent)); return dkwedge_del(dkw); case DIOCLWEDGES: dkwl = (void *) data; return dkwedge_list(&vnd->sc_dkdev, dkwl, l); default: return ENOTTY; } return 0; }
/*
 * efs_mount:
 *
 *	VFS mount entry point.  Services MNT_GETARGS, rejects MNT_UPDATE
 *	(the filesystem is read-only), otherwise resolves the backing
 *	block device, performs the kauth permission check, opens the
 *	device and hands off to efs_mount_common().
 *
 *	Returns 0 on success, an errno value otherwise.
 */
static int
efs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct efs_args *args = data;
	struct efs_mount *emp;
	struct nameidata nd;
	struct pathbuf *pb;
	struct vnode *devvp;
	int error, mode;

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	/* MNT_GETARGS: report current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		emp = VFSTOEFS(mp);
		if (emp == NULL)
			return EIO;
		args->fspec = NULL;
		args->version = EFS_MNT_VERSION;
		*data_len = sizeof *args;
		return 0;
	}

	/* Updating an existing mount is not supported. XXX read-only */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * Look up the device's vnode.  With LOCKLEAF, namei returns
	 * it locked and referenced.
	 */
	error = pathbuf_copyin(args->fspec, &pb);
	if (error)
		return error;
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, pb);
	error = namei(&nd);
	if (error) {
		pathbuf_destroy(pb);
		return error;
	}
	devvp = nd.ni_vp;
	pathbuf_destroy(pb);

	if (devvp->v_type != VBLK) {
		vput(devvp);
		return ENOTBLK;
	}

	/* XXX - rdonly */
	mode = FREAD;

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp, KAUTH_ARG(VREAD));
	if (error) {
		vput(devvp);
		return error;
	}

	error = VOP_OPEN(devvp, mode, l->l_cred);
	if (error) {
		vput(devvp);
		return error;
	}

	error = efs_mount_common(mp, path, devvp, args);
	if (error) {
		/* devvp is still locked here, as VOP_CLOSE requires. */
		VOP_CLOSE(devvp, mode, l->l_cred);
		vput(devvp);
		return error;
	}

	/* Success: drop the vnode lock but keep the reference. */
	VOP_UNLOCK(devvp);

	return 0;
}