/*
 * Mount the per-process file descriptors (/dev/fd)
 *
 * NetBSD-style mount entry point: handles MNT_GETARGS (no arguments to
 * report), rejects MNT_UPDATE, allocates the root vnode, and fills in
 * the statvfs identity for the mount.
 */
int fdesc_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	int error = 0;
	struct vnode *rvp;

	/* fdesc has no mount arguments; report zero-length args. */
	if (mp->mnt_flag & MNT_GETARGS) {
		*data_len = 0;
		return 0;
	}
	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	/* Allocate the root vnode for this mount. */
	error = fdesc_allocvp(Froot, FD_ROOT, mp, &rvp);
	if (error)
		return (error);

	rvp->v_type = VDIR;
	rvp->v_vflag |= VV_ROOT;
	mp->mnt_stat.f_namemax = FDESC_MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	/* Root vnode doubles as the per-mount private data. */
	mp->mnt_data = rvp;
	vfs_getnewfsid(mp);

	/* Record mount-point path and "fdesc" as the mounted-from name. */
	error = set_statvfs_info(path, UIO_USERSPACE, "fdesc", UIO_SYSSPACE,
	    mp->mnt_op->vfs_name, mp, l);
	/* NOTE(review): presumably fdesc_allocvp returns rvp locked, hence
	 * the unlock here — confirm against fdesc_allocvp. */
	VOP_UNLOCK(rvp);
	return error;
}
/* * Mount a pseudofs instance */ int pfs_mount(struct pfs_info *pi, struct mount *mp) { struct statfs *sbp; if (mp->mnt_flag & MNT_UPDATE) return (EOPNOTSUPP); MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; mp->mnt_kern_flag |= MNTK_MPSAFE; MNT_IUNLOCK(mp); mp->mnt_data = pi; vfs_getnewfsid(mp); sbp = &mp->mnt_stat; vfs_mountedfrom(mp, pi->pi_name); sbp->f_bsize = PAGE_SIZE; sbp->f_iosize = PAGE_SIZE; sbp->f_blocks = 1; sbp->f_bfree = 0; sbp->f_bavail = 0; sbp->f_files = 1; sbp->f_ffree = 0; return (0); }
/* ARGSUSED */
/*
 * Mount a linprocfs instance: on the first mount of this fs type,
 * register the process-exit hook; then stamp the mount flags, fsid,
 * and vnode operations, and fill in the "mounted from" name.
 */
static int
linprocfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	size_t namelen;
	int error;

	/* Updating an existing mount is not supported. */
	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	/* First mount of this type: hook process exit for pid cleanup. */
	if (mp->mnt_vfc->vfc_refcount == 1) {
		error = at_exit(linprocfs_exit);
		if (error) {
			kprintf("linprocfs: cannot register linprocfs_exit with at_exit\n");
			return (error);
		}
	}

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOSTKMNT;
	mp->mnt_data = 0;
	vfs_getnewfsid(mp);

	vfs_add_vnodeops(mp, &linprocfs_vnode_vops, &mp->mnt_vn_norm_ops);

	/* f_mntfromname = "linprocfs", zero-padded to MNAMELEN. */
	namelen = sizeof("linprocfs") - 1;
	bcopy("linprocfs", mp->mnt_stat.f_mntfromname, namelen);
	bzero(mp->mnt_stat.f_mntfromname + namelen, MNAMELEN - namelen);

	/* Prime mnt_stat; result is advisory, ignore failures. */
	(void)linprocfs_statfs(mp, &mp->mnt_stat, cred);
	return (0);
}
/* ARGSUSED */
/*
 * Mount procfs (NetBSD flavor).
 *
 * Handles three cases: MNT_GETARGS (copy current args back to the
 * caller), MNT_UPDATE (rejected), and a fresh mount (allocate the
 * per-mount structure, set statvfs identity, and register the exec
 * hook used to revoke stale /proc vnodes).
 */
int procfs_mount(
    struct mount *mp,
    const char *path,
    void *data,
    size_t *data_len)
{
	struct lwp *l = curlwp;
	struct procfsmount *pmnt;
	struct procfs_args *args = data;
	int error;

	if (args == NULL)
		return EINVAL;

	/* UIO_MX must be a power of two for directory-entry packing. */
	if (UIO_MX & (UIO_MX-1)) {
		log(LOG_ERR, "procfs: invalid directory entry size");
		return (EINVAL);
	}

	/* Report current mount arguments back to userland. */
	if (mp->mnt_flag & MNT_GETARGS) {
		if (*data_len < sizeof *args)
			return EINVAL;
		pmnt = VFSTOPROC(mp);
		if (pmnt == NULL)
			return EIO;
		args->version = PROCFS_ARGSVERSION;
		args->flags = pmnt->pmnt_flags;
		*data_len = sizeof *args;
		return 0;
	}

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	/* Only validate the version when the caller actually passed args. */
	if (*data_len >= sizeof *args && args->version != PROCFS_ARGSVERSION)
		return EINVAL;

	pmnt = kmem_zalloc(sizeof(struct procfsmount), KM_SLEEP);

	mp->mnt_stat.f_namemax = PROCFS_MAXNAMLEN;
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_data = pmnt;
	vfs_getnewfsid(mp);

	error = set_statvfs_info(path, UIO_USERSPACE, "procfs",
	    UIO_SYSSPACE, mp->mnt_op->vfs_name, mp, l);
	/* NOTE(review): error from set_statvfs_info is returned below, but
	 * the exec hook is still established and pmnt remains attached —
	 * presumably the VFS layer unwinds via unmount on failure; confirm. */
	pmnt->pmnt_exechook = exechook_establish(procfs_revoke_vnodes, mp);

	if (*data_len >= sizeof *args)
		pmnt->pmnt_flags = args->flags;
	else
		pmnt->pmnt_flags = 0;

	mp->mnt_iflag |= IMNT_MPSAFE;
	return error;
}
/*
 * Mount the filesystem
 *
 * Allocates the per-mount devfs state (including a unique index from
 * the global unr allocator), marks the mount MP-safe with shared
 * lookup, builds the root directory node, and verifies the root vnode
 * can be obtained before declaring success.
 */
static int
devfs_mount(struct mount *mp)
{
	int error;
	struct devfs_mount *fmp;
	struct vnode *rvp;

	/* Lazily create the global unit-number allocator on first mount. */
	if (devfs_unr == NULL)
		devfs_unr = new_unrhdr(0, INT_MAX, NULL);

	error = 0;

	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return (EOPNOTSUPP);

	fmp = malloc(sizeof *fmp, M_DEVFS, M_WAITOK | M_ZERO);
	fmp->dm_idx = alloc_unr(devfs_unr);
	sx_init(&fmp->dm_lock, "devfsmount");
	fmp->dm_holdcnt = 1;

	/* Flag updates require the mount interlock. */
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED;
#ifdef MAC
	mp->mnt_flag |= MNT_MULTILABEL;
#endif
	MNT_IUNLOCK(mp);

	fmp->dm_mount = mp;
	mp->mnt_data = (void *) fmp;
	vfs_getnewfsid(mp);

	fmp->dm_rootdir = devfs_vmkdir(fmp, NULL, 0, NULL, DEVFS_ROOTINO);

	/* Sanity-check that the root vnode is reachable before returning. */
	error = devfs_root(mp, LK_EXCLUSIVE, &rvp);
	if (error) {
		sx_destroy(&fmp->dm_lock);
		free_unr(devfs_unr, fmp->dm_idx);
		free(fmp, M_DEVFS);
		return (error);
	}
	VOP_UNLOCK(rvp, 0);

	vfs_mountedfrom(mp, "devfs");
	return (0);
}
/* ARGSUSED */
/*
 * Mount procfs (OpenBSD flavor).
 *
 * Copies the optional mount arguments in from userland, allocates the
 * per-mount structure, and records the mount-point / mounted-from
 * names directly into mnt_stat.
 */
int procfs_mount(struct mount *mp, const char *path, void *data, struct nameidata *ndp, struct proc *p)
{
	struct procfsmount *pmnt;
	struct procfs_args args;
	int error;

	/* UIO_MX must be a power of two for directory-entry packing. */
	if (UIO_MX & (UIO_MX-1)) {
		log(LOG_ERR, "procfs: invalid directory entry size");
		return (EINVAL);
	}

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	/* Arguments are optional; absent args mean default flags. */
	if (data != NULL) {
		error = copyin(data, &args, sizeof(args));
		if (error != 0)
			return (error);

		if (args.version != PROCFS_ARGSVERSION)
			return (EINVAL);
	} else
		args.flags = 0;
	/* NOTE(review): when data == NULL only args.flags is initialized,
	 * yet the full struct is bcopy'd into mount_info below — the other
	 * fields are indeterminate; confirm this is acceptable upstream. */

	mp->mnt_flag |= MNT_LOCAL;
	pmnt = (struct procfsmount *) malloc(sizeof(struct procfsmount),
	    M_MISCFSMNT, M_WAITOK);

	mp->mnt_data = pmnt;
	vfs_getnewfsid(mp);

	/* Record mount names, zero-padded to MNAMELEN. */
	bzero(mp->mnt_stat.f_mntonname, MNAMELEN);
	strlcpy(mp->mnt_stat.f_mntonname, path, MNAMELEN);
	bzero(mp->mnt_stat.f_mntfromname, MNAMELEN);
	bcopy("procfs", mp->mnt_stat.f_mntfromname, sizeof("procfs"));
	bzero(mp->mnt_stat.f_mntfromspec, MNAMELEN);
	bcopy("procfs", mp->mnt_stat.f_mntfromspec, sizeof("procfs"));
	bcopy(&args, &mp->mnt_stat.mount_info.procfs_args, sizeof(args));

#ifdef notyet
	pmnt->pmnt_exechook = exechook_establish(procfs_revoke_vnodes, mp);
#endif

	pmnt->pmnt_flags = args.flags;

	return (0);
}
/*
 * AFS mount entry point (legacy/omount path).
 *
 * NOTE(review): the return type and an alternate prototype live in an
 * #if branch above this view — the dangling #endif below belongs to it.
 */
afs_omount(struct mount *mp, char *path, caddr_t data, struct nameidata *ndp, THREAD_OR_PROC)
#endif
{
	/* ndp contains the mounted-from device. Just ignore it.
	 * we also don't care about our proc struct. */
	size_t size;
	if (mp->mnt_flag & MNT_UPDATE)
		return EINVAL;

	AFS_GLOCK();
	AFS_STATCNT(afs_mount);

	/* Only one AFS mount may exist at a time. */
	if (afs_globalVFS) {		/* Don't allow remounts. */
		AFS_GUNLOCK();
		return EBUSY;
	}

	afs_globalVFS = mp;
	mp->vfs_bsize = 8192;
	vfs_getnewfsid(mp);
#ifdef AFS_FBSD70_ENV /* XXX 70? */
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE; /* solid steel */
#endif
	mp->mnt_stat.f_iosize = 8192;

	/* NOTE(review): copyinstr's return value is ignored; if it fails,
	 * `size` is indeterminate and feeds the memset below — confirm
	 * whether upstream tolerates this or it should be checked. */
	if (path != NULL)
		copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
	else
		bcopy("/afs", mp->mnt_stat.f_mntonname, size = 4);
	memset(mp->mnt_stat.f_mntonname + size, 0, MNAMELEN - size);
	memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN);
	strcpy(mp->mnt_stat.f_mntfromname, "AFS");
	/* null terminated string "AFS" will fit, just leave it be. */
	strcpy(mp->mnt_stat.f_fstypename, "afs");
#ifdef AFS_FBSD70_ENV
	MNT_IUNLOCK(mp);
#endif
	AFS_GUNLOCK();
	/* NOTE(review): `p` is not declared in this view — presumably it
	 * comes from the THREAD_OR_PROC parameter macro; verify. */
	afs_statfs(mp, &mp->mnt_stat, p);

	return 0;
}
/*
 * Mount the per-process file descriptors (/dev/fd)
 *
 * FreeBSD flavor: allocates the fdescmount, attaches it to the mount
 * *before* allocating the root vnode (allocvp consults mnt_data), and
 * undoes both on failure.
 */
static int
fdesc_mount(struct mount *mp)
{
	int error = 0;
	struct fdescmount *fmp;
	struct vnode *rvp;

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return (EOPNOTSUPP);

	fmp = malloc(sizeof(struct fdescmount),
	    M_FDESCMNT, M_WAITOK);	/* XXX */

	/*
	 * We need to initialize a few bits of our local mount point struct to
	 * avoid confusion in allocvp.
	 */
	mp->mnt_data = (qaddr_t) fmp;
	fmp->flags = 0;
	error = fdesc_allocvp(Froot, -1, FD_ROOT, mp, &rvp);
	if (error) {
		/* Roll back: free the struct and detach it from the mount. */
		free(fmp, M_FDESCMNT);
		mp->mnt_data = 0;
		return (error);
	}
	rvp->v_type = VDIR;
	rvp->v_vflag |= VV_ROOT;
	fmp->f_root = rvp;
	VOP_UNLOCK(rvp, 0);
	/* XXX -- don't mark as local to work around fts() problems */
	/*mp->mnt_flag |= MNT_LOCAL;*/
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "fdescfs");
	return (0);
}
/*
 * Mount the per-process file descriptors (/dev/fd)
 *
 * DragonFly flavor: registers the fdesc vnode operations, allocates the
 * root vnode, then the per-mount structure, and stamps the statfs
 * "mounted from" name as "fdesc".
 */
static int
fdesc_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	int error = 0;
	struct fdescmount *fmp;
	struct vnode *rvp;

	/* fdesc cannot serve as the root filesystem. */
	if (path == NULL)
		panic("fdesc_mount: cannot mount as root");

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	vfs_add_vnodeops(mp, &fdesc_vnode_vops, &mp->mnt_vn_norm_ops);

	error = fdesc_allocvp(Froot, FD_ROOT, mp, &rvp);
	if (error)
		return (error);

	fmp = kmalloc(sizeof(struct fdescmount),
	    M_FDESCMNT, M_WAITOK);	/* XXX */
	rvp->v_type = VDIR;
	vsetflags(rvp, VROOT);
	fmp->f_root = rvp;
	/* XXX -- don't mark as local to work around fts() problems */
	/*mp->mnt_flag |= MNT_LOCAL;*/
	mp->mnt_data = (qaddr_t) fmp;
	vfs_getnewfsid(mp);

	/* f_mntfromname = "fdesc", zero-padded. */
	bzero(mp->mnt_stat.f_mntfromname, MNAMELEN);
	bcopy("fdesc", mp->mnt_stat.f_mntfromname, sizeof("fdesc"));
	fdesc_statfs(mp, &mp->mnt_stat, cred);
	return (0);
}
/*
 * Mount an SMB share (FreeBSD nmount flavor).
 *
 * Mount options arrive via mnt_optnew: "fd" is the file descriptor of
 * an established SMB session handed over from userland; uid/gid and
 * file/dir modes describe how remote files are presented locally.
 */
static int smbfs_mount(struct mount *mp)
{
	struct smbmount *smp = NULL;
	struct smb_vc *vcp;
	struct smb_share *ssp = NULL;
	struct vnode *vp;
	struct thread *td;
	struct smb_dev *dev;
	struct smb_cred *scred;
	int error, v;
	char *pc, *pe;

	dev = NULL;
	td = curthread;
	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return EOPNOTSUPP;
	/* Reject any option not in our known list. */
	if (vfs_filteropt(mp->mnt_optnew, smbfs_opts)) {
		vfs_mount_error(mp, "%s", "Invalid option");
		return (EINVAL);
	}

	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, td->td_ucred);

	/* Ask userspace of `fd`, the file descriptor of this session */
	if (1 != vfs_scanopt(mp->mnt_optnew, "fd", "%d", &v)) {
		vfs_mount_error(mp, "No fd option");
		smbfs_free_scred(scred);
		return (EINVAL);
	}
	error = smb_dev2share(v, SMBM_EXEC, scred, &ssp, &dev);
	smp = malloc(sizeof(*smp), M_SMBFSDATA, M_WAITOK | M_ZERO);
	if (error) {
		printf("invalid device handle %d (%d)\n", v, error);
		vfs_mount_error(mp, "invalid device handle %d %d\n", v, error);
		smbfs_free_scred(scred);
		free(smp, M_SMBFSDATA);
		return error;
	}
	vcp = SSTOVC(ssp);
	/* Keep the share reference but drop its lock for the duration. */
	smb_share_unlock(ssp);
	mp->mnt_stat.f_iosize = SSTOVC(ssp)->vc_txmax;
	mp->mnt_data = smp;

	smp->sm_share = ssp;
	smp->sm_root = NULL;
	smp->sm_dev = dev;
	/* Remaining options; all are required to be well-formed ints. */
	if (1 != vfs_scanopt(mp->mnt_optnew,
	    "caseopt", "%d", &smp->sm_caseopt)) {
		vfs_mount_error(mp, "Invalid caseopt");
		error = EINVAL;
		goto bad;
	}
	if (1 != vfs_scanopt(mp->mnt_optnew, "uid", "%d", &v)) {
		vfs_mount_error(mp, "Invalid uid");
		error = EINVAL;
		goto bad;
	}
	smp->sm_uid = v;

	if (1 != vfs_scanopt(mp->mnt_optnew, "gid", "%d", &v)) {
		vfs_mount_error(mp, "Invalid gid");
		error = EINVAL;
		goto bad;
	}
	smp->sm_gid = v;

	if (1 != vfs_scanopt(mp->mnt_optnew, "file_mode", "%d", &v)) {
		vfs_mount_error(mp, "Invalid file_mode");
		error = EINVAL;
		goto bad;
	}
	/* Force regular-file type bit; keep only permission bits. */
	smp->sm_file_mode = (v & (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;

	if (1 != vfs_scanopt(mp->mnt_optnew, "dir_mode", "%d", &v)) {
		vfs_mount_error(mp, "Invalid dir_mode");
		error = EINVAL;
		goto bad;
	}
	smp->sm_dir_mode = (v & (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;

	vfs_flagopt(mp->mnt_optnew,
	    "nolong", &smp->sm_flags, SMBFS_MOUNT_NO_LONG);

	/* Build f_mntfromname as "//user@server/share", bounded by pe. */
	pc = mp->mnt_stat.f_mntfromname;
	pe = pc + sizeof(mp->mnt_stat.f_mntfromname);
	bzero(pc, MNAMELEN);
	*pc++ = '/';
	*pc++ = '/';
	pc = strchr(strncpy(pc, vcp->vc_username, pe - pc - 2), 0);
	if (pc < pe-1) {
		*(pc++) = '@';
		pc = strchr(strncpy(pc, vcp->vc_srvname, pe - pc - 2), 0);
		if (pc < pe - 1) {
			*(pc++) = '/';
			strncpy(pc, ssp->ss_name, pe - pc - 2);
		}
	}
	vfs_getnewfsid(mp);

	/* Sanity-check: the root vnode must be obtainable. */
	error = smbfs_root(mp, LK_EXCLUSIVE, &vp);
	if (error) {
		vfs_mount_error(mp, "smbfs_root error: %d", error);
		goto bad;
	}
	VOP_UNLOCK(vp, 0);
	SMBVDEBUG("root.v_usecount = %d\n", vrefcnt(vp));

#ifdef DIAGNOSTIC
	SMBERROR("mp=%p\n", mp);
#endif
	smbfs_free_scred(scred);
	return error;
bad:
	if (ssp)
		smb_share_put(ssp, scred);
	smbfs_free_scred(scred);
	SMB_LOCK();
	/* Only tear down the device if nobody else claimed it meanwhile. */
	if (error && smp->sm_dev == dev) {
		smp->sm_dev = NULL;
		sdp_trydestroy(dev);
	}
	SMB_UNLOCK();
	free(smp, M_SMBFSDATA);
	return error;
}
/*
 * Mount null layer
 *
 * XNU loopback (nullfs) mount: looks up the lower path given by the
 * user, mirrors it read-only, and copies size/capability information
 * from the lower filesystem into this mount's statfs block.
 * Reference discipline: the lower root vnode and our root vnode are
 * held with usecounts (vnode_ref) that are dropped at unmount.
 */
static int
nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, vfs_context_t ctx)
{
	int error = 0;
	struct vnode *lowerrootvp = NULL, *vp = NULL;
	struct vfsstatfs * sp = NULL;
	struct null_mount * xmp = NULL;
	char data[MAXPATHLEN];
	size_t count;
	struct vfs_attr vfa;
	/* set defaults (arbitrary since this file system is readonly) */
	uint32_t bsize  = BLKDEV_IOSIZE;
	size_t iosize   = BLKDEV_IOSIZE;
	uint64_t blocks = 4711 * 4711;
	uint64_t bfree  = 0;
	uint64_t bavail = 0;
	uint64_t bused  = 4711;
	uint64_t files  = 4711;
	uint64_t ffree  = 0;
	kauth_cred_t cred = vfs_context_ucred(ctx);

	NULLFSDEBUG("nullfs_mount(mp = %p) %llx\n", (void *)mp, vfs_flags(mp));

	if (vfs_flags(mp) & MNT_ROOTFS)
		return (EOPNOTSUPP);

	/*
	 * Update is a no-op
	 */
	if (vfs_isupdate(mp)) {
		return ENOTSUP;
	}

	/* check entitlement */
	if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT)) {
		return EPERM;
	}

	/*
	 * Get argument
	 */
	error = copyinstr(user_data, data, MAXPATHLEN - 1, &count);
	if (error) {
		NULLFSDEBUG("nullfs: error copying data form user %d\n", error);
		goto error;
	}

	/* This could happen if the system is configured for 32 bit inodes instead of
	 * 64 bit */
	if (count > MAX_MNT_FROM_LENGTH) {
		error = EINVAL;
		NULLFSDEBUG("nullfs: path to translocate too large for this system %d vs %d\n",
		    count, MAX_MNT_FROM_LENGTH);
		goto error;
	}

	error = vnode_lookup(data, 0, &lowerrootvp, ctx);
	if (error) {
		NULLFSDEBUG("lookup %s -> %d\n", data, error);
		goto error;
	}

	/* lowervrootvp has an iocount after vnode_lookup, drop that for a usecount.
	   Keep this to signal what we want to keep around the thing we are mirroring.
	   Drop it in unmount.*/
	error = vnode_ref(lowerrootvp);
	vnode_put(lowerrootvp);
	if (error) {
		// If vnode_ref failed, then null it out so it can't be used anymore in cleanup.
		lowerrootvp = NULL;
		goto error;
	}

	NULLFSDEBUG("mount %s\n", data);

	MALLOC(xmp, struct null_mount *, sizeof(*xmp), M_TEMP, M_WAITOK | M_ZERO);
	if (xmp == NULL) {
		error = ENOMEM;
		goto error;
	}

	/*
	 * Save reference to underlying FS
	 */
	xmp->nullm_lowerrootvp = lowerrootvp;
	/* vid lets us detect lower-root recycling later. */
	xmp->nullm_lowerrootvid = vnode_vid(lowerrootvp);

	error = null_getnewvnode(mp, NULL, NULL, &vp, NULL, 1);
	if (error) {
		goto error;
	}

	/* vp has an iocount on it from vnode_create. drop that for a usecount. This
	 * is our root vnode so we drop the ref in unmount
	 *
	 * Assuming for now that because we created this vnode and we aren't finished
	 * mounting we can get a ref*/
	vnode_ref(vp);
	vnode_put(vp);

	error = nullfs_init_lck(&xmp->nullm_lock);
	if (error) {
		goto error;
	}

	xmp->nullm_rootvp = vp;

	/* read the flags the user set, but then ignore some of them, we will only
	   allow them if they are set on the lower file system */
	uint64_t flags      = vfs_flags(mp) & (~(MNT_IGNORE_OWNERSHIP | MNT_LOCAL));
	uint64_t lowerflags = vfs_flags(vnode_mount(lowerrootvp)) &
	    (MNT_LOCAL | MNT_QUARANTINE | MNT_IGNORE_OWNERSHIP | MNT_NOEXEC);

	if (lowerflags) {
		flags |= lowerflags;
	}

	/* force these flags */
	flags |= (MNT_DONTBROWSE | MNT_MULTILABEL | MNT_NOSUID | MNT_RDONLY);
	vfs_setflags(mp, flags);

	vfs_setfsprivate(mp, xmp);
	vfs_getnewfsid(mp);
	vfs_setlocklocal(mp);

	/* fill in the stat block */
	sp = vfs_statfs(mp);
	strlcpy(sp->f_mntfromname, data, MAX_MNT_FROM_LENGTH);

	sp->f_flags = flags;

	xmp->nullm_flags = NULLM_CASEINSENSITIVE; /* default to case insensitive */

	/* Mirror size/capability attributes of the lower filesystem. */
	error = nullfs_vfs_getlowerattr(vnode_mount(lowerrootvp), &vfa, ctx);
	if (error == 0) {
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bsize)) {
			bsize = vfa.f_bsize;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_iosize)) {
			iosize = vfa.f_iosize;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_blocks)) {
			blocks = vfa.f_blocks;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bfree)) {
			bfree = vfa.f_bfree;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bavail)) {
			bavail = vfa.f_bavail;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bused)) {
			bused = vfa.f_bused;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_files)) {
			files = vfa.f_files;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_ffree)) {
			ffree = vfa.f_ffree;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) {
			/* Lower FS is case sensitive only if it both supports and
			 * reports the capability. */
			if ((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] &
			    (VOL_CAP_FMT_CASE_SENSITIVE)) &&
			    (vfa.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] &
			    (VOL_CAP_FMT_CASE_SENSITIVE))) {
				xmp->nullm_flags &= ~NULLM_CASEINSENSITIVE;
			}
		}
	} else {
		goto error;
	}

	sp->f_bsize  = bsize;
	sp->f_iosize = iosize;
	sp->f_blocks = blocks;
	sp->f_bfree  = bfree;
	sp->f_bavail = bavail;
	sp->f_bused  = bused;
	sp->f_files  = files;
	sp->f_ffree  = ffree;

	/* Associate the mac label information from the mirrored filesystem with the
	 * mirror */
	MAC_PERFORM(mount_label_associate, cred, vnode_mount(lowerrootvp),
	    vfs_mntlabel(mp));

	NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n", sp->f_mntfromname,
	    sp->f_mntonname);
	return (0);

error:
	if (xmp) {
		FREE(xmp, M_TEMP);
	}
	if (lowerrootvp) {
		/* Re-obtain an iocount so the rele/put pair is legal. */
		vnode_getwithref(lowerrootvp);
		vnode_rele(lowerrootvp);
		vnode_put(lowerrootvp);
	}
	if (vp) {
		/* we made the root vnode but the mount is failed, so clean it up */
		vnode_getwithref(vp);
		vnode_rele(vp);
		/* give vp back */
		vnode_recycle(vp);
		vnode_put(vp);
	}
	return error;
}
/*
 * Mount a 9P filesystem (XNU).
 *
 * Copies the (32- or 64-bit) mount arguments in from userland, builds
 * the per-mount state, connects to the 9P server, performs optional
 * p9any authentication, attaches to obtain the root fid/vnode, and
 * fills in the statfs block.
 *
 * Fixes over the previous version:
 *  - The early `goto error` paths (copyin failures) previously reached
 *    the error label with nmp == NULL and dereferenced nmp->so /
 *    passed NULL to freemount_9p — a NULL-pointer panic. The error
 *    label now guards on nmp.
 *  - copyinstr() of args.spec was unchecked: on failure `size` was
 *    indeterminate and fed to bzero(). On failure we now zero the
 *    whole f_mntfromname instead.
 */
static int
vfs_mount_9p(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
#pragma unused(devvp)
	struct sockaddr *addr, *authaddr;
	struct vfsstatfs *sp;
	char authkey[DESKEYLEN+1];
	kauth_cred_t cred;
	user_args_9p args;
	mount_9p *nmp;
	size_t size;
	fid_9p fid;
	qid_9p qid;
	char *vers;
	int e;

	TRACE();
	nmp = NULL;
	addr = NULL;
	authaddr = NULL;
	fid = NOFID;

	if (vfs_isupdate(mp))
		return ENOTSUP;

	/* Copy in mount arguments; 32-bit callers use the packed layout. */
	if (vfs_context_is64bit(ctx)) {
		if ((e=copyin(data, &args, sizeof(args))))
			goto error;
	} else {
		args_9p args32;
		if ((e=copyin(data, &args32, sizeof(args32))))
			goto error;
		args.spec = CAST_USER_ADDR_T(args32.spec);
		args.addr = CAST_USER_ADDR_T(args32.addr);
		args.addrlen = args32.addrlen;
		args.authaddr = CAST_USER_ADDR_T(args32.authaddr);
		args.authaddrlen = args32.authaddrlen;
		args.volume = CAST_USER_ADDR_T(args32.volume);
		args.uname = CAST_USER_ADDR_T(args32.uname);
		args.aname = CAST_USER_ADDR_T(args32.aname);
		args.authkey = CAST_USER_ADDR_T(args32.authkey);
		args.flags = args32.flags;
	}

	e = ENOMEM;
	nmp = malloc_9p(sizeof(*nmp));
	if (nmp == NULL)
		return e;

	nmp->mp = mp;
	TAILQ_INIT(&nmp->req);
	nmp->lck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->reqlck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->nodelck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->node = hashinit(desiredvnodes, M_TEMP, &nmp->nodelen);
	if (nmp->lck==NULL || nmp->reqlck==NULL ||
	    nmp->nodelck==NULL || nmp->node==NULL)
		goto error;

	if ((e=nameget_9p(args.volume, &nmp->volume)))
		goto error;
	if ((e=nameget_9p(args.uname, &nmp->uname)))
		goto error;
	if ((e=nameget_9p(args.aname, &nmp->aname)))
		goto error;

	/* Default new files to the mounting user's identity when known. */
	cred = vfs_context_ucred(ctx);
	if (IS_VALID_CRED(cred)) {
		nmp->uid = kauth_cred_getuid(cred);
		nmp->gid = kauth_cred_getgid(cred);
	} else {
		nmp->uid = KAUTH_UID_NONE;
		nmp->gid = KAUTH_GID_NONE;
	}

	vfs_getnewfsid(mp);
	vfs_setfsprivate(mp, nmp);

	nmp->flags = args.flags;
	if ((e=addrget_9p(args.addr, args.addrlen, &addr)))
		goto error;
	if ((e=connect_9p(nmp, addr)))
		goto error;

	/* Negotiate protocol version; prefer 9P2000.u when requested. */
	vers = VERSION9P;
	if (ISSET(nmp->flags, FLAG_DOTU))
		vers = VERSION9PDOTU;
	if ((e=version_9p(nmp, vers, &nmp->version)))
		goto error;
	if (ISSET(nmp->flags, FLAG_DOTU) &&
	    strcmp(VERSION9PDOTU, nmp->version)==0)
		SET(nmp->flags, F_DOTU);

	/* Optional p9any authentication against a separate auth server. */
	nmp->afid = NOFID;
	if (args.authaddr && args.authaddrlen && args.authkey) {
		if ((e=copyin(args.authkey, authkey, DESKEYLEN)))
			goto error;
		if ((e=addrget_9p(args.authaddr, args.authaddrlen, &authaddr)))
			goto error;
		if ((e=auth_9p(nmp, nmp->uname, nmp->aname, nmp->uid,
		    &nmp->afid, &qid)))
			goto error;
		if (nmp->afid!=NOFID &&
		    (e=authp9any_9p(nmp, nmp->afid, authaddr,
		    nmp->uname, authkey)))
			goto error;
		/* Scrub the key as soon as it is no longer needed. */
		bzero(authkey, DESKEYLEN);
	}

	if ((e=attach_9p(nmp, nmp->uname, nmp->aname, nmp->afid,
	    nmp->uid, &fid, &qid)))
		goto error;

	if ((e=nget_9p(nmp, fid, qid, NULL, &nmp->root, NULL, ctx)))
		goto error;

	nunlock_9p(NTO9P(nmp->root));
	/* Trade the iocount from nget for a usecount held until unmount. */
	e = vnode_ref(nmp->root);
	vnode_put(nmp->root);
	if (e)
		goto error;

	vfs_setauthopaque(mp);
	vfs_clearauthopaqueaccess(mp);
	vfs_setlocklocal(mp);

	// init stats
	sp = vfs_statfs(nmp->mp);
	/* If the spec string cannot be copied in, present an empty name. */
	if (copyinstr(args.spec, sp->f_mntfromname, MNAMELEN-1, &size) != 0)
		size = 0;
	bzero(sp->f_mntfromname+size, MNAMELEN-size);
	sp->f_bsize = PAGE_SIZE;
	sp->f_iosize = nmp->msize-IOHDRSZ;
	sp->f_blocks = sp->f_bfree = sp->f_bavail = sp->f_bused = 0;
	sp->f_files = 65535;
	sp->f_ffree = sp->f_files-2;
	sp->f_flags = vfs_flags(mp);

	free_9p(addr);
	free_9p(authaddr);
	return 0;

error:
	bzero(authkey, DESKEYLEN);
	free_9p(addr);
	free_9p(authaddr);
	/* nmp is NULL when the argument copyin failed; only tear down
	 * the connection and mount state if it was actually created. */
	if (nmp != NULL) {
		if (nmp->so) {
			clunk_9p(nmp, fid);
			disconnect_9p(nmp);
		}
		freemount_9p(nmp);
	}
	vfs_setfsprivate(mp, NULL);
	return e;
}
/*
 * Core ZFS mount: allocate and initialize the zfsvfs, open the
 * objset named by `osname` (falling back to read-only on EROFS),
 * set up the znode layer, and — for non-snapshot mounts — drain the
 * unlinked set. On any failure everything allocated here is torn
 * down at `out:`.
 */
static int zfs_domount(struct mount *mp, dev_t mount_dev, char *osname, vfs_context_t ctx)
{
	uint64_t readonly;
	int error = 0;
	int mode;
	zfsvfs_t *zfsvfs;
	znode_t *zp = NULL;
	struct timeval tv;

	ASSERT(mp);
	ASSERT(osname);

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_vfs = mp;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_assign = TXG_NOWAIT;
	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rw_init(&zfsvfs->z_unmount_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_unmount_inactive_lock, NULL, RW_DEFAULT, NULL);

#ifndef __APPLE__
	/* Initialize the generic filesystem structure. */
	vfsp->vfs_bcount = 0;
	vfsp->vfs_data = NULL;

	if (zfs_create_unique_device(&mount_dev) == -1) {
		error = ENODEV;
		goto out;
	}
	ASSERT(vfs_devismounted(mount_dev) == 0);
#endif

	vfs_setfsprivate(mp, zfsvfs);

	/* Honor the dataset's readonly property at mount time. */
	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
		goto out;

	if (readonly) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_RDONLY));
	} else {
		mode = DS_MODE_PRIMARY;
	}

	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (error == EROFS) {
		/* Pool is read-only; retry the open read-only. */
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
		    &zfsvfs->z_os);
	}
	if (error)
		goto out;

	if (error = zfs_init_fs(zfsvfs, &zp, (cred_t *) vfs_context_ucred(ctx)))
		goto out;

	/* The call to zfs_init_fs leaves the vnode held, release it here. */
	vnode_put(ZTOV(zp));

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t xattr;

		ASSERT(mode & DS_MODE_READONLY);
#if 0
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if (error = dsl_prop_get_integer(osname, "xattr", &xattr, NULL))
			goto out;
		xattr_changed_cb(zfsvfs, xattr);
#endif
		zfsvfs->z_issnap = B_TRUE;
	} else {
		if (!vfs_isrdonly(mp))
			zfs_unlinked_drain(zfsvfs);

#ifndef __APPLE__
		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain(). (Further note: ziltest doesn't
		 * use readonly mounts, where zfs_unlinked_drain() isn't
		 * called.) This is because ziltest causes spa_sync()
		 * to think it's committed, but actually it is not, so
		 * the intent log contains many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated in
		 * a yet later txg. This would write a "create object
		 * N" record to the intent log. Normally, this would be
		 * fine because the spa_sync() would have written out
		 * the fact that object N is free, before we could write
		 * the "create object N" intent log record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk. So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
		    zfs_replay_vector);
		if (!zil_disable)
			zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
#endif
	}

#if 0
	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);
#endif

	/*
	 * Record the mount time (for Spotlight)
	 */
	microtime(&tv);
	zfsvfs->z_mount_time = tv.tv_sec;

out:
	if (error) {
		/* Unwind everything allocated above. */
		if (zfsvfs->z_os)
			dmu_objset_close(zfsvfs->z_os);
		mutex_destroy(&zfsvfs->z_znodes_lock);
		list_destroy(&zfsvfs->z_all_znodes);
		rw_destroy(&zfsvfs->z_unmount_lock);
		rw_destroy(&zfsvfs->z_unmount_inactive_lock);
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
	} else {
		OSIncrementAtomic(&zfs_active_fs_count);
		(void) copystr(osname, vfs_statfs(mp)->f_mntfromname,
		    MNAMELEN - 1, 0);
		vfs_getnewfsid(mp);
	}

	return (error);
}
/*ARGSUSED*/ int coda_mount(struct mount *vfsp) { struct vnode *dvp; struct cnode *cp; struct cdev *dev; struct coda_mntinfo *mi; struct vnode *rootvp; struct CodaFid rootfid = INVAL_FID; struct CodaFid ctlfid = CTL_FID; int error; struct nameidata ndp; ENTRY; char *from; if (vfs_filteropt(vfsp->mnt_optnew, coda_opts)) return (EINVAL); from = vfs_getopts(vfsp->mnt_optnew, "from", &error); if (error) return (error); coda_vfsopstats_init(); coda_vnodeopstats_init(); MARK_ENTRY(CODA_MOUNT_STATS); if (CODA_MOUNTED(vfsp)) { MARK_INT_FAIL(CODA_MOUNT_STATS); return (EBUSY); } /* * Validate mount device. Similar to getmdev(). */ NDINIT(&ndp, LOOKUP, FOLLOW, UIO_SYSSPACE, from, curthread); error = namei(&ndp); dvp = ndp.ni_vp; if (error) { MARK_INT_FAIL(CODA_MOUNT_STATS); return (error); } if (dvp->v_type != VCHR) { MARK_INT_FAIL(CODA_MOUNT_STATS); vrele(dvp); NDFREE(&ndp, NDF_ONLY_PNBUF); return (ENXIO); } dev = dvp->v_rdev; vrele(dvp); NDFREE(&ndp, NDF_ONLY_PNBUF); /* * Initialize the mount record and link it to the vfs struct. */ mi = dev2coda_mntinfo(dev); if (!mi) { MARK_INT_FAIL(CODA_MOUNT_STATS); printf("Coda mount: %s is not a cfs device\n", from); return (ENXIO); } if (!VC_OPEN(&mi->mi_vcomm)) { MARK_INT_FAIL(CODA_MOUNT_STATS); return (ENODEV); } /* * No initialization (here) of mi_vcomm! */ vfsp->mnt_data = mi; vfs_getnewfsid (vfsp); mi->mi_vfsp = vfsp; mi->mi_started = 0; /* XXX See coda_root() */ /* * Make a root vnode to placate the Vnode interface, but don't * actually make the CODA_ROOT call to venus until the first call to * coda_root in case a server is down while venus is starting. */ cp = make_coda_node(&rootfid, vfsp, VDIR); rootvp = CTOV(cp); rootvp->v_vflag |= VV_ROOT; cp = make_coda_node(&ctlfid, vfsp, VREG); coda_ctlvp = CTOV(cp); /* * Add vfs and rootvp to chain of vfs hanging off mntinfo. */ mi->mi_vfsp = vfsp; mi->mi_rootvp = rootvp; vfs_mountedfrom(vfsp, from); /* * Error is currently guaranteed to be zero, but in case some code * changes... 
*/ CODADEBUG(1, myprintf(("coda_mount returned %d\n", error)););
/*
 * Mount an SMB share (NetBSD flavor).
 *
 * Mount arguments arrive pre-copied in `data`. Handles MNT_GETARGS
 * (return current args), rejects MNT_UPDATE, resolves the session
 * share from the `dev_fd` descriptor, and builds the per-mount state
 * and "//user@server/share" mounted-from name.
 */
int smbfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct smbfs_args *args = data;	  /* holds data from mount request */
	struct smbmount *smp = NULL;
	struct smb_vc *vcp;
	struct smb_share *ssp = NULL;
	struct smb_cred scred;
	struct proc *p;
	int error;

	if (*data_len < sizeof *args)
		return EINVAL;

	/* NOTE(review): `p` is assigned but never used below — presumably
	 * a leftover; harmless. */
	p = l->l_proc;

	/* Copy the live mount arguments back to the caller. */
	if (mp->mnt_flag & MNT_GETARGS) {
		smp = VFSTOSMBFS(mp);
		if (smp == NULL)
			return EIO;
		*args = smp->sm_args;
		*data_len = sizeof *args;
		return 0;
	}

	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	if (args->version != SMBFS_VERSION) {
		SMBVDEBUG("mount version mismatch: kernel=%d, mount=%d\n",
		    SMBFS_VERSION, args->version);
		return EINVAL;
	}

	error = set_statvfs_info(path, UIO_USERSPACE, NULL, UIO_USERSPACE,
	    mp->mnt_op->vfs_name, mp, l);
	if (error)
		return error;

	/* Resolve the SMB share from the session descriptor. */
	smb_makescred(&scred, l, l->l_cred);
	error = smb_dev2share(args->dev_fd, SMBM_EXEC, &scred, &ssp);
	if (error)
		return error;

	smb_share_unlock(ssp);	/* keep ref, but unlock */
	vcp = SSTOVC(ssp);

	mp->mnt_stat.f_iosize = vcp->vc_txmax;
	/* Name length limit depends on the negotiated dialect. */
	mp->mnt_stat.f_namemax =
	    (vcp->vc_hflags2 & SMB_FLAGS2_KNOWS_LONG_NAMES) ? 255 : 12;

	MALLOC(smp, struct smbmount *, sizeof(*smp), M_SMBFSDATA, M_WAITOK);
	memset(smp, 0, sizeof(*smp));
	mp->mnt_data = smp;

	smp->sm_hash = hashinit(desiredvnodes, HASH_LIST, true,
	    &smp->sm_hashlen);

	mutex_init(&smp->sm_hashlock, MUTEX_DEFAULT, IPL_NONE);
	smp->sm_share = ssp;
	smp->sm_root = NULL;
	smp->sm_args = *args;
	smp->sm_caseopt = args->caseopt;
	/* Force file/dir type bits; keep only permission bits from args. */
	smp->sm_args.file_mode = (smp->sm_args.file_mode &
	    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;
	smp->sm_args.dir_mode  = (smp->sm_args.dir_mode &
	    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;

	memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN);
	snprintf(mp->mnt_stat.f_mntfromname, MNAMELEN, "//%s@%s/%s",
	    vcp->vc_username, vcp->vc_srvname, ssp->ss_name);

	vfs_getnewfsid(mp);
	return (0);
}
int afs_mount(struct mount *mp, char *path, caddr_t data, struct nameidata *ndp, CTX_TYPE ctx) #endif { /* ndp contains the mounted-from device. Just ignore it. * we also don't care about our proc struct. */ size_t size; int error; #ifdef AFS_DARWIN80_ENV struct vfsioattr ioattr; /* vfs_statfs advertised as RO, but isn't */ /* new api will be needed to initialize this information (nfs needs to set mntfromname too) */ #endif STATFS_TYPE *mnt_stat = vfs_statfs(mp); if (vfs_isupdate(mp)) return EINVAL; AFS_GLOCK(); AFS_STATCNT(afs_mount); if (data == 0 && afs_globalVFS) { /* Don't allow remounts. */ AFS_GUNLOCK(); return (EBUSY); } afs_globalVFS = mp; #ifdef AFS_DARWIN80_ENV vfs_ioattr(mp, &ioattr); ioattr.io_devblocksize = (16 * 32768); vfs_setioattr(mp, &ioattr); /* f_iosize is handled in VFS_GETATTR */ #else mp->vfs_bsize = 8192; mp->mnt_stat.f_iosize = 8192; #endif vfs_getnewfsid(mp); #ifndef AFS_DARWIN80_ENV (void)copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); memset(mp->mnt_stat.f_mntonname + size, 0, MNAMELEN - size); #endif memset(mnt_stat->f_mntfromname, 0, MNAMELEN); if (data == 0) { strcpy(mnt_stat->f_mntfromname, "AFS"); /* null terminated string "AFS" will fit, just leave it be. */ vfs_setfsprivate(mp, NULL); } else { struct VenusFid *rootFid = NULL; struct volume *tvp; char volName[MNAMELEN]; (void)copyinstr(data, volName, MNAMELEN - 1, &size); memset(volName + size, 0, MNAMELEN - size); if (volName[0] == 0) { strcpy(mnt_stat->f_mntfromname, "AFS"); vfs_setfsprivate(mp, &afs_rootFid); } else { struct cell *localcell = afs_GetPrimaryCell(READ_LOCK); if (localcell == NULL) { AFS_GUNLOCK(); return ENODEV; } /* Set the volume identifier to "AFS:volume.name" */ snprintf(mnt_stat->f_mntfromname, MNAMELEN - 1, "AFS:%s", volName); tvp = afs_GetVolumeByName(volName, localcell->cellNum, 1, (struct vrequest *)0, READ_LOCK); if (tvp) { int volid = (tvp->roVol ? 
tvp->roVol : tvp->volume); MALLOC(rootFid, struct VenusFid *, sizeof(*rootFid), M_UFSMNT, M_WAITOK); rootFid->Cell = localcell->cellNum; rootFid->Fid.Volume = volid; rootFid->Fid.Vnode = 1; rootFid->Fid.Unique = 1; } else { AFS_GUNLOCK(); return ENODEV; } vfs_setfsprivate(mp, &rootFid); } }
/*
 * Common mount path for the NNPFS/xfs (Arla AFS) kernel module.
 *
 * `data` names the communication character device; it is looked up,
 * validated as a VCHR xfs device, and the per-device slot in the
 * global `xfs[]` table is claimed for this mount. Heavy #ifdef usage
 * covers OSF/1, FreeBSD-with-threads, and other BSD variants.
 */
int
xfs_mount_common_sys(struct mount *mp,
		     const char *path,
		     void *data,
		     struct nameidata *ndp,
		     d_thread_t *p)
{
	struct vnode *devvp;
	dev_t dev;
	int error;
	struct vattr vat;

	NNPFSDEB(XDEBVFOPS, ("xfs_mount: "
			   "struct mount mp = %lx path = '%s' data = '%s'\n",
			   (unsigned long)mp, path, (char *)data));

#ifdef ARLA_KNFS
	NNPFSDEB(XDEBVFOPS, ("xfs_mount: mount flags = %x\n", mp->mnt_flag));

	/*
	 * mountd(8) flushes all export entries when it starts
	 * right now we ignore it (but should not)
	 */
	if (mp->mnt_flag & MNT_UPDATE
	    || mp->mnt_flag & MNT_DELEXPORT) {
		NNPFSDEB(XDEBVFOPS,
			 ("xfs_mount: ignoreing MNT_UPDATE or MNT_DELEXPORT\n"));
		return 0;
	}
#endif

	/* Look up the device node named by `data`. */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, data, p);
	error = namei(ndp);
	if (error) {
		NNPFSDEB(XDEBVFOPS, ("namei failed, errno = %d\n", error));
		return error;
	}

	devvp = ndp->ni_vp;

	if (devvp->v_type != VCHR) {
		vput(devvp);
		NNPFSDEB(XDEBVFOPS, ("not VCHR (%d)\n", devvp->v_type));
		return ENXIO;
	}
#if defined(__osf__)
	VOP_GETATTR(devvp, &vat, ndp->ni_cred, error);
#elif defined(HAVE_FREEBSD_THREAD)
	error = VOP_GETATTR(devvp, &vat, p->td_proc->p_ucred, p);
#else
	error = VOP_GETATTR(devvp, &vat, p->p_ucred, p);
#endif
	vput(devvp);
	if (error) {
		NNPFSDEB(XDEBVFOPS, ("VOP_GETATTR failed, error = %d\n", error));
		return error;
	}

	dev = VA_RDEV_TO_DEV(vat.va_rdev);

	NNPFSDEB(XDEBVFOPS, ("dev = %d.%d\n", major(dev), minor(dev)));

	/* The device must be one of ours and not already mounted. */
	if (!xfs_is_xfs_dev (dev)) {
		NNPFSDEB(XDEBVFOPS, ("%s is not a xfs device\n", (char *)data));
		return ENXIO;
	}

	if (xfs[minor(dev)].status & NNPFS_MOUNTED)
		return EBUSY;

	/* Claim the per-device slot for this mount. */
	xfs[minor(dev)].status = NNPFS_MOUNTED;
	xfs[minor(dev)].mp = mp;
	xfs[minor(dev)].root = 0;
	xfs[minor(dev)].nnodes = 0;
	xfs[minor(dev)].fd = minor(dev);
	nnfs_init_head(&xfs[minor(dev)].nodehead);

	VFS_TO_NNPFS(mp) = &xfs[minor(dev)];

#if defined(HAVE_KERNEL_VFS_GETNEWFSID)
#if defined(HAVE_TWO_ARGUMENT_VFS_GETNEWFSID)
	vfs_getnewfsid(mp, MOUNT_AFS);
#else
	vfs_getnewfsid(mp);
#endif /* HAVE_TWO_ARGUMENT_VFS_GETNEWFSID */
#endif /* HAVE_KERNEL_VFS_GETNEWFSID */

	/* Synthetic statfs numbers; this fs has no real backing store. */
	mp->mnt_stat.f_bsize = DEV_BSIZE;
#ifndef __osf__
	mp->mnt_stat.f_iosize = DEV_BSIZE;
	mp->mnt_stat.f_owner = 0;
#endif
	mp->mnt_stat.f_blocks = 4711 * 4711;
	mp->mnt_stat.f_bfree = 4711 * 4711;
	mp->mnt_stat.f_bavail = 4711 * 4711;
	mp->mnt_stat.f_files = 4711;
	mp->mnt_stat.f_ffree = 4711;
	mp->mnt_stat.f_flags = mp->mnt_flag;

#ifdef __osf__
	mp->mnt_stat.f_fsid.val[0] = dev;
	mp->mnt_stat.f_fsid.val[1] = MOUNT_NNPFS;
	MALLOC(mp->m_stat.f_mntonname, char *, strlen(path) + 1,
	       M_PATHNAME, M_WAITOK);
	strcpy(mp->m_stat.f_mntonname, path);
	MALLOC(mp->m_stat.f_mntfromname, char *, sizeof("arla"),
	       M_PATHNAME, M_WAITOK);
	strcpy(mp->m_stat.f_mntfromname, "arla");
#else /* __osf__ */
	/* NOTE(review): strncpy here does not guarantee NUL termination if
	 * path/data fill the buffer — presumably bounded by callers; verify. */
	strncpy(mp->mnt_stat.f_mntonname,
		path,
		sizeof(mp->mnt_stat.f_mntonname));

	strncpy(mp->mnt_stat.f_mntfromname,
		data,
		sizeof(mp->mnt_stat.f_mntfromname));

	strncpy(mp->mnt_stat.f_fstypename,
		"xfs",
		sizeof(mp->mnt_stat.f_fstypename));
#endif /* __osf__ */

	return 0;
}
/*
 * fuse4x (macOS) VFS mount entry point.
 *
 * Copies the fuse_mount_args from user space, validates and translates
 * the FUSE_MOPT_* option bits into FSESS_* session flags, binds this
 * mount to the fuse device opened by the daemon, fills in vfsstatfs,
 * performs the init handshake with the daemon, and pins the root vnode.
 *
 * Locking: fdev->mtx protects the device/session association; when
 * M_FUSE4X_ENABLE_BIGLOCK is set, data->biglock is additionally held
 * from device acquisition until the final unlock at the bottom.
 *
 * Returns 0 or an errno; on error the 'out:' path undoes the partial
 * mount (FSESS_MOUNTED, mount count, fs-private pointer).
 */
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err = 0;
    int mntopts = 0;
    bool mounted = false;
    uint32_t max_read = ~0;
    size_t len;
    fuse_device_t fdev = NULL;
    struct fuse_data *data = NULL;
    fuse_mount_args fusefs_args;
    struct vfsstatfs *vfsstatfsp = vfs_statfs(mp);
#if M_FUSE4X_ENABLE_BIGLOCK
    lck_mtx_t *biglock;
#endif

    fuse_trace_printf_vfsop();

    /* Remounts/updates are not supported. */
    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    /* Pessimistic default until option validation completes. */
    err = ENOTSUP;

#if M_FUSE4X_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname)
        return EINVAL;

    /* Timeouts must fall inside the compiled-in bounds. */
    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if ((fusefs_args.init_timeout > FUSE_MAX_INIT_TIMEOUT) ||
        (fusefs_args.init_timeout < FUSE_MIN_INIT_TIMEOUT)) {
        return EINVAL;
    }

    /* Translate simple altflags into session flag bits. */
    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    /* auto_xattr and native_xattr are mutually exclusive. */
    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        /* allow_root is restricted to members of the fuse4x admin group. */
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                "Either add user (id=%d) to group (id=%d), "
                "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN
                "' sysctl value.\n",
                kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        /* allow_other by non-superuser requires the sysctl or group. */
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                    "Either add user (id=%d) to group (id=%d), "
                    "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN
                    "' sysctl value.\n",
                    kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    /* Caller-chosen fsid: refuse if it would collide with another mount. */
    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t fsid;
        mount_t other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;
    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        /* Negative caching requires the vnode cache to be enabled. */
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'\n");
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    /* default_permissions and defer_permissions are mutually exclusive. */
    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    /* Find the fuse device the daemon opened. */
    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;

    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    /* A session can back at most one mount. */
    if (data->dataflags & FSESS_MOUNTED) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->dataflags |= FSESS_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    /* From here the error path must also undo the mounted state. */
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    /* Superuser may not hijack a mount backed by another user's daemon. */
    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) !=
            kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running by user_id=%d does not have privileges to mount on directory %s owned by user_id=%d\n",
            kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname,
            kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    /* Wire the session to this mount and record the tuned parameters. */
    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec = fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->init_timeout.tv_sec = fusefs_args.init_timeout;
    data->init_timeout.tv_nsec = 0;

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    /* iosize may never be smaller than one block. */
    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused = vfs_attr.f_bused;
        vfsstatfsp->f_files = vfs_attr.f_files;
        vfsstatfsp->f_ffree = vfs_attr.f_ffree;
        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mnfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }

    /* Explicit mount-time overrides win over daemon-reported values. */
    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }

    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        /* Roll back the partial mount under fdev->mtx. */
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->dataflags &= ~FSESS_MOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_FUSE4X_ENABLE_BIGLOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        /* Pin the root vnode so the mount keeps a reference to it. */
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_lck_mtx_lock(fdev->mtx);
    data = fdev->data; /* ...and again */
    if(data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_lck_mtx_unlock(fdev->mtx);
#endif

    return err;
}
/*
 * Mount a VMBlock layer over the DnD staging directory.
 *
 * NOTE(review): the return type and the #if-guarded alternate signature
 * of this function live just above this chunk; the stray #endif below
 * closes that conditional.
 *
 * Looks up the "target" mount option (the staging area), stacks a
 * VMBlock root node on top of its root vnode, and marks the new mount
 * local if the underlying filesystem is.  Returns 0 or an errno.
 */
VMBlockVFSMount(struct mount *mp,     // IN: mount(2) parameters
                struct thread *td)    // IN: caller's thread context
#endif
{
   struct VMBlockMount *xmp;
   struct nameidata nd, *ndp = &nd;
   struct vnode *lowerrootvp, *vp;
   char *target;
   char *pathname;
   int len, error = 0;

   VMBLOCKDEBUG("VMBlockVFSMount(mp = %p)\n", (void *)mp);

   /*
    * TODO: Strip out extraneous export & other misc cruft.
    */

   /*
    * Disallow the following:
    *   1.  Mounting over the system root.
    *   2.  Mount updates/remounts.  (Reconsider for rw->ro, ro->rw?)
    *   3.  Mounting VMBlock on top of a VMBlock.
    */
   if ((mp->mnt_flag & MNT_ROOTFS) ||
       (mp->mnt_flag & MNT_UPDATE) ||
       (mp->mnt_vnodecovered->v_op == &VMBlockVnodeOps)) {
      return EOPNOTSUPP;
   }

   /*
    * XXX Should only be unlocked if mnt_flag & MNT_UPDATE.
    */
   ASSERT_VOP_UNLOCKED(mp->mnt_vnodecovered, "Covered vnode already locked!");

   /*
    * Look up path to lower layer (VMBlock source / DnD staging area).
    * (E.g., in the command "mount /tmp/VMwareDnD /var/run/vmblock",
    * /tmp/VMwareDnD is the staging area.)
    */
   error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
   if (error || target[len - 1] != '\0') {
      return EINVAL;
   }

   pathname = uma_zalloc(VMBlockPathnameZone, M_WAITOK);
   if (pathname == NULL) {
      return ENOMEM;
   }

   if (strlcpy(pathname, target, MAXPATHLEN) >= MAXPATHLEN) {
      uma_zfree(VMBlockPathnameZone, pathname);
      return ENAMETOOLONG;
   }

   /*
    * Find lower node and lock if not already locked.
    */
   NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, compat_td);
   error = namei(ndp);
   if (error) {
      NDFREE(ndp, 0);
      uma_zfree(VMBlockPathnameZone, pathname);
      return error;
   }
   NDFREE(ndp, NDF_ONLY_PNBUF);

   /*
    * Check multi VMBlock mount to avoid `lock against myself' panic.
    */
   lowerrootvp = ndp->ni_vp;
   if (lowerrootvp == VPTOVMB(mp->mnt_vnodecovered)->lowerVnode) {
      VMBLOCKDEBUG("VMBlockVFSMount: multi vmblock mount?\n");
      vput(lowerrootvp);
      uma_zfree(VMBlockPathnameZone, pathname);
      return EDEADLK;
   }

   xmp = malloc(sizeof *xmp, M_VMBLOCKFSMNT, M_WAITOK);

   /*
    * Record pointer (mountVFS) to the staging area's file system.  Follow up
    * by grabbing a VMBlockNode for our layer's root.
    */
   xmp->mountVFS = lowerrootvp->v_mount;
   /* On success, ownership of 'pathname' presumably passes to the node
    * (it is not freed on the success path) — TODO confirm. */
   error = VMBlockNodeGet(mp, lowerrootvp, &vp, pathname);

   /*
    * Make sure the node alias worked
    */
   if (error) {
      /*
       * NOTE(review): 'vp' is only an output of VMBlockNodeGet(); on
       * failure it may be unset, making this unlock suspect — verify
       * VMBlockNodeGet's contract on its error path.
       */
      COMPAT_VOP_UNLOCK(vp, 0, compat_td);
      vrele(lowerrootvp);
      free(xmp, M_VMBLOCKFSMNT);   /* XXX */
      uma_zfree(VMBlockPathnameZone, pathname);
      return error;
   }

   /*
    * Record a reference to the new filesystem's root vnode & mark it as such.
    */
   xmp->rootVnode = vp;
   xmp->rootVnode->v_vflag |= VV_ROOT;

   /*
    * Unlock the node (either the lower or the alias)
    */
   COMPAT_VOP_UNLOCK(vp, 0, compat_td);

   /*
    * If the staging area is a local filesystem, reflect that here, too.  (We
    * could potentially allow NFS staging areas.)
    */
   MNT_ILOCK(mp);
   mp->mnt_flag |= lowerrootvp->v_mount->mnt_flag & MNT_LOCAL;
#if __FreeBSD_version >= 600000 && __FreeBSD_version < 1000000
   mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag & MNTK_MPSAFE;
#endif
   MNT_IUNLOCK(mp);
   mp->mnt_data = (qaddr_t) xmp;
   vfs_getnewfsid(mp);

   vfs_mountedfrom(mp, target);

   VMBLOCKDEBUG("VMBlockVFSMount: lower %s, alias at %s\n",
                mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
   return 0;
}
/*
 * Common code for mount and mountroot.
 *
 * Allocates and initializes the nfsmount structure for 'mp', decodes the
 * user-supplied nfs_args, optionally connects the socket (UDP connects
 * eagerly; TCP defers to the first request), and instantiates the remote
 * root nfsnode in *vpp (returned referenced but unlocked).
 *
 * Ownership: 'nam' is consumed — freed here on every return path except
 * the success path, where it is stored in nmp->nm_nam.
 */
static int
mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam,
    char *hst, struct vnode **vpp, struct ucred *cred, int nametimeo,
    int negnametimeo)
{
	struct nfsmount *nmp;
	struct nfsnode *np;
	int error;
	struct vattr attrs;

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Updates are handled elsewhere nowadays; just release 'nam'. */
		nmp = VFSTONFS(mp);
		printf("%s: MNT_UPDATE is no longer handled here\n", __func__);
		free(nam, M_SONAME);
		return (0);
	} else {
		nmp = uma_zalloc(nfsmount_zone, M_WAITOK);
		bzero((caddr_t)nmp, sizeof (struct nfsmount));
		TAILQ_INIT(&nmp->nm_bufq);
		mp->mnt_data = nmp;
		nmp->nm_getinfo = nfs_getnlminfo;
		nmp->nm_vinvalbuf = nfs_vinvalbuf;
	}
	vfs_getnewfsid(mp);
	nmp->nm_mountp = mp;
	mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF);

	/*
	 * V2 can only handle 32 bit filesizes.  A 4GB-1 limit may be too
	 * high, depending on whether we end up with negative offsets in
	 * the client or server somewhere.  2GB-1 may be safer.
	 *
	 * For V3, nfs_fsinfo will adjust this as necessary.  Assume maximum
	 * that we can handle until we find out otherwise.
	 */
	if ((argp->flags & NFSMNT_NFSV3) == 0)
		nmp->nm_maxfilesize = 0xffffffffLL;
	else
		nmp->nm_maxfilesize = OFF_MAX;

	/* Default timers, transfer sizes and cache tunables. */
	nmp->nm_timeo = NFS_TIMEO;
	nmp->nm_retry = NFS_RETRANS;
	if ((argp->flags & NFSMNT_NFSV3) && argp->sotype == SOCK_STREAM) {
		nmp->nm_wsize = nmp->nm_rsize = NFS_MAXDATA;
	} else {
		nmp->nm_wsize = NFS_WSIZE;
		nmp->nm_rsize = NFS_RSIZE;
	}
	nmp->nm_wcommitsize = hibufspace / (desiredvnodes / 1000);
	nmp->nm_readdirsize = NFS_READDIRSIZE;
	nmp->nm_numgrps = NFS_MAXGRPS;
	nmp->nm_readahead = NFS_DEFRAHEAD;
	nmp->nm_deadthresh = NFS_MAXDEADTHRESH;
	nmp->nm_nametimeo = nametimeo;
	nmp->nm_negnametimeo = negnametimeo;
	nmp->nm_tprintf_delay = nfs_tprintf_delay;
	if (nmp->nm_tprintf_delay < 0)
		nmp->nm_tprintf_delay = 0;
	nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
	if (nmp->nm_tprintf_initial_delay < 0)
		nmp->nm_tprintf_initial_delay = 0;

	/* Stash the server file handle and display name. */
	nmp->nm_fhsize = argp->fhsize;
	bcopy((caddr_t)argp->fh, (caddr_t)nmp->nm_fh, argp->fhsize);
	bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN);
	nmp->nm_nam = nam;
	/* Set up the sockets and per-host congestion */
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;
	nmp->nm_rpcops = &nfs_rpcops;

	nfs_decode_args(mp, nmp, argp, hst);

	/*
	 * For Connection based sockets (TCP,...) defer the connect until
	 * the first request, in case the server is not responding.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM && (error = nfs_connect(nmp)))
		goto bad;

	/*
	 * This is silly, but it has to be set so that vinifod() works.
	 * We do not want to do an nfs_statfs() here since we can get
	 * stuck on a dead server and we are holding a lock on the mount
	 * point.
	 */
	mtx_lock(&nmp->nm_mtx);
	mp->mnt_stat.f_iosize = nfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * A reference count is needed on the nfsnode representing the
	 * remote root.  If this object is not persistent, then backward
	 * traversals of the mount point (i.e. "..") will not work if
	 * the nfsnode gets flushed out of the cache.  Ufs does not have
	 * this problem, because one can identify root inodes by their
	 * number == ROOTINO (2).
	 */
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np,
	    LK_EXCLUSIVE);
	if (error)
		goto bad;
	*vpp = NFSTOV(np);

	/*
	 * Get file attributes and transfer parameters for the
	 * mountpoint.  This has the side effect of filling in
	 * (*vpp)->v_type with the correct value.
	 */
	if (argp->flags & NFSMNT_NFSV3)
		nfs_fsinfo(nmp, *vpp, curthread->td_ucred, curthread);
	else
		VOP_GETATTR(*vpp, &attrs, curthread->td_ucred);

	/*
	 * Lose the lock but keep the ref.
	 */
	VOP_UNLOCK(*vpp, 0);

	return (0);
bad:
	/* Tear down everything built above; 'nam' is consumed here too. */
	nfs_disconnect(nmp);
	mtx_destroy(&nmp->nm_mtx);
	uma_zfree(nfsmount_zone, nmp);
	free(nam, M_SONAME);
	return (error);
}
/*
 * Mount umap layer.
 *
 * Stacks a uid/gid-remapping filesystem on top of args->umap_target.
 * Handles MNT_GETARGS (report current maps), rejects MNT_UPDATE, copies
 * the user/group mapping tables from user space and creates the root
 * layer node.
 *
 * Bug fixes relative to the original:
 *  - The oversized-table check returned the stale value of 'error'
 *    (0 at that point), so an invalid mount reported success.  It now
 *    returns EINVAL.
 *  - The error paths after 'amp' is allocated leaked it and left
 *    mp->mnt_data pointing at it; they now free 'amp' and clear
 *    mp->mnt_data via a common 'bad' label.
 */
int
umapfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct pathbuf *pb;
	struct nameidata nd;
	struct umap_args *args = data;
	struct vnode *lowerrootvp, *vp;
	struct umap_mount *amp;
	int error;
#ifdef UMAPFS_DIAGNOSTIC
	int i;
#endif

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		/* Report the currently active mapping sizes back. */
		amp = MOUNTTOUMAPMOUNT(mp);
		if (amp == NULL)
			return EIO;
		args->la.target = NULL;
		args->nentries = amp->info_nentries;
		args->gnentries = amp->info_gnentries;
		*data_len = sizeof *args;
		return 0;
	}

	/* only for root */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_UMAP, NULL, NULL, NULL);
	if (error)
		return error;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount(mp = %p)\n", mp);
#endif

	/*
	 * Update is not supported
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * Find lower node
	 */
	error = pathbuf_copyin(args->umap_target, &pb);
	if (error) {
		return error;
	}
	NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, pb);
	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		return error;
	}

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = nd.ni_vp;
	pathbuf_destroy(pb);
#ifdef UMAPFS_DIAGNOSTIC
	printf("vp = %p, check for VDIR...\n", lowerrootvp);
#endif

	if (lowerrootvp->v_type != VDIR) {
		vput(lowerrootvp);
		return (EINVAL);
	}

#ifdef UMAPFS_DIAGNOSTIC
	printf("mp = %p\n", mp);
#endif

	amp = kmem_zalloc(sizeof(struct umap_mount), KM_SLEEP);
	mp->mnt_data = amp;
	amp->umapm_vfs = lowerrootvp->v_mount;
	if (amp->umapm_vfs->mnt_flag & MNT_LOCAL)
		mp->mnt_flag |= MNT_LOCAL;

	/*
	 * Now copy in the number of entries and maps for umap mapping.
	 * Reject tables larger than the fixed in-kernel arrays before
	 * copying anything (the original returned the stale 'error'
	 * value — i.e. success — here).
	 */
	if (args->nentries > MAPFILEENTRIES ||
	    args->gnentries > GMAPFILEENTRIES) {
		error = EINVAL;
		goto bad;
	}

	amp->info_nentries = args->nentries;
	amp->info_gnentries = args->gnentries;
	error = copyin(args->mapdata, amp->info_mapdata,
	    2*sizeof(u_long)*args->nentries);
	if (error)
		goto bad;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:nentries %d\n",args->nentries);
	for (i = 0; i < args->nentries; i++)
		printf("   %ld maps to %ld\n", amp->info_mapdata[i][0],
		    amp->info_mapdata[i][1]);
#endif

	error = copyin(args->gmapdata, amp->info_gmapdata,
	    2*sizeof(u_long)*args->gnentries);
	if (error)
		goto bad;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:gnentries %d\n",args->gnentries);
	for (i = 0; i < args->gnentries; i++)
		printf("\tgroup %ld maps to %ld\n",
		    amp->info_gmapdata[i][0],
		    amp->info_gmapdata[i][1]);
#endif

	/*
	 * Make sure the mount point's sufficiently initialized
	 * that the node create call will work.
	 */
	vfs_getnewfsid(mp);
	amp->umapm_size = sizeof(struct umap_node);
	amp->umapm_tag = VT_UMAP;
	amp->umapm_bypass = umap_bypass;
	amp->umapm_vnodeop_p = umap_vnodeop_p;

	/*
	 * fix up umap node for root vnode.
	 */
	VOP_UNLOCK(lowerrootvp);
	error = layer_node_create(mp, lowerrootvp, &vp);
	/*
	 * Make sure the node alias worked
	 */
	if (error) {
		/* lowerrootvp is already unlocked here — drop the ref only. */
		vrele(lowerrootvp);
		kmem_free(amp, sizeof(struct umap_mount));
		mp->mnt_data = NULL;
		return error;
	}

	/*
	 * Keep a held reference to the root vnode.
	 * It is vrele'd in umapfs_unmount.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_ROOT;
	amp->umapm_rootvp = vp;
	VOP_UNLOCK(vp);

	error = set_statvfs_info(path, UIO_USERSPACE, args->umap_target,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount: lower %s, alias at %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif
	return error;

bad:
	/* Release the still-locked lower vnode and the partial mount. */
	vput(lowerrootvp);
	kmem_free(amp, sizeof(struct umap_mount));
	mp->mnt_data = NULL;
	return (error);
}
static int vboxvfs_mount(struct mount *mp, struct thread *td) { int rc; char *pszShare; int cbShare, cbOption; int uid = 0, gid = 0; struct sf_glob_info *pShFlGlobalInfo; SHFLSTRING *pShFlShareName = NULL; int cbShFlShareName; printf("%s: Enter\n", __FUNCTION__); if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS)) return EOPNOTSUPP; if (vfs_filteropt(mp->mnt_optnew, vboxvfs_opts)) { vfs_mount_error(mp, "%s", "Invalid option"); return EINVAL; } rc = vfs_getopt(mp->mnt_optnew, "from", (void **)&pszShare, &cbShare); if (rc || pszShare[cbShare-1] != '\0' || cbShare > 0xfffe) return EINVAL; rc = vfs_getopt(mp->mnt_optnew, "gid", (void **)&gid, &cbOption); if ((rc != ENOENT) && (rc || cbOption != sizeof(gid))) return EINVAL; rc = vfs_getopt(mp->mnt_optnew, "uid", (void **)&uid, &cbOption); if ((rc != ENOENT) && (rc || cbOption != sizeof(uid))) return EINVAL; pShFlGlobalInfo = RTMemAllocZ(sizeof(struct sf_glob_info)); if (!pShFlGlobalInfo) return ENOMEM; cbShFlShareName = offsetof (SHFLSTRING, String.utf8) + cbShare + 1; pShFlShareName = RTMemAllocZ(cbShFlShareName); if (!pShFlShareName) return VERR_NO_MEMORY; pShFlShareName->u16Length = cbShare; pShFlShareName->u16Size = cbShare + 1; memcpy (pShFlShareName->String.utf8, pszShare, cbShare + 1); rc = VbglR0SfMapFolder (&g_vboxSFClient, pShFlShareName, &pShFlGlobalInfo->map); RTMemFree(pShFlShareName); if (RT_FAILURE (rc)) { RTMemFree(pShFlGlobalInfo); printf("VbglR0SfMapFolder failed rc=%d\n", rc); return EPROTO; } pShFlGlobalInfo->uid = uid; pShFlGlobalInfo->gid = gid; mp->mnt_data = pShFlGlobalInfo; /** @todo root vnode. */ vfs_getnewfsid(mp); vfs_mountedfrom(mp, pszShare); printf("%s: Leave rc=0\n", __FUNCTION__); return 0; }
static int fuse_vfsop_mount(struct mount *mp) { int err; uint64_t mntopts, __mntopts; int max_read_set; uint32_t max_read; int daemon_timeout; int fd; size_t len; struct cdev *fdev; struct fuse_data *data; struct thread *td; struct file *fp, *fptmp; char *fspec, *subtype; struct vfsoptlist *opts; subtype = NULL; max_read_set = 0; max_read = ~0; err = 0; mntopts = 0; __mntopts = 0; td = curthread; fuse_trace_printf_vfsop(); if (mp->mnt_flag & MNT_UPDATE) return EOPNOTSUPP; mp->mnt_flag |= MNT_SYNCHRONOUS; mp->mnt_data = NULL; /* Get the new options passed to mount */ opts = mp->mnt_optnew; if (!opts) return EINVAL; /* `fspath' contains the mount point (eg. /mnt/fuse/sshfs); REQUIRED */ if (!vfs_getopts(opts, "fspath", &err)) return err; /* `from' contains the device name (eg. /dev/fuse0); REQUIRED */ fspec = vfs_getopts(opts, "from", &err); if (!fspec) return err; /* `fd' contains the filedescriptor for this session; REQUIRED */ if (vfs_scanopt(opts, "fd", "%d", &fd) != 1) return EINVAL; err = fuse_getdevice(fspec, td, &fdev); if (err != 0) return err; /* * With the help of underscored options the mount program * can inform us from the flags it sets by default */ FUSE_FLAGOPT(allow_other, FSESS_DAEMON_CAN_SPY); FUSE_FLAGOPT(push_symlinks_in, FSESS_PUSH_SYMLINKS_IN); FUSE_FLAGOPT(default_permissions, FSESS_DEFAULT_PERMISSIONS); FUSE_FLAGOPT(no_attrcache, FSESS_NO_ATTRCACHE); FUSE_FLAGOPT(no_readahed, FSESS_NO_READAHEAD); FUSE_FLAGOPT(no_datacache, FSESS_NO_DATACACHE); FUSE_FLAGOPT(no_namecache, FSESS_NO_NAMECACHE); FUSE_FLAGOPT(no_mmap, FSESS_NO_MMAP); FUSE_FLAGOPT(brokenio, FSESS_BROKENIO); if (vfs_scanopt(opts, "max_read=", "%u", &max_read) == 1) max_read_set = 1; if (vfs_scanopt(opts, "timeout=", "%u", &daemon_timeout) == 1) { if (daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT) daemon_timeout = FUSE_MIN_DAEMON_TIMEOUT; else if (daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) daemon_timeout = FUSE_MAX_DAEMON_TIMEOUT; } else { daemon_timeout = FUSE_DEFAULT_DAEMON_TIMEOUT; } 
subtype = vfs_getopts(opts, "subtype=", &err); FS_DEBUG2G("mntopts 0x%jx\n", (uintmax_t)mntopts); err = fget(td, fd, CAP_READ, &fp); if (err != 0) { FS_DEBUG("invalid or not opened device: data=%p\n", data); goto out; } fptmp = td->td_fpop; td->td_fpop = fp; err = devfs_get_cdevpriv((void **)&data); td->td_fpop = fptmp; fdrop(fp, td); FUSE_LOCK(); if (err != 0 || data == NULL || data->mp != NULL) { FS_DEBUG("invalid or not opened device: data=%p data.mp=%p\n", data, data != NULL ? data->mp : NULL); err = ENXIO; FUSE_UNLOCK(); goto out; } if (fdata_get_dead(data)) { FS_DEBUG("device is dead during mount: data=%p\n", data); err = ENOTCONN; FUSE_UNLOCK(); goto out; } /* Sanity + permission checks */ if (!data->daemoncred) panic("fuse daemon found, but identity unknown"); if (mntopts & FSESS_DAEMON_CAN_SPY) err = priv_check(td, PRIV_VFS_FUSE_ALLOWOTHER); if (err == 0 && td->td_ucred->cr_uid != data->daemoncred->cr_uid) /* are we allowed to do the first mount? */ err = priv_check(td, PRIV_VFS_FUSE_MOUNT_NONUSER); if (err) { FUSE_UNLOCK(); goto out; } /* We need this here as this slot is used by getnewvnode() */ mp->mnt_stat.f_iosize = PAGE_SIZE; mp->mnt_data = data; data->ref++; data->mp = mp; data->dataflags |= mntopts; data->max_read = max_read; data->daemon_timeout = daemon_timeout; #ifdef XXXIP if (!priv_check(td, PRIV_VFS_FUSE_SYNC_UNMOUNT)) data->dataflags |= FSESS_CAN_SYNC_UNMOUNT; #endif FUSE_UNLOCK(); vfs_getnewfsid(mp); mp->mnt_flag |= MNT_LOCAL; mp->mnt_kern_flag |= MNTK_MPSAFE; if (subtype) { strlcat(mp->mnt_stat.f_fstypename, ".", MFSNAMELEN); strlcat(mp->mnt_stat.f_fstypename, subtype, MFSNAMELEN); } copystr(fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &len); bzero(mp->mnt_stat.f_mntfromname + len, MNAMELEN - len); FS_DEBUG2G("mp %p: %s\n", mp, mp->mnt_stat.f_mntfromname); /* Now handshaking with daemon */ fuse_internal_send_init(data, td); out: if (err) { FUSE_LOCK(); if (data->mp == mp) { /* * Destroy device only if we acquired reference to * it */ 
FS_DEBUG("mount failed, destroy device: data=%p mp=%p" " err=%d\n", data, mp, err); data->mp = NULL; fdata_trydestroy(data); } FUSE_UNLOCK(); dev_rel(fdev); } return err; }
/* * mp - path - addr in user space of mount point (ie /usr or whatever) * data - addr in user space of mount params */ static int nwfs_mount(struct mount *mp) { struct nwfs_args args; /* will hold data from mount request */ int error; struct nwmount *nmp = NULL; struct ncp_conn *conn = NULL; struct ncp_handle *handle = NULL; struct vnode *vp; struct thread *td; char *pc,*pe; td = curthread; if (mp->mnt_flag & MNT_ROOTFS) return (EOPNOTSUPP); if (mp->mnt_flag & MNT_UPDATE) { nwfs_printf("MNT_UPDATE not implemented"); return (EOPNOTSUPP); } error = vfs_copyopt(mp->mnt_optnew, "nwfs_args", &args, sizeof args); if (error) return (error); if (args.version != NWFS_VERSION) { nwfs_printf("mount version mismatch: kernel=%d, mount=%d\n",NWFS_VERSION,args.version); return (1); } error = ncp_conn_getbyref(args.connRef, td , td->td_ucred,NCPM_EXECUTE,&conn); if (error) { nwfs_printf("invalid connection refernce %d\n",args.connRef); return (error); } error = ncp_conn_gethandle(conn, NULL, &handle); if (error) { nwfs_printf("can't get connection handle\n"); return (error); } ncp_conn_unlock(conn, td); /* we keep the ref */ mp->mnt_stat.f_iosize = conn->buffer_size; /* We must malloc our own mount info */ nmp = malloc(sizeof(struct nwmount),M_NWFSDATA, M_WAITOK | M_USE_RESERVE | M_ZERO); if (nmp == NULL) { nwfs_printf("could not alloc nwmount\n"); error = ENOMEM; goto bad; } mp->mnt_data = nmp; nmp->connh = handle; nmp->n_root = NULL; nmp->n_id = nwfsid++; nmp->m = args; nmp->m.file_mode = (nmp->m.file_mode & (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG; nmp->m.dir_mode = (nmp->m.dir_mode & (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR; if ((error = nwfs_initnls(nmp)) != 0) goto bad; pc = mp->mnt_stat.f_mntfromname; pe = pc+sizeof(mp->mnt_stat.f_mntfromname); bzero(pc, MNAMELEN); *(pc++) = '/'; pc = index(strncpy(pc, conn->li.server, pe-pc-2),0); if (pc < pe-1) { *(pc++) = ':'; pc=index(strncpy(pc, conn->li.user, pe-pc-2),0); if (pc < pe-1) { *(pc++) = '/'; strncpy(pc, nmp->m.mounted_vol, 
pe-pc-2); } } /* protect against invalid mount points */ nmp->m.mount_point[sizeof(nmp->m.mount_point)-1] = '\0'; vfs_getnewfsid(mp); error = nwfs_root(mp, LK_EXCLUSIVE, &vp); if (error) goto bad; /* * Lose the lock but keep the ref. */ VOP_UNLOCK(vp, 0); NCPVODEBUG("rootvp.vrefcnt=%d\n",vrefcnt(vp)); return error; bad: if (nmp) free(nmp, M_NWFSDATA); if (handle) ncp_conn_puthandle(handle, NULL, 0); return error; }
/*
 * Mount null layer
 *
 * Stacks a pass-through (loopback) filesystem on top of the "target"
 * mount option's vnode.  Propagates MNT_LOCAL and (when caching is
 * enabled) the shared-lookup kernel flags from the lower mount, and
 * registers this mount on the lower mount's mnt_uppers list so lower
 * vnode reclaims can be reflected upward.
 */
static int
nullfs_mount(struct mount *mp)
{
	int error = 0;
	struct vnode *lowerrootvp, *vp;
	struct vnode *nullm_rootvp;
	struct null_mount *xmp;
	struct thread *td = curthread;
	char *target;
	int isvnunlocked = 0, len;
	struct nameidata nd, *ndp = &nd;

	NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);

	if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_NULLFS))
		return (EPERM);
	if (mp->mnt_flag & MNT_ROOTFS)
		return (EOPNOTSUPP);

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Only support update mounts for NFS export.
		 */
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0))
			return (0);
		else
			return (EOPNOTSUPP);
	}

	/*
	 * Get argument
	 */
	error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
	if (error || target[len - 1] != '\0')
		return (EINVAL);

	/*
	 * Unlock lower node to avoid possible deadlock.
	 */
	if ((mp->mnt_vnodecovered->v_op == &null_vnodeops) &&
	    VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
		VOP_UNLOCK(mp->mnt_vnodecovered, 0);
		isvnunlocked = 1;
	}
	/*
	 * Find lower node
	 */
	NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, curthread);
	error = namei(ndp);

	/*
	 * Re-lock vnode.
	 * XXXKIB This is deadlock-prone as well.
	 */
	if (isvnunlocked)
		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);

	if (error)
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = ndp->ni_vp;

	/*
	 * Check multi null mount to avoid `lock against myself' panic.
	 */
	if (lowerrootvp == VTONULL(mp->mnt_vnodecovered)->null_lowervp) {
		NULLFSDEBUG("nullfs_mount: multi null mount?\n");
		vput(lowerrootvp);
		return (EDEADLK);
	}

	xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
	    M_NULLFSMNT, M_WAITOK | M_ZERO);

	/*
	 * Save reference to underlying FS
	 */
	xmp->nullm_vfs = lowerrootvp->v_mount;

	/*
	 * Save reference.  Each mount also holds
	 * a reference on the root vnode.
	 */
	error = null_nodeget(mp, lowerrootvp, &vp);
	/*
	 * Make sure the node alias worked
	 */
	if (error) {
		free(xmp, M_NULLFSMNT);
		return (error);
	}

	/*
	 * Keep a held reference to the root vnode.
	 * It is vrele'd in nullfs_unmount.
	 */
	nullm_rootvp = vp;
	nullm_rootvp->v_vflag |= VV_ROOT;
	xmp->nullm_rootvp = nullm_rootvp;

	/*
	 * Unlock the node (either the lower or the alias)
	 */
	VOP_UNLOCK(vp, 0);

	if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_LOCAL;
		MNT_IUNLOCK(mp);
	}

	/* Caching defaults on; "nocache" option disables it. */
	xmp->nullm_flags |= NULLM_CACHE;
	if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0)
		xmp->nullm_flags &= ~NULLM_CACHE;

	MNT_ILOCK(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		/* Inherit the lower FS's shared-lookup capabilities. */
		mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
		    (MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
		    MNTK_EXTENDED_SHARED);
	}
	mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT;
	MNT_IUNLOCK(mp);
	mp->mnt_data = xmp;
	vfs_getnewfsid(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		/* Register with the lower mount for reclaim notifications. */
		MNT_ILOCK(xmp->nullm_vfs);
		TAILQ_INSERT_TAIL(&xmp->nullm_vfs->mnt_uppers, mp,
		    mnt_upper_link);
		MNT_IUNLOCK(xmp->nullm_vfs);
	}

	vfs_mountedfrom(mp, target);

	NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
		mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
	return (0);
}
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Copies the optional devfs_mount_info from user space, initializes the
 * statfs identity strings, allocates the per-mount devfs_mnt_data with
 * its root node (under devfs_lock), installs the devfs vnode ops and
 * registers the mount with the devfs core.
 *
 * Bug fix relative to the original: the copyinstr() of the user-supplied
 * mount path was unchecked, silently ignoring EFAULT and leaving
 * f_mntonname possibly garbage.  It is now checked (and performed before
 * the mount state is mutated).
 */
static int
devfs_vfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct devfs_mount_info info;
	struct devfs_mnt_data *mnt;
	size_t size;
	int error;

	devfs_debug(DEVFS_DEBUG_DEBUG, "(vfsops) devfs_mount() called!\n");

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	if (data == NULL) {
		bzero(&info, sizeof(info));
	} else {
		if ((error = copyin(data, &info, sizeof(info))) != 0)
			return (error);
	}

	/* Copy the mount point name in early, before mutating 'mp'. */
	error = copyinstr(path, mp->mnt_stat.f_mntonname,
	    sizeof(mp->mnt_stat.f_mntonname) - 1, &size);
	if (error)
		return (error);

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_NOSTKMNT | MNTK_ALL_MPSAFE;
	mp->mnt_data = NULL;
	vfs_getnewfsid(mp);

	size = sizeof("devfs") - 1;
	bcopy("devfs", mp->mnt_stat.f_mntfromname, size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	devfs_vfs_statfs(mp, &mp->mnt_stat, cred);

	/*
	 * XXX: save other mount info passed from userland or so.
	 */
	mnt = kmalloc(sizeof(*mnt), M_DEVFS, M_WAITOK | M_ZERO);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	mp->mnt_data = (qaddr_t)mnt;

	/* A jailed mount is either requested explicitly or inherited. */
	if (info.flags & DEVFS_MNT_JAIL)
		mnt->jailed = 1;
	else
		mnt->jailed = jailed(cred);

	mnt->leak_count = 0;
	mnt->file_count = 0;
	mnt->mp = mp;
	TAILQ_INIT(&mnt->orphan_list);
	mnt->root_node = devfs_allocp(Nroot, "", NULL, mp, NULL);
	KKASSERT(mnt->root_node);
	lockmgr(&devfs_lock, LK_RELEASE);

	vfs_add_vnodeops(mp, &devfs_vnode_norm_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &devfs_vnode_dev_vops, &mp->mnt_vn_spec_ops);

	devfs_debug(DEVFS_DEBUG_DEBUG, "calling devfs_mount_add\n");
	devfs_mount_add(mnt);

	return (0);
}
int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l, const char *devpath) { hfs_callback_args cbargs; hfs_libcb_argsopen argsopen; hfs_libcb_argsread argsread; struct hfsmount *hmp; kauth_cred_t cred; int error; cred = l ? l->l_cred : NOCRED; error = 0; hmp = NULL; /* Create mounted volume structure. */ hmp = (struct hfsmount*)malloc(sizeof(struct hfsmount), M_HFSMNT, M_WAITOK); if (hmp == NULL) { error = ENOMEM; goto error; } memset(hmp, 0, sizeof(struct hfsmount)); mp->mnt_data = hmp; mp->mnt_flag |= MNT_LOCAL; vfs_getnewfsid(mp); hmp->hm_mountp = mp; hmp->hm_dev = devvp->v_rdev; hmp->hm_devvp = devvp; /* * Use libhfs to open the volume and read the volume header and other * useful information. */ hfslib_init_cbargs(&cbargs); argsopen.cred = argsread.cred = cred; argsopen.l = argsread.l = l; argsopen.devvp = devvp; cbargs.read = (void*)&argsread; cbargs.openvol = (void*)&argsopen; if ((error = hfslib_open_volume(devpath, mp->mnt_flag & MNT_RDONLY, &hmp->hm_vol, &cbargs)) != 0) goto error; /* Make sure this is not a journaled volume whose journal is dirty. */ if (!hfslib_is_journal_clean(&hmp->hm_vol)) { printf("volume journal is dirty; not mounting\n"); error = EIO; goto error; } mp->mnt_fs_bshift = 0; while ((1 << mp->mnt_fs_bshift) < hmp->hm_vol.vh.block_size) mp->mnt_fs_bshift++; mp->mnt_dev_bshift = DEV_BSHIFT; return 0; error: if (hmp != NULL) free(hmp, M_HFSMNT); return error; }
/*
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params
 *
 * Mount a NetWare (NCP) filesystem.  The caller supplies a struct
 * nwfs_args containing an already-established connection reference
 * (connRef); this routine looks the connection up, takes a handle on
 * it, allocates the nwmount, and resolves the root vnode.
 *
 * NOTE(review): several failure paths return the bare constant 1
 * rather than an errno value (EINVAL would be conventional) --
 * preserved here, but worth confirming against the mount(2) callers.
 */
static int
nwfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct nwfs_args args;	  /* will hold data from mount request */
	int error;
	struct nwmount *nmp = NULL;
	struct ncp_conn *conn = NULL;
	struct ncp_handle *handle = NULL;
	struct vnode *vp;
	char *pc, *pe;

	if (data == NULL) {
		nwfs_printf("missing data argument\n");
		return 1;
	}
	if (mp->mnt_flag & MNT_UPDATE) {
		nwfs_printf("MNT_UPDATE not implemented");
		return (EOPNOTSUPP);
	}
	error = copyin(data, (caddr_t)&args, sizeof(struct nwfs_args));
	if (error)
		return (error);
	/* Userland mount helper and kernel must agree on the args layout. */
	if (args.version != NWFS_VERSION) {
		nwfs_printf("mount version mismatch: kernel=%d, mount=%d\n",NWFS_VERSION,args.version);
		return (1);
	}
	error = ncp_conn_getbyref(args.connRef,curthread,cred,NCPM_EXECUTE,&conn);
	if (error) {
		nwfs_printf("invalid connection reference %d\n",args.connRef);
		return (error);
	}
	error = ncp_conn_gethandle(conn, NULL, &handle);
	if (error) {
		nwfs_printf("can't get connection handle\n");
		/*
		 * NOTE(review): conn was locked by ncp_conn_getbyref() above;
		 * returning here without ncp_conn_unlock() looks like it
		 * leaks the connection lock -- verify against the NCP
		 * connection locking protocol.
		 */
		return (error);
	}
	ncp_conn_unlock(conn, curthread);	/* we keep the ref */
	mp->mnt_stat.f_iosize = conn->buffer_size;
	/* We must malloc our own mount info */
	nmp = kmalloc(sizeof(struct nwmount), M_NWFSDATA,
	    M_WAITOK | M_USE_RESERVE | M_ZERO);
	mp->mnt_data = (qaddr_t)nmp;
	nmp->connh = handle;
	nmp->n_root = NULL;
	nmp->n_id = nwfsid++;	/* NOTE(review): not atomic -- confirm single-threaded mounts */
	nmp->m = args;		/* struct copy of the user-supplied args */
	/* Force sane type bits on the user-supplied permission modes. */
	nmp->m.file_mode = (nmp->m.file_mode & (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;
	nmp->m.dir_mode = (nmp->m.dir_mode & (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;
	if ((error = nwfs_initnls(nmp)) != 0)
		goto bad;
	/*
	 * Build f_mntfromname as "/server:user/volume", truncating each
	 * component to the space remaining in the fixed-size buffer.
	 */
	pc = mp->mnt_stat.f_mntfromname;
	pe = pc + sizeof(mp->mnt_stat.f_mntfromname);
	bzero(pc, MNAMELEN);
	*(pc++) = '/';
	pc = index(strncpy(pc, conn->li.server, pe - pc - 2), 0);
	if (pc < pe - 1) {
		*(pc++) = ':';
		pc = index(strncpy(pc, conn->li.user, pe - pc - 2), 0);
		if (pc < pe - 1) {
			*(pc++) = '/';
			strncpy(pc, nmp->m.mounted_vol, pe - pc - 2);
		}
	}
	/* protect against invalid mount points */
	nmp->m.mount_point[sizeof(nmp->m.mount_point)-1] = '\0';
	vfs_add_vnodeops(mp, &nwfs_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_getnewfsid(mp);
	error = nwfs_root(mp, &vp);
	if (error)
		goto bad;
	/*
	 * Lose the lock but keep the ref.
	 */
	vn_unlock(vp);
	NCPVODEBUG("rootvp.vrefcnt=%d\n",vp->v_sysref.refcnt);
	return error;
bad:
	if (nmp)
		kfree(nmp, M_NWFSDATA);
	if (handle)
		ncp_conn_puthandle(handle, NULL, 0);
	return error;
}
/*
 * Mount union filesystem.
 *
 * mp       - mount being populated
 * path     - user-space path of the mount point (for statvfs info)
 * data     - struct union_args (target path + mntflags)
 * data_len - in: size of *data; out (MNT_GETARGS only): bytes returned
 *
 * The covered vnode is always one layer; args->target names the other.
 * UNMNT_ABOVE/BELOW select which is upper, UNMNT_REPLACE drops the
 * covered vnode entirely.  Returns 0 or an errno; on failure all vnode
 * references taken here are released.
 */
int
union_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	int error = 0;
	struct union_args *args = data;
	struct vnode *lowerrootvp = NULLVP;
	struct vnode *upperrootvp = NULLVP;
	struct union_mount *um = 0;
	const char *cp;
	char *xp;
	int len;
	size_t size;

	if (*data_len < sizeof *args)
		return EINVAL;

#ifdef UNION_DIAGNOSTIC
	printf("union_mount(mp = %p)\n", mp);
#endif

	/* MNT_GETARGS: report the current options back to userland. */
	if (mp->mnt_flag & MNT_GETARGS) {
		um = MOUNTTOUNIONMOUNT(mp);
		if (um == NULL)
			return EIO;
		args->target = NULL;
		args->mntflags = um->um_op;
		*data_len = sizeof *args;
		return 0;
	}

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Need to provide.
		 * 1. a way to convert between rdonly and rdwr mounts.
		 * 2. support for nfs exports.
		 */
		error = EOPNOTSUPP;
		goto bad;
	}

	/* The covered vnode is one of the two layers; hold a reference. */
	lowerrootvp = mp->mnt_vnodecovered;
	vref(lowerrootvp);

	/*
	 * Find upper node.
	 */
	error = namei_simple_user(args->target,
	    NSM_FOLLOW_NOEMULROOT, &upperrootvp);
	if (error != 0)
		goto bad;

	if (upperrootvp->v_type != VDIR) {
		error = EINVAL;
		goto bad;
	}

	um = kmem_zalloc(sizeof(struct union_mount), KM_SLEEP);

	/*
	 * Keep a held reference to the target vnodes.
	 * They are vrele'd in union_unmount.
	 *
	 * Depending on the _BELOW flag, the filesystems are
	 * viewed in a different order.  In effect, this is the
	 * same as providing a mount under option to the mount syscall.
	 */
	um->um_op = args->mntflags & UNMNT_OPMASK;
	switch (um->um_op) {
	case UNMNT_ABOVE:
		um->um_lowervp = lowerrootvp;
		um->um_uppervp = upperrootvp;
		break;

	case UNMNT_BELOW:
		um->um_lowervp = upperrootvp;
		um->um_uppervp = lowerrootvp;
		break;

	case UNMNT_REPLACE:
		/* Only one layer: the covered vnode is dropped entirely. */
		vrele(lowerrootvp);
		lowerrootvp = NULLVP;
		um->um_uppervp = upperrootvp;
		um->um_lowervp = lowerrootvp;
		break;

	default:
		error = EINVAL;
		goto bad;
	}

	mp->mnt_iflag |= IMNT_MPSAFE;

	/*
	 * Unless the mount is readonly, ensure that the top layer
	 * supports whiteout operations
	 */
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		/* Probe with a NULL componentname; LOOKUP only checks support. */
		vn_lock(um->um_uppervp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WHITEOUT(um->um_uppervp,
		    (struct componentname *) 0, LOOKUP);
		VOP_UNLOCK(um->um_uppervp);
		if (error)
			goto bad;
	}

	/* Credentials and directory create-mode for shadow directories. */
	um->um_cred = l->l_cred;
	kauth_cred_hold(um->um_cred);
	um->um_cmode = UN_DIRMODE &~ l->l_proc->p_cwdi->cwdi_cmask;

	/*
	 * Depending on what you think the MNT_LOCAL flag might mean,
	 * you may want the && to be || on the conditional below.
	 * At the moment it has been defined that the filesystem is
	 * only local if it is all local, ie the MNT_LOCAL flag implies
	 * that the entire namespace is local.  If you think the MNT_LOCAL
	 * flag implies that some of the files might be stored locally
	 * then you will want to change the conditional.
	 */
	if (um->um_op == UNMNT_ABOVE) {
		if (((um->um_lowervp == NULLVP) ||
		     (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
		    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
			mp->mnt_flag |= MNT_LOCAL;
	}

	/*
	 * Copy in the upper layer's RDONLY flag.  This is for the benefit
	 * of lookup() which explicitly checks the flag, rather than asking
	 * the filesystem for its own opinion.  This means, that an update
	 * mount of the underlying filesystem to go from rdonly to rdwr
	 * will leave the unioned view as read-only.
	 */
	mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

	mp->mnt_data = um;
	vfs_getnewfsid(mp);

	error = set_statvfs_info( path, UIO_USERSPACE, NULL, UIO_USERSPACE,
	    mp->mnt_op->vfs_name, mp, l);
	if (error)
		goto bad;

	/* Build "f_mntfromname" as "<above>:/target" (or below/replace). */
	switch (um->um_op) {
	case UNMNT_ABOVE:
		cp = "<above>:";
		break;
	case UNMNT_BELOW:
		cp = "<below>:";
		break;
	case UNMNT_REPLACE:
		cp = "";
		break;
	default:
		cp = "<invalid>:";
#ifdef DIAGNOSTIC
		panic("union_mount: bad um_op");
#endif
		break;
	}

	len = strlen(cp);
	memcpy(mp->mnt_stat.f_mntfromname, cp, len);

	xp = mp->mnt_stat.f_mntfromname + len;
	len = MNAMELEN - len;

	/* Best-effort copy of the target path; remainder is zero-filled. */
	(void) copyinstr(args->target, xp, len - 1, &size);
	memset(xp + size, 0, len - size);

#ifdef UNION_DIAGNOSTIC
	printf("union_mount: from %s, on %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif

	/* Setup the readdir hook if it's not set already */
	if (!vn_union_readdir_hook)
		vn_union_readdir_hook = union_readdirhook;

	return (0);

bad:
	/* Release whatever was acquired before the failure. */
	if (um)
		kmem_free(um, sizeof(struct union_mount));
	if (upperrootvp)
		vrele(upperrootvp);
	if (lowerrootvp)
		vrele(lowerrootvp);
	return (error);
}
/*
 * Mount a host directory as a dirfs filesystem (vkernel).
 *
 * mp   - mount being populated
 * path - mount point path (unused here)
 * data - user (or kernel) address of the host directory path string
 * cred - credentials of the mounting process
 *
 * MNT_UPDATE only toggles the read-only state.  Returns 0 on success or
 * an errno; on failure no dirfs state is left attached to the mount.
 */
static int
dirfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	dirfs_mount_t dmp;
	struct stat st;
	size_t done, nlen;
	int error;

	dbg(1, "called\n");

	if (mp->mnt_flag & MNT_UPDATE) {
		dmp = VFS_TO_DIRFS(mp);
		if (dmp->dm_rdonly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* XXX We should make sure all writes are synced */
			dmp->dm_rdonly = 1;
			debug(2, "dirfs read-write -> read-only\n");
		}

		if (dmp->dm_rdonly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			debug(2, "dirfs read-only -> read-write\n");
			dmp->dm_rdonly = 0;
		}
		return 0;
	}

	dmp = kmalloc(sizeof(*dmp), M_DIRFS, M_WAITOK | M_ZERO);
	mp->mnt_data = (qaddr_t)dmp;
	dmp->dm_mount = mp;

	/* The path may come from userland or from a kernel address. */
	error = copyinstr(data, &dmp->dm_path, MAXPATHLEN, &done);
	if (error) {
		/* Attempt to copy from kernel address */
		error = copystr(data, &dmp->dm_path, MAXPATHLEN, &done);
		if (error)
			goto failure;
	}
	/*
	 * Strip / character at the end to avoid problems.  Guard against
	 * an empty path: the old code indexed dm_path[nlen-1] even when
	 * nlen was 0, reading/writing before the buffer.
	 */
	nlen = strnlen(dmp->dm_path, MAXPATHLEN);
	if (nlen > 0 && dmp->dm_path[nlen - 1] == '/')
		dmp->dm_path[nlen - 1] = 0;

	/* Make sure host directory exists and it is indeed a directory. */
	if ((stat(dmp->dm_path, &st)) == 0) {
		if (!S_ISDIR(st.st_mode)) {
			error = EINVAL;
			goto failure;
		}
	} else {
		/* Old code leaked dmp here; now released at failure:. */
		error = errno;
		goto failure;
	}

	lockinit(&dmp->dm_lock, "dfsmnt", 0, LK_CANRECURSE);

	vfs_add_vnodeops(mp, &dirfs_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_getnewfsid(mp);

	/* Who is running the vkernel */
	dmp->dm_uid = getuid();
	dmp->dm_gid = getgid();

	TAILQ_INIT(&dmp->dm_fdlist);
	RB_INIT(&dmp->dm_inotree);

	kmalloc_raise_limit(M_DIRFS_NODE, 0);

	dirfs_statfs(mp, &mp->mnt_stat, cred);

	KTR_LOG(dirfs_mount, dmp->dm_path, dmp, mp, 0);
	return 0;

failure:
	/*
	 * Single cleanup path for all pre-lockinit failures.  Log before
	 * freeing (the old code logged dmp->dm_path after kfree(dmp), a
	 * use-after-free) and detach dmp from the mount so mp->mnt_data
	 * is never left dangling.
	 */
	KTR_LOG(dirfs_mount, "NULL", NULL, mp, error);
	mp->mnt_data = NULL;
	kfree(dmp, M_DIRFS);
	return error;
}