/*
 * umapfs_unmount: VFS unmount entry point for the umap layer.
 *
 * Free reference to umap layer: flush the layer's vnodes, destroy the
 * root alias vnode and release the per-mount umap_mount structure.
 *
 * mp       => the layer mount being torn down.
 * mntflags => MNT_FORCE forces unmount despite active references.
 *
 * Returns 0 on success, EBUSY if the root alias is still in use and
 * MNT_FORCE is not set, or the error returned by vflush().
 */
int
umapfs_unmount(struct mount *mp, int mntflags)
{
	struct umap_mount *amp = MOUNTTOUMAPMOUNT(mp);
	struct vnode *rtvp = amp->umapm_rootvp;
	int error;
	int flags = 0;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_unmount(mp = %p)\n", mp);
#endif

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/*
	 * A use count above 1 on the root alias means somebody other
	 * than this mount still holds it; refuse unless forced.
	 */
	if (rtvp->v_usecount > 1 && (mntflags & MNT_FORCE) == 0)
		return (EBUSY);
	/* Flush the mount's vnodes, passing rtvp as the vnode to skip
	 * (see vflush(9)); rtvp itself is dealt with below. */
	if ((error = vflush(mp, rtvp, flags)) != 0)
		return (error);

#ifdef UMAPFS_DIAGNOSTIC
	vprint("alias root of lower", rtvp);
#endif
	/*
	 * Blow it away for future re-use
	 */
	vgone(rtvp);
	/*
	 * Finally, throw away the umap_mount structure
	 */
	kmem_free(amp, sizeof(struct umap_mount));
	mp->mnt_data = NULL;
	return 0;
}
/*
 * umapfs_mount: VFS mount entry point for the umap layer.
 *
 * Mount the umap layer onto an existing directory: look up the lower
 * (target) directory, allocate and initialize the per-mount umap_mount
 * structure, copy the uid/gid mapping tables in from userland, and
 * create the layered root vnode.
 *
 * mp       => mount point being set up.
 * path     => user-space path where the layer is mounted.
 * data     => user-supplied struct umap_args.
 * data_len => size of *data; updated on MNT_GETARGS.
 *
 * Returns 0 on success or an errno value.
 */
int
umapfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct pathbuf *pb;
	struct nameidata nd;
	struct umap_args *args = data;
	struct vnode *lowerrootvp, *vp;
	struct umap_mount *amp;
	int error;
#ifdef UMAPFS_DIAGNOSTIC
	int i;
#endif

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	/* MNT_GETARGS: report the current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		amp = MOUNTTOUMAPMOUNT(mp);
		if (amp == NULL)
			return EIO;
		args->la.target = NULL;
		args->nentries = amp->info_nentries;
		args->gnentries = amp->info_gnentries;
		*data_len = sizeof *args;
		return 0;
	}

	/* only for root */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
	    KAUTH_REQ_SYSTEM_MOUNT_UMAP, NULL, NULL, NULL);
	if (error)
		return error;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount(mp = %p)\n", mp);
#endif

	/*
	 * Update is not supported
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * Find lower node
	 */
	error = pathbuf_copyin(args->umap_target, &pb);
	if (error) {
		return error;
	}
	NDINIT(&nd, LOOKUP, FOLLOW|LOCKLEAF, pb);
	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		return error;
	}

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = nd.ni_vp;		/* locked (LOCKLEAF) + referenced */
	pathbuf_destroy(pb);

#ifdef UMAPFS_DIAGNOSTIC
	printf("vp = %p, check for VDIR...\n", lowerrootvp);
#endif

	if (lowerrootvp->v_type != VDIR) {
		vput(lowerrootvp);
		return (EINVAL);
	}

#ifdef UMAPFS_DIAGNOSTIC
	printf("mp = %p\n", mp);
#endif

	amp = kmem_zalloc(sizeof(struct umap_mount), KM_SLEEP);
	mp->mnt_data = amp;

	amp->umapm_vfs = lowerrootvp->v_mount;
	if (amp->umapm_vfs->mnt_flag & MNT_LOCAL)
		mp->mnt_flag |= MNT_LOCAL;

	/*
	 * Now copy in the number of entries and maps for umap mapping.
	 * Validate the table sizes before using them to size the
	 * copyin()s below; nentries/gnentries are signed, so negative
	 * values must be rejected too (they would wrap the size_t
	 * multiplication).
	 */
	if (args->nentries < 0 || args->nentries > MAPFILEENTRIES ||
	    args->gnentries < 0 || args->gnentries > GMAPFILEENTRIES) {
		/*
		 * This used to "return (error)" with error still 0 from
		 * the successful namei(), making the failed mount look
		 * successful.  Return EINVAL instead.
		 */
		error = EINVAL;
		goto bad;
	}

	amp->info_nentries = args->nentries;
	amp->info_gnentries = args->gnentries;
	error = copyin(args->mapdata, amp->info_mapdata,
	    2*sizeof(u_long)*args->nentries);
	if (error)
		goto bad;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:nentries %d\n",args->nentries);
	for (i = 0; i < args->nentries; i++)
		printf(" %ld maps to %ld\n", amp->info_mapdata[i][0],
		    amp->info_mapdata[i][1]);
#endif

	error = copyin(args->gmapdata, amp->info_gmapdata,
	    2*sizeof(u_long)*args->gnentries);
	if (error)
		goto bad;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umap_mount:gnentries %d\n",args->gnentries);
	for (i = 0; i < args->gnentries; i++)
		printf("\tgroup %ld maps to %ld\n",
		    amp->info_gmapdata[i][0],
		    amp->info_gmapdata[i][1]);
#endif

	/*
	 * Make sure the mount point's sufficiently initialized
	 * that the node create call will work.
	 */
	vfs_getnewfsid(mp);
	amp->umapm_size = sizeof(struct umap_node);
	amp->umapm_tag = VT_UMAP;
	amp->umapm_bypass = umap_bypass;
	amp->umapm_vnodeop_p = umap_vnodeop_p;

	/*
	 * fix up umap node for root vnode.
	 */
	VOP_UNLOCK(lowerrootvp);
	error = layer_node_create(mp, lowerrootvp, &vp);
	/*
	 * Make sure the node alias worked.  lowerrootvp was unlocked
	 * above, so the reference is dropped with vrele(), not vput().
	 */
	if (error) {
		vrele(lowerrootvp);
		kmem_free(amp, sizeof(struct umap_mount));
		mp->mnt_data = NULL;	/* don't leave a stale pointer */
		return error;
	}

	/*
	 * Keep a held reference to the root vnode.
	 * It is vrele'd in umapfs_unmount.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_ROOT;
	amp->umapm_rootvp = vp;
	VOP_UNLOCK(vp);

	error = set_statvfs_info(path, UIO_USERSPACE, args->umap_target,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_mount: lower %s, alias at %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif
	return error;

bad:
	/*
	 * Failure after the umap_mount structure was allocated: release
	 * the still-locked lower vnode and free the structure.  (The
	 * original code leaked amp and left mp->mnt_data pointing at it
	 * on these paths.)
	 */
	vput(lowerrootvp);
	kmem_free(amp, sizeof(struct umap_mount));
	mp->mnt_data = NULL;
	return error;
}
/* * This is the 08-June-1999 bypass routine. * See layer_vnops.c:layer_bypass for more details. */ int umap_bypass(void *v) { struct vop_generic_args /* { struct vnodeop_desc *a_desc; <other random data follows, presumably> } */ *ap = v; int (**our_vnodeop_p)(void *); kauth_cred_t *credpp = NULL, credp = 0; kauth_cred_t savecredp = 0, savecompcredp = 0; kauth_cred_t compcredp = 0; struct vnode **this_vp_p; int error; struct vnode *old_vps[VDESC_MAX_VPS], *vp0; struct vnode **vps_p[VDESC_MAX_VPS]; struct vnode ***vppp; struct vnodeop_desc *descp = ap->a_desc; int reles, i, flags; struct componentname **compnamepp = 0; #ifdef DIAGNOSTIC /* * We require at least one vp. */ if (descp->vdesc_vp_offsets == NULL || descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET) panic("%s: no vp's in map.\n", __func__); #endif vps_p[0] = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap); vp0 = *vps_p[0]; flags = MOUNTTOUMAPMOUNT(vp0->v_mount)->umapm_flags; our_vnodeop_p = vp0->v_op; if (flags & LAYERFS_MBYPASSDEBUG) printf("%s: %s\n", __func__, descp->vdesc_name); /* * Map the vnodes going in. * Later, we'll invoke the operation based on * the first mapped vnode's operation vector. */ reles = descp->vdesc_flags; for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) { if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET) break; /* bail out at end of list */ vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap); /* * We're not guaranteed that any but the first vnode * are of our type. Check for and don't map any * that aren't. (We must always map first vp or vclean fails.) */ if (i && (*this_vp_p == NULL || (*this_vp_p)->v_op != our_vnodeop_p)) { old_vps[i] = NULL; } else { old_vps[i] = *this_vp_p; *(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p); /* * XXX - Several operations have the side effect * of vrele'ing their vp's. We must account for * that. (This should go away in the future.) 
*/ if (reles & VDESC_VP0_WILLRELE) vref(*this_vp_p); } } /* * Fix the credentials. (That's the purpose of this layer.) */ if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) { credpp = VOPARG_OFFSETTO(kauth_cred_t*, descp->vdesc_cred_offset, ap); /* Save old values */ savecredp = *credpp; if (savecredp != NOCRED && savecredp != FSCRED) *credpp = kauth_cred_dup(savecredp); credp = *credpp; if ((flags & LAYERFS_MBYPASSDEBUG) && kauth_cred_geteuid(credp) != 0) printf("umap_bypass: user was %d, group %d\n", kauth_cred_geteuid(credp), kauth_cred_getegid(credp)); /* Map all ids in the credential structure. */ umap_mapids(vp0->v_mount, credp); if ((flags & LAYERFS_MBYPASSDEBUG) && kauth_cred_geteuid(credp) != 0) printf("umap_bypass: user now %d, group %d\n", kauth_cred_geteuid(credp), kauth_cred_getegid(credp)); }