/*
 * nm_inactive - VOP_INACTIVE for namefs nodes.
 *
 * Drops the caller's reference; if this was the last reference, tears
 * the namenode down: closes the underlying file (unless this node
 * represents a mount of a named object, NMNMNT), frees the vnode,
 * releases the vfs hold taken at node creation, frees the namenode
 * number, and frees the namenode itself.
 */
/* ARGSUSED */
static void
nm_inactive(vnode_t *vp, cred_t *crp, caller_context_t *ct)
{
	struct namenode *nodep = VTONM(vp);
	vfs_t *vfsp = vp->v_vfsp;

	mutex_enter(&vp->v_lock);
	ASSERT(vp->v_count >= 1);
	/* Someone else may have re-held the vnode; only the last ref cleans up */
	if (--vp->v_count != 0) {
		mutex_exit(&vp->v_lock);
		return;
	}
	mutex_exit(&vp->v_lock);
	if (!(nodep->nm_flag & NMNMNT)) {
		/* Drop the file reference taken when the node was fattach'ed */
		ASSERT(nodep->nm_filep->f_vnode == nodep->nm_filevp);
		(void) closef(nodep->nm_filep);
	}
	vn_invalid(vp);
	vn_free(vp);
	/* namevfs itself is never held via VFS_HOLD, so never VFS_RELE it */
	if (vfsp != &namevfs)
		VFS_RELE(vfsp);
	namenodeno_free(nodep->nm_vattr.va_nodeid);
	kmem_free(nodep, sizeof (struct namenode));
}
/* * dsl_crypto_key_unload * * Remove the key from the in memory keystore. * * First we have to remove the minor node for a ZVOL or unmount * the filesystem. This is so that we flush all pending IO for it to disk * so we won't need to encrypt anything with this key. Anything in flight * should already have a lock on the keys it needs. * We can't assume that userland has already successfully unmounted the * dataset though in many cases it will have. * * If the key can't be removed return the failure back to our caller. */ int dsl_crypto_key_unload(const char *dsname) { dsl_dataset_t *ds; objset_t *os; int error; spa_t *spa; dsl_pool_t *dp; #ifdef _KERNEL dmu_objset_type_t os_type; //vfs_t *vfsp; struct vfsmount *vfsp; #endif /* _KERNEL */ error = dsl_pool_hold(dsname, FTAG, &dp); if (error != 0) return (error); /* XXX - should we use own_exclusive() here? */ if ((error = dsl_dataset_hold(dp, dsname, FTAG, &ds)) != 0) { dsl_pool_rele(dp, FTAG); return (error); } if ((error = dmu_objset_from_ds(ds, &os)) != 0) { dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); return (error); } #ifdef _KERNEL /* * Make sure that the device node has gone for ZVOLs * and that filesystems are umounted. */ #if 0 // FIXME os_type = dmu_objset_type(os); if (os_type == DMU_OST_ZVOL) { error = zvol_remove_minor(dsname); if (error == ENXIO) error = 0; } else if (os_type == DMU_OST_ZFS) { vfsp = zfs_get_vfs(dsname); if (vfsp != NULL) { error = vn_vfswlock(vfsp->vfs_vnodecovered); VFS_RELE(vfsp); if (error == 0) error = dounmount(vfsp, 0, CRED()); } } if (error != 0) { dsl_dataset_rele(ds, FTAG); return (error); } #endif #endif /* _KERNEL */ /* * Make sure all dbufs are synced. * * It is essential for encrypted datasets to ensure that * there is no further pending IO before removing the key. */ if (dmu_objset_is_dirty(os, 0)) // FIXME, 0? 
txg_wait_synced(dmu_objset_pool(os), 0); dmu_objset_evict_dbufs(os); spa = dsl_dataset_get_spa(ds); error = zcrypt_keystore_remove(spa, ds->ds_object); dsl_dataset_rele(ds, FTAG); dsl_pool_rele(dp, FTAG); return (error); }
/* * The disk has been changed! */ void pc_diskchanged(struct pcfs *fsp) { struct pcnode *pcp, *npcp = NULL; struct pchead *hp; struct vnode *vp; extern vfs_t EIO_vfs; struct vfs *vfsp; /* * Eliminate all pcnodes (dir & file) associated with this fs. * If the node is internal, ie, no references outside of * pcfs itself, then release the associated vnode structure. * Invalidate the in core FAT. * Invalidate cached data blocks and blocks waiting for I/O. */ PC_DPRINTF1(1, "pc_diskchanged fsp=0x%p\n", (void *)fsp); vfsp = PCFSTOVFS(fsp); for (hp = pcdhead; hp < &pcdhead[NPCHASH]; hp++) { for (pcp = hp->pch_forw; pcp != (struct pcnode *)hp; pcp = npcp) { npcp = pcp -> pc_forw; vp = PCTOV(pcp); if ((vp->v_vfsp == vfsp) && !(pcp->pc_flags & PC_RELEHOLD)) { mutex_enter(&(vp)->v_lock); if (vp->v_count > 0) { mutex_exit(&(vp)->v_lock); continue; } mutex_exit(&(vp)->v_lock); VN_HOLD(vp); remque(pcp); vp->v_data = NULL; vp->v_vfsp = &EIO_vfs; vp->v_type = VBAD; VN_RELE(vp); if (!(pcp->pc_flags & PC_EXTERNAL)) { (void) pvn_vplist_dirty(vp, (u_offset_t)0, pcfs_putapage, B_INVAL | B_TRUNC, (struct cred *)NULL); vn_free(vp); } kmem_free(pcp, sizeof (struct pcnode)); fsp->pcfs_nrefs --; VFS_RELE(vfsp); } } } for (hp = pcfhead; fsp->pcfs_frefs && hp < &pcfhead[NPCHASH]; hp++) { for (pcp = hp->pch_forw; fsp->pcfs_frefs && pcp != (struct pcnode *)hp; pcp = npcp) { npcp = pcp -> pc_forw; vp = PCTOV(pcp); if ((vp->v_vfsp == vfsp) && !(pcp->pc_flags & PC_RELEHOLD)) { mutex_enter(&(vp)->v_lock); if (vp->v_count > 0) { mutex_exit(&(vp)->v_lock); continue; } mutex_exit(&(vp)->v_lock); VN_HOLD(vp); remque(pcp); vp->v_data = NULL; vp->v_vfsp = &EIO_vfs; vp->v_type = VBAD; VN_RELE(vp); if (!(pcp->pc_flags & PC_EXTERNAL)) { (void) pvn_vplist_dirty(vp, (u_offset_t)0, pcfs_putapage, B_INVAL | B_TRUNC, (struct cred *)NULL); vn_free(vp); } kmem_free(pcp, sizeof (struct pcnode)); fsp->pcfs_frefs--; fsp->pcfs_nrefs--; VFS_RELE(vfsp); } } } #ifdef undef if (fsp->pcfs_frefs) { rw_exit(&pcnodes_lock); 
panic("pc_diskchanged: frefs"); } if (fsp->pcfs_nrefs) { rw_exit(&pcnodes_lock); panic("pc_diskchanged: nrefs"); } #endif if (!(vfsp->vfs_flag & VFS_UNMOUNTED) && fsp->pcfs_fatp != (uchar_t *)0) { pc_invalfat(fsp); } else { binval(fsp->pcfs_xdev); } }
/*
 * pc_rele - final release of a pcnode.
 *
 * Called when the last reference is being dropped: flushes dirty
 * state (truncating erased files, syncing changed directory entries
 * and the FAT), then frees the vnode and pcnode and drops the fs
 * reference counts.  If another thread re-held the vnode while we
 * were flushing, the node is kept and PC_RELEHOLD is cleared.
 */
void
pc_rele(struct pcnode *pcp)
{
	struct pcfs *fsp;
	struct vnode *vp;
	int err;

	vp = PCTOV(pcp);
	PC_DPRINTF1(8, "pc_rele vp=0x%p\n", (void *)vp);

	fsp = VFSTOPCFS(vp->v_vfsp);
	ASSERT(fsp->pcfs_flags & PCFS_LOCKED);

	rw_enter(&pcnodes_lock, RW_WRITER);
	/* Mark the node in-teardown so pc_diskchanged() leaves it alone */
	pcp->pc_flags |= PC_RELEHOLD;

retry:
	if (vp->v_type != VDIR && (pcp->pc_flags & PC_INVAL) == 0) {
		/*
		 * If the file was removed while active it may be safely
		 * truncated now.
		 */
		if (pcp->pc_entry.pcd_filename[0] == PCD_ERASED) {
			(void) pc_truncate(pcp, 0);
		} else if (pcp->pc_flags & PC_CHG) {
			(void) pc_nodeupdate(pcp);
		}
		err = syncpcp(pcp, B_INVAL);
		if (err) {
			/* best effort: force the pages out even on error */
			(void) syncpcp(pcp, B_INVAL | B_FORCE);
		}
	}
	if (vn_has_cached_data(vp)) {
		/*
		 * pvn_vplist_dirty will abort all old pages
		 */
		(void) pvn_vplist_dirty(vp, (u_offset_t)0,
		    pcfs_putapage, B_INVAL, (struct cred *)NULL);
	}
	(void) pc_syncfat(fsp);
	mutex_enter(&vp->v_lock);
	if (vn_has_cached_data(vp)) {
		/* pages showed up while we were syncing; flush again */
		mutex_exit(&vp->v_lock);
		goto retry;
	}
	ASSERT(!vn_has_cached_data(vp));

	vp->v_count--;  /* release our hold from vn_rele */
	if (vp->v_count > 0) { /* Is this check still needed? */
		PC_DPRINTF1(3, "pc_rele: pcp=0x%p HELD AGAIN!\n",
		    (void *)pcp);
		mutex_exit(&vp->v_lock);
		pcp->pc_flags &= ~PC_RELEHOLD;
		rw_exit(&pcnodes_lock);
		return;
	}

	remque(pcp);
	rw_exit(&pcnodes_lock);
	/*
	 * XXX - old code had a check for !(pcp->pc_flags & PC_INVAL)
	 * here. Seems superfluous/incorrect, but then earlier on PC_INVAL
	 * was never set anywhere in PCFS. Now it is, and we _have_ to drop
	 * the file reference here. Else, we'd screw up umount/modunload.
	 */
	if ((vp->v_type == VREG)) {
		fsp->pcfs_frefs--;
	}
	fsp->pcfs_nrefs--;
	VFS_RELE(vp->v_vfsp);

	if (fsp->pcfs_nrefs < 0) {
		panic("pc_rele: nrefs count");
	}
	if (fsp->pcfs_frefs < 0) {
		panic("pc_rele: frefs count");
	}

	/* v_lock is still held here; drop it only once teardown is certain */
	mutex_exit(&vp->v_lock);
	vn_invalid(vp);
	vn_free(vp);
	kmem_free(pcp, sizeof (struct pcnode));
}
/*
 * zfsctl_snapdir_lookup - VOP_LOOKUP for the .zfs/snapshot directory.
 *
 * Finds (and, if necessary, mounts) the snapshot named "nm" under
 * directory "dvp".  On success *vpp is the root of the mounted
 * snapshot with its v_vfsp pointed at the parent filesystem so the
 * snapshot is reachable over NFS without an explicit mount.
 */
/* ARGSUSED */
static int
zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	struct mounta margs;
	vfs_t *vfsp;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ASSERT(dvp->v_type == VDIR);

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs). We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (ENOENT);

	ZFS_ENTER(zfsvfs);

	/* "." and ".." resolve without touching the snapshot cache */
	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		/* Map the caller's name to the snapshot's on-disk casing */
		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			nm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
	}

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		/* Snapshot already known; cross into its mounted root */
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp);
		if (err) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * handle "ls *" or "?" in a graceful manner,
		 * forcing EILSEQ to ENOENT.
		 * Since shell ultimately passes "*" or "?" as name to lookup
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}

	/* Record the new snapshot entry in the AVL cache */
	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp,
	    dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_rele(snap, FTAG);
domount:
	mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/.zfs/snapshot/%s",
	    refstr_value(dvp->v_vfsp->vfs_mntpt), nm);

	margs.spec = snapname;
	margs.dir = mountpoint;
	margs.flags = MS_SYSSPACE | MS_NOMNTTAB;
	margs.fstype = "zfs";
	margs.dataptr = NULL;
	margs.datalen = 0;
	margs.optptr = NULL;
	margs.optlen = 0;

	err = domount("zfs", &margs, *vpp, kcred, &vfsp);
	kmem_free(mountpoint, mountpoint_len);

	if (err == 0) {
		/*
		 * Return the mounted root rather than the covered mount point.
		 * Takes the GFS vnode at .zfs/snapshot/<snapname> and returns
		 * the ZFS vnode mounted on top of the GFS node. This ZFS
		 * vnode is the root of the newly created vfsp.
		 */
		VFS_RELE(vfsp);
		err = traverse(vpp);
	}

	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
		(*vpp)->v_vfsp = zfsvfs->z_vfs;
		(*vpp)->v_flag &= ~VROOT;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);

	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
	return (err);
}
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine. If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned. Otherwise, a pointer to the private data is returned.
 *
 * NOTE(review): this appears to be a FreeBSD port (vnode_fsnode,
 * vn_lock, VI_LOCK); the V_XATTRDIR handling is compiled out via
 * "#ifdef TODO" -- confirm whether xattr directories can reach here.
 */
void *
gfs_file_inactive(struct vnode *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vnode_fsnode(vp);
	gfs_dir_t *dp = NULL;
	void *data;

	/* fsnode already detached: nothing for us to clean up */
	if (!fp)
		return NULL;

	if (fp->gfs_parent == NULL /*|| (vp->v_flag & V_XATTRDIR)*/)
		goto found;

	/*
	 * XXX cope with a FreeBSD-specific race wherein the parent's
	 * snapshot data can be freed before the parent is
	 */
	if ((dp = vnode_fsnode(fp->gfs_parent)) == NULL)
		return (NULL);

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++) {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
#ifdef TODO
	if (vp->v_flag & V_XATTRDIR)
		VI_LOCK(fp->gfs_parent);
#endif
	VN_HOLD(vp);
	/*
	 * Really remove this vnode
	 */
	data = vnode_fsnode(vp);
	if (ge != NULL) {
		/*
		 * If this was a statically cached entry, simply set the
		 * cached vnode to NULL.
		 */
		ge->gfse_vnode = NULL;
	}
	VN_RELE(vp);

	/*
	 * Free vnode and release parent
	 */
	dprintf("freeing vp %p and parent %p\n", vp, fp->gfs_parent);
	if (fp->gfs_parent) {
		if (dp)
			gfs_dir_unlock(dp);
		//VOP_UNLOCK(vp, 0);
		VN_RELE(fp->gfs_parent);
		/* reacquire the vnode lock the caller expects on return */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		ASSERT(vp->v_vfsp != NULL);
		/* root node: drop the vfs hold taken at creation */
		VFS_RELE(vp->v_vfsp);
	}
#ifdef TODO
	if (vp->v_flag & V_XATTRDIR)
		VI_UNLOCK(fp->gfs_parent);
#endif

	return (data);
}
/*
 * getflabel -
 *
 * Return pointer to the ts_label associated with the specified file,
 * or returns NULL if error occurs.  Caller is responsible for doing
 * a label_rele of the ts_label.
 */
ts_label_t *
getflabel(vnode_t *vp)
{
	vfs_t *vfsp, *rvfsp;
	vnode_t *rvp, *rvp2;
	zone_t *zone;
	ts_label_t *zl;
	int err;
	boolean_t vfs_is_held = B_FALSE;
	char vpath[MAXPATHLEN];

	ASSERT(vp);
	vfsp = vp->v_vfsp;
	if (vfsp == NULL)
		return (NULL);

	rvp = vp;

	/*
	 * Traverse lofs mounts and fattach'es to get the real vnode
	 */
	if (VOP_REALVP(rvp, &rvp2, NULL) == 0)
		rvp = rvp2;

	rvfsp = rvp->v_vfsp;

	/* rvp/rvfsp now represent the real vnode/vfs we will be using */

	/* Go elsewhere to handle all nfs files. */
	if (strncmp(vfssw[rvfsp->vfs_fstype].vsw_name, "nfs", 3) == 0)
		return (getflabel_nfs(rvfsp));

	/*
	 * Fast path, for objects in a labeled zone: everything except
	 * for lofs/nfs will be just the label of that zone.
	 */
	if ((rvfsp->vfs_zone != NULL) && (rvfsp->vfs_zone != global_zone)) {
		if ((strcmp(vfssw[rvfsp->vfs_fstype].vsw_name,
		    "lofs") != 0)) {
			zone = rvfsp->vfs_zone;
			zone_hold(zone);
			goto zone_out;		/* return this label */
		}
	}

	/*
	 * Get the vnode path -- it may be missing or weird for some
	 * cases, like devices.  In those cases use the label of the
	 * current zone.
	 */
	err = vnodetopath(rootdir, rvp, vpath, sizeof (vpath), kcred);
	if ((err != 0) || (*vpath != '/')) {
		zone = curproc->p_zone;
		zone_hold(zone);
		goto zone_out;
	}

	/*
	 * For zfs filesystem, return the explicit label property if a
	 * meaningful one exists.
	 */
	if (strncmp(vfssw[rvfsp->vfs_fstype].vsw_name, "zfs", 3) == 0) {
		ts_label_t *tsl;

		tsl = getflabel_zfs(rvfsp);

		/* if label found, return it, otherwise continue... */
		if (tsl != NULL)
			return (tsl);
	}

	/*
	 * If a mountpoint exists, hold the vfs while we reference it.
	 * Otherwise if mountpoint is NULL it should not be held (e.g.,
	 * a hold/release on spec_vfs would result in an attempted free
	 * and panic.)
	 */
	if (vfsp->vfs_mntpt != NULL) {
		VFS_HOLD(vfsp);
		vfs_is_held = B_TRUE;
	}

	zone = zone_find_by_any_path(vpath, B_FALSE);

	/*
	 * If the vnode source zone is properly set to a non-global zone, or
	 * any zone if the mount is R/W, then use the label of that zone.
	 */
	if ((zone != global_zone) || ((vfsp->vfs_flag & VFS_RDONLY) != 0))
		goto zone_out;		/* return this label */

	/*
	 * Otherwise, if we're not in the global zone, use the label of
	 * our zone.
	 *
	 * NOTE(review): on this branch the hold on global_zone acquired
	 * by zone_find_by_any_path() above is not visibly released before
	 * "zone" is reassigned -- verify against the zone refcounting
	 * rules that this is not a leaked hold.
	 */
	if ((zone = curproc->p_zone) != global_zone) {
		zone_hold(zone);
		goto zone_out;		/* return this label */
	}

	/*
	 * We're in the global zone and the mount is R/W ... so the file
	 * may actually be in the global zone -- or in the root of any zone.
	 * Always build our own path for the file, to be sure it's simplified
	 * (i.e., no ".", "..", "//", and so on).
	 */
	zone_rele(zone);
	zone = zone_find_by_any_path(vpath, B_FALSE);

zone_out:
	if ((curproc->p_zone == global_zone) && (zone == global_zone)) {
		vfs_t *nvfs;
		boolean_t exported = B_FALSE;
		refstr_t *mntpt_ref;
		char *mntpt;

		/*
		 * File is in the global zone - check whether it's admin_high.
		 * If it's in a filesys that was exported from the global zone,
		 * it's admin_low by definition.  Otherwise, if it's in a
		 * filesys that's NOT exported to any zone, it's admin_high.
		 *
		 * And for these files if there wasn't a valid mount resource,
		 * the file must be admin_high (not exported, probably a global
		 * zone device).
		 */
		if (!vfs_is_held)
			goto out_high;

		mntpt_ref = vfs_getmntpoint(vfsp);
		mntpt = (char *)refstr_value(mntpt_ref);

		if ((mntpt != NULL) && (*mntpt == '/')) {
			zone_t *to_zone;

			to_zone = zone_find_by_any_path(mntpt, B_FALSE);
			zone_rele(to_zone);
			if (to_zone != global_zone) {
				/* force admin_low */
				exported = B_TRUE;
			}
		}
		if (mntpt_ref)
			refstr_rele(mntpt_ref);

		if (!exported) {
			size_t plen = strlen(vpath);

			/* Walk the vfs list looking for lofs re-exports */
			vfs_list_read_lock();
			nvfs = vfsp->vfs_next;
			while (nvfs != vfsp) {
				const char *rstr;
				size_t rlen = 0;

				/*
				 * Skip checking this vfs if it's not lofs
				 * (the only way to export from the global
				 * zone to a zone).
				 */
				if (strncmp(vfssw[nvfs->vfs_fstype].vsw_name,
				    "lofs", 4) != 0) {
					nvfs = nvfs->vfs_next;
					continue;
				}

				rstr = refstr_value(nvfs->vfs_resource);
				if (rstr != NULL)
					rlen = strlen(rstr);

				/*
				 * Check for a match: does this vfs correspond
				 * to our global zone file path?  I.e., check
				 * if the resource string of this vfs is a
				 * prefix of our path.
				 */
				if ((rlen > 0) && (rlen <= plen) &&
				    (strncmp(rstr, vpath, rlen) == 0) &&
				    (vpath[rlen] == '/' ||
				    vpath[rlen] == '\0')) {
					/* force admin_low */
					exported = B_TRUE;
					break;
				}
				nvfs = nvfs->vfs_next;
			}
			vfs_list_unlock();
		}

		if (!exported)
			goto out_high;
	}

	if (vfs_is_held)
		VFS_RELE(vfsp);

	/*
	 * Now that we have the "home" zone for the file, return the slabel
	 * of that zone.
	 */
	zl = zone->zone_slabel;
	label_hold(zl);
	zone_rele(zone);
	return (zl);

out_high:
	if (vfs_is_held)
		VFS_RELE(vfsp);

	label_hold(l_admin_high);
	zone_rele(zone);
	return (l_admin_high);
}
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine. If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned. Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(vnode_t *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vp->v_data;
	gfs_dir_t *dp = NULL;
	void *data;

	/* Root nodes and xattr dirs are not cached in a parent directory */
	if (fp->gfs_parent == NULL || (vp->v_flag & V_XATTRDIR))
		goto found;

	dp = fp->gfs_parent->v_data;

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++) {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
	/* Parent's v_lock protects its v_xattrdir pointer */
	if (vp->v_flag & V_XATTRDIR) {
		mutex_enter(&fp->gfs_parent->v_lock);
	}
	mutex_enter(&vp->v_lock);
	if (vp->v_count == 1) {
		/*
		 * Really remove this vnode
		 */
		data = vp->v_data;
		if (ge != NULL) {
			/*
			 * If this was a statically cached entry, simply set the
			 * cached vnode to NULL.
			 */
			ge->gfse_vnode = NULL;
		}
		if (vp->v_flag & V_XATTRDIR) {
			fp->gfs_parent->v_xattrdir = NULL;
			mutex_exit(&fp->gfs_parent->v_lock);
		}
		mutex_exit(&vp->v_lock);

		/*
		 * Free vnode and release parent
		 */
		if (fp->gfs_parent) {
			if (dp) {
				gfs_dir_unlock(dp);
			}
			VN_RELE(fp->gfs_parent);
		} else {
			ASSERT(vp->v_vfsp != NULL);
			/* root node: drop the vfs hold taken at creation */
			VFS_RELE(vp->v_vfsp);
		}
		vn_free(vp);
	} else {
		/* Lost the race with vget(); drop our ref and keep the node */
		VN_RELE_LOCKED(vp);
		data = NULL;
		mutex_exit(&vp->v_lock);
		if (vp->v_flag & V_XATTRDIR) {
			mutex_exit(&fp->gfs_parent->v_lock);
		}
		if (dp)
			gfs_dir_unlock(dp);
	}

	return (data);
}
/*
 * mount() system call entry point.
 *
 * Unpacks the user's mount arguments, resolves the mount-point vnode,
 * enforces the clustering (pxfs/global-mount) rules, and hands off to
 * domount().  The vfs reference returned by domount() is dropped here;
 * the mount itself keeps the filesystem alive.  Returns 0 on success
 * or sets errno and returns -1 semantics via set_errno().
 */
/* ARGSUSED */
int
mount(long *lp, rval_t *rp)
{
	vnode_t *vp = NULL;
	struct vfs *vfsp;	/* dummy argument */
	int error;
	struct mounta *uap;

#if defined(_LP64)
	struct mounta native;

	/*
	 * Build a struct mounta from the raw syscall words when we are
	 * DATAMODEL_LP64.
	 */
	uap = &native;
	uap->spec = (char *)*lp++;
	uap->dir = (char *)*lp++;
	uap->flags = (int)*lp++;
	uap->fstype = (char *)*lp++;
	uap->dataptr = (char *)*lp++;
	uap->datalen = (int)*lp++;
	uap->optptr = (char *)*lp++;
	uap->optlen = (int)*lp++;
#else	/* !defined(_LP64) */
	/*
	 * 32 bit kernels can take a shortcut and just cast
	 * the args array to the structure.
	 */
	uap = (struct mounta *)lp;
#endif	/* _LP64 */

	/*
	 * Resolve second path name (mount point).
	 */
	if (error = lookupname(uap->dir, UIO_USERSPACE, FOLLOW, NULLVPP, &vp))
		return (set_errno(error));

	/*
	 * Some mount flags are disallowed through the system call interface.
	 */
	uap->flags &= MS_MASK;

	if ((vp->v_flag & VPXFS) && (uap->flags & MS_GLOBAL) != MS_GLOBAL) {
		/*
		 * Clustering: a mount onto the global namespace must
		 * itself be a global mount.
		 */
		error = ENOTSUP;
	} else if ((uap->flags & MS_GLOBAL) &&
	    (cluster_bootflags & CLUSTER_BOOTED) == 0) {
		/*
		 * Clustering: global mounts are only permitted when the
		 * system was booted as a cluster.
		 */
		error = ENOTSUP;
	} else {
		/* Global mounts go through pxfs; everything else is direct */
		error = domount((uap->flags & MS_GLOBAL) ? "pxfs" : NULL,
		    uap, vp, CRED(), &vfsp);
		if (error == 0)
			VFS_RELE(vfsp);
	}

	VN_RELE(vp);
	rp->r_val2 = error;
	return (error ? set_errno(error) : 0);
}
/*
 * make_rnode4 - find or create the rnode for filehandle "fh" on "vfsp".
 *
 * Looks the filehandle up in hash chain "rhtp" (caller holds the chain
 * lock as reader); on a miss, recycles an rnode from the freelist when
 * the cache is full, otherwise allocates a fresh one.  *newnode is set
 * to 0 when an existing rnode was found, 1 when a new one was created.
 * Returns the held vnode.
 */
static vnode_t *
make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
    struct vnodeops *vops,
    int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int,
    cred_t *),
    int *newnode, cred_t *cr)
{
	rnode4_t *rp;
	rnode4_t *trp;
	vnode_t *vp;
	mntinfo4_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOMI4(vfsp);

start:
	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(rp);
		*newnode = 0;
		return (vp);
	}
	rw_exit(&rhtp->r_lock);

	mutex_enter(&rp4freelist_lock);

	/* Recycle from the freelist only once the rnode cache is full */
	if (rp4freelist != NULL && rnode4_new >= nrnode) {
		rp = rp4freelist;
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);

		vp = RTOV4(rp);

		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/* Someone re-held it; put it back and retry */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		r4inactive(rp, cr);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			/* Raced with a new hold during r4inactive(); retry */
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);

		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		uninit_rnode4(rp);

		/*
		 * Make sure that if rnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		vnode_t *new_vp;

		mutex_exit(&rp4freelist_lock);

		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&rnode4_new, 1);
#ifdef DEBUG
		clstat4_debug.nrnode.value.ui64++;
#endif
		vp = new_vp;
	}

	/* Initialize the (new or recycled) rnode from scratch */
	bzero(rp, sizeof (*rp));
	rp->r_vnode = vp;
	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
	rp->created_v4 = 0;
	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
	    offsetof(nfs4_open_stream_t, os_node));
	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
	rp->r_flags = R4READDIRWATTR;
	rp->r_fh = fh;
	rp->r_hashq = rhtp;
	sfh4_hold(rp->r_fh);
	rp->r_server = mi->mi_curr_serv;
	rp->r_deleg_type = OPEN_DELEGATE_NONE;
	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);
	rddir4_cache_create(rp);
	rp->r_putapage = putapage;
	vn_setops(vp, vops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = vfsp;
	VFS_HOLD(vfsp);		/* released when the rnode is recycled/freed */
	vp->v_type = VNON;
	if (isrootfh(fh, rp))
		vp->v_flag = VROOT;
	vn_exists(vp);

	/*
	 * There is a race condition if someone else
	 * alloc's the rnode while no locks are held, so we
	 * check again and recover if found.
	 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
		/* Lost the race: discard ours and use the winner's rnode */
		vp = RTOV4(trp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		rp4_addfree(rp, cr);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}
	rp4_addhash(rp);
	*newnode = 1;
	return (vp);
}
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine. If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned. Otherwise, a pointer to the private data is returned.
 *
 * NOTE(review): two suspicious spots in the V_XATTRDIR path below --
 * (1) at "found:" VI_LOCK(fp->gfs_parent) is taken even though
 * gfs_parent may be NULL when V_XATTRDIR is set, and (2) the release
 * path takes VI_LOCK(fp->gfs_parent) again while it is still held
 * from "found:".  Verify whether V_XATTRDIR vnodes can actually reach
 * this code on this platform.
 */
void *
gfs_file_inactive(vnode_t *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vp->v_data;
	gfs_dir_t *dp = NULL;
	void *data;

	/* Root nodes and xattr dirs are not cached in a parent directory */
	if (fp->gfs_parent == NULL || (vp->v_flag & V_XATTRDIR))
		goto found;

	/*
	 * XXX cope with a FreeBSD-specific race wherein the parent's
	 * snapshot data can be freed before the parent is
	 */
	if ((dp = fp->gfs_parent->v_data) == NULL)
		return (NULL);

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++) {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
	if (vp->v_flag & V_XATTRDIR)
		VI_LOCK(fp->gfs_parent);
	VI_LOCK(vp);
	/*
	 * Really remove this vnode
	 */
	data = vp->v_data;
	if (ge != NULL) {
		/*
		 * If this was a statically cached entry, simply set the
		 * cached vnode to NULL.
		 */
		ge->gfse_vnode = NULL;
	}
	VI_UNLOCK(vp);

	/*
	 * Free vnode and release parent
	 */
	if (fp->gfs_parent) {
		if (dp)
			gfs_dir_unlock(dp);
		/* Drop the parent reference directly via the interlock */
		VI_LOCK(fp->gfs_parent);
		fp->gfs_parent->v_usecount--;
		VI_UNLOCK(fp->gfs_parent);
	} else {
		ASSERT(vp->v_vfsp != NULL);
		/* root node: drop the vfs hold taken at creation */
		VFS_RELE(vp->v_vfsp);
	}
	if (vp->v_flag & V_XATTRDIR)
		VI_UNLOCK(fp->gfs_parent);

	return (data);
}