/*
 * Replay a TX_SETATTR intent-log record: re-apply the recorded attribute
 * change to the target znode.
 *
 * arg1 is the zfsvfs_t being replayed; arg2 points at the lr_setattr_t
 * record.  If byteswap is set the record was written by a host of the
 * opposite endianness and must be swapped in place before use.
 *
 * Returns 0 on success or an errno from zfs_zget()/VOP_SETATTR().
 */
static int
zfs_replay_setattr(void *arg1, char *arg2, boolean_t byteswap)
{
	zfsvfs_t *zfsvfs = (zfsvfs_t *)arg1;
	lr_setattr_t *lr = (lr_setattr_t *)arg2;
	znode_t *zp;
	xvattr_t xva;
	vattr_t *vap = &xva.xva_vattr;
	int error;
	void *start;

	xva_init(&xva);
	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));

		/* Optional extended-attr data follows the fixed record. */
		if ((lr->lr_mask & AT_XVATTR) &&
		    zfsvfs->z_version >= ZPL_VERSION_INITIAL)
			zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

	zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
	    lr->lr_uid, lr->lr_gid, 0, lr->lr_foid);

	vap->va_size = lr->lr_size;
	ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime);

	/*
	 * Fill in xvattr_t portions if necessary.
	 */

	start = (lr_setattr_t *)(lr + 1);
#ifdef HAVE_ZPL
	if (vap->va_mask & AT_XVATTR) {
		zfs_replay_xvattr((lr_attr_t *)start, &xva);
		/* Step over the variable-length xvattr data. */
		start = (caddr_t)start +
		    ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize);
	} else
		xva.xva_vattr.va_mask &= ~AT_XVATTR;
#endif /* HAVE_ZPL */

	/* FUID domain strings (if any) follow the xvattr portion. */
	zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
	    lr->lr_uid, lr->lr_gid);

	error = VOP_SETATTR(ZTOV(zp), vap, 0, kcred, NULL);

	zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;
	VN_RELE(ZTOV(zp));

	return (error);
}
/*
 * Replay a TX_SETATTR intent-log record (FreeBSD-style variant that takes
 * typed arguments and locks the vnode around VOP_SETATTR()).
 *
 * If byteswap is set the record was written by a host of the opposite
 * endianness and is swapped in place before use.
 *
 * Returns 0 on success or an errno from zfs_zget()/VOP_SETATTR().
 */
static int
zfs_replay_setattr(zfsvfs_t *zfsvfs, lr_setattr_t *lr, boolean_t byteswap)
{
	znode_t *zp;
	xvattr_t xva;
	vattr_t *vap = &xva.xva_vattr;
	vnode_t *vp;
	int error;
	void *start;

	xva_init(&xva);
	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));

		/* Optional extended-attr data follows the fixed record. */
		if ((lr->lr_mask & AT_XVATTR) &&
		    zfsvfs->z_version >= ZPL_VERSION_INITIAL)
			zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

	zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
	    lr->lr_uid, lr->lr_gid, 0, lr->lr_foid);

	vap->va_size = lr->lr_size;
	ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime);

	/*
	 * Fill in xvattr_t portions if necessary.
	 */

	start = (lr_setattr_t *)(lr + 1);
	if (vap->va_mask & AT_XVATTR) {
		zfs_replay_xvattr((lr_attr_t *)start, &xva);
		/* Step over the variable-length xvattr data. */
		start = (caddr_t)start +
		    ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize);
	} else
		xva.xva_vattr.va_mask &= ~AT_XVATTR;

	/* FUID domain strings (if any) follow the xvattr portion. */
	zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
	    lr->lr_uid, lr->lr_gid);

	vp = ZTOV(zp);
	/* VOP_SETATTR() requires the vnode locked on this platform. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_SETATTR(vp, vap, kcred);
	VOP_UNLOCK(vp, 0);

	zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;
	VN_RELE(vp);

	return (error);
}
/*
 * Replaying ACLs is complicated by FUID support.
 * The log record may contain some optional data
 * to be used for replaying FUID's. These pieces
 * are the actual FUIDs that were created initially.
 * The FUID table index may no longer be valid and
 * during zfs_create() a new index may be assigned.
 * Because of this the log will contain the original
 * domain+rid in order to create a new FUID.
 *
 * The individual ACEs may contain an ephemeral uid/gid which is no
 * longer valid and will need to be replaced with an actual FUID.
 *
 * Returns 0 on success (including the benign ENOENT case below) or an
 * errno from zfs_zget()/VOP_SETSECATTR().
 */
static int
zfs_replay_acl(zfsvfs_t *zfsvfs, lr_acl_t *lr, boolean_t byteswap)
{
	ace_t *ace = (ace_t *)(lr + 1);		/* ACEs follow the record */
	vsecattr_t vsa;
	znode_t *zp;
	int error;

	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
		if (lr->lr_fuidcnt) {
			/* Optional FUID array follows the ACE data. */
			byteswap_uint64_array((caddr_t)ace +
			    ZIL_ACE_LENGTH(lr->lr_acl_bytes),
			    lr->lr_fuidcnt * sizeof (uint64_t));
		}
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) {
		/*
		 * As we can log acls out of order, it's possible the
		 * file has been removed. In this case just drop the acl
		 * and return success.
		 */
		if (error == ENOENT)
			error = 0;
		return (error);
	}

	bzero(&vsa, sizeof (vsa));
	vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
	vsa.vsa_aclcnt = lr->lr_aclcnt;
	vsa.vsa_aclentp = ace;
	vsa.vsa_aclentsz = lr->lr_acl_bytes;
	vsa.vsa_aclflags = lr->lr_acl_flags;

	if (lr->lr_fuidcnt) {
		void *fuidstart = (caddr_t)ace +
		    ZIL_ACE_LENGTH(lr->lr_acl_bytes);

		/* Rebuild FUID mappings so ephemeral ids resolve again. */
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuids(fuidstart, &fuidstart,
		    lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
	}

	error = VOP_SETSECATTR(ZTOV(zp), &vsa, 0, kcred, NULL);

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;

	VN_RELE(ZTOV(zp));

	return (error);
}
/*
 * Directory lookup entry point for the vmblock file system.
 *
 * Looks up component nm in dvp.  The real lookup is delegated to the
 * redirect (backing) file system; on success the found backing vnode is
 * wrapped in a vmblock vnode via VMBlockVnodeGet() and returned in *vpp.
 *
 * Returns 0 on success, ENOTDIR if dvp is not a directory, or an errno
 * from the underlying VOP_LOOKUP()/VMBlockVnodeGet().
 */
static int
VMBlockLookup(struct vnode *dvp,     // IN: Directory to look in
              char *nm,              // IN: Name of component to lookup in directory
              struct vnode **vpp,    // OUT: Pointer to vnode representing found file
              struct pathname *pnp,  // IN: Full pathname being looked up
              int flags,             // IN: Lookup flags (see vnode.h)
              struct vnode *rdir,    // IN: Vnode of root device
              struct cred *cr        // IN: Credentials of caller
#if OS_VFS_VERSION >= 5
              , caller_context_t *ctx  // IN: Caller's context
              , int *direntflags       // IN:
              , struct pathname *rpnp  // IN:
#endif
             )
{
   struct vnode *realVp;
   VMBlockMountInfo *mip;
   int ret;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMblockLookup: entry\n");

   /* First ensure that we are looking in a directory. */
   if (dvp->v_type != VDIR) {
      return ENOTDIR;
   }

   /* Don't invoke lookup for ourself. */
   if (nm[0] == '\0' || (nm[0] == '.' && nm[1] == '\0')) {
      VN_HOLD(dvp);
      *vpp = dvp;
      return 0;
   }

   *vpp = NULL;

   /* Make sure nm exists before creating our link to it. */
   mip = VPTOMIP(dvp);
   ret = VOP_LOOKUP(mip->redirectVnode, nm, &realVp, pnp, flags, rdir, cr
#if OS_VFS_VERSION >= 5
                    , ctx, direntflags, rpnp
#endif
                   );
   if (ret) {
      return ret;
   }

   /* Wrap the backing vnode; on failure drop the hold VOP_LOOKUP took. */
   ret = VMBlockVnodeGet(vpp, realVp, nm, strlen(nm), dvp, dvp->v_vfsp, FALSE);
   if (ret) {
      VN_RELE(realVp);
      return ret;
   }

   return 0;
}
/*
 * Close the CPR (suspend/resume) statefile, releasing its dump
 * reservation first unless the file system is in reusable mode.
 * No-op when no statefile vnode is currently open.
 */
void
cpr_statef_close(void)
{
	if (!C_VP)
		return;

	if (!cpr_reusable_mode)
		(void) VOP_DUMPCTL(C_VP, DUMP_FREE, NULL, NULL);
	(void) VOP_CLOSE(C_VP, FWRITE, 1, (offset_t)0, CRED(), NULL);
	VN_RELE(C_VP);
	C_VP = 0;
}
/*
 * Set a POSIX ACL (access or default, per 'kind') on a vnode from the
 * xattr-format blob 'acl' of length 'size'.
 *
 * Returns 0 on success or a negated errno (Linux convention: internal
 * positive errors are negated on return).
 */
int
xfs_acl_vset(
	xfs_vnode_t	*vp,
	void		*acl,
	size_t		size,
	int		kind)
{
	posix_acl_xattr_header	*ext_acl = acl;
	xfs_acl_t		*xfs_acl;
	int			error;
	int			basicperms = 0; /* more than std unix perms? */

	if (!acl)
		return -EINVAL;

	if (!(_ACL_ALLOC(xfs_acl)))
		return -ENOMEM;

	/* Convert the external xattr representation to on-disk form. */
	error = posix_acl_xattr_to_xfs(ext_acl, size, xfs_acl);
	if (error) {
		_ACL_FREE(xfs_acl);
		return -error;
	}

	/* An empty ACL is a successful no-op. */
	if (!xfs_acl->acl_cnt) {
		_ACL_FREE(xfs_acl);
		return 0;
	}

	VN_HOLD(vp);
	error = xfs_acl_allow_set(vp, kind);
	if (error)
		goto out;

	/* Incoming ACL exists, set file mode based on its value */
	if (kind == _ACL_TYPE_ACCESS)
		xfs_acl_setmode(vp, xfs_acl, &basicperms);

	/*
	 * If we have more than std unix permissions, set up the actual attr.
	 * Otherwise, delete any existing attr. This prevents us from
	 * having actual attrs for permissions that can be stored in the
	 * standard permission bits.
	 */
	if (!basicperms) {
		xfs_acl_set_attr(vp, xfs_acl, kind, &error);
	} else {
		/*
		 * basicperms can only be set via the ACCESS path above,
		 * so removing the ACCESS attr here is the matching case.
		 */
		xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
	}

out:
	VN_RELE(vp);
	_ACL_FREE(xfs_acl);
	return -error;
}
int osi_UFSClose(struct osi_file *afile) { AFS_STATCNT(osi_Close); if (afile->vnode) { VN_RELE(afile->vnode); } osi_FreeSmallSpace(afile); return 0; }
/*
 * NLM_UNSHARE, NLM4_UNSHARE
 *
 * Release a DOS-style share reservation
 */
void
nlm_do_unshare(nlm4_shareargs *argp, nlm4_shareres *resp, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	vnode_t *vp = NULL;
	char *netid;
	int error;
	struct shrlock shr;

	/* Echo the caller's cookie so it can match the reply. */
	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_find(g, netid, addr);
	if (host == NULL) {
		resp->stat = nlm4_denied_nolocks;
		return;
	}

	DTRACE_PROBE3(unshare__start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareargs *, argp);

	/* No lock-state changes allowed while in the grace period. */
	if (NLM_IN_GRACE(g)) {
		resp->stat = nlm4_denied_grace_period;
		goto out;
	}

	vp = nlm_fh_to_vp(&argp->share.fh);
	if (vp == NULL) {
		resp->stat = nlm4_stale_fh;
		goto out;
	}

	/* Convert to local form. */
	nlm_init_shrlock(&shr, &argp->share, host);
	error = VOP_SHRLOCK(vp, F_UNSHARE, &shr, FREAD | FWRITE,
	    CRED(), NULL);

	/*
	 * The unshare result is deliberately ignored and the request is
	 * always reported as granted -- NOTE(review): presumably because
	 * unshare is idempotent from the client's view; confirm against
	 * the NLM protocol expectations before changing.
	 */
	(void) error;
	resp->stat = nlm4_granted;

out:
	DTRACE_PROBE3(unshare__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareres *, resp);

	if (vp != NULL)
		VN_RELE(vp);

	nlm_host_release(g, host);
}
/*
 * Tear down a segmf segment: free its per-page map array, drop the
 * hold on the backing vnode, and release the private data structure.
 */
static void
segmf_free(struct seg *seg)
{
	struct segmf_data *sdp = seg->s_data;

	kmem_free(sdp->map, seg_pages(seg) * sizeof (segmf_map_t));
	VN_RELE(sdp->vp);
	mutex_destroy(&sdp->lock);
	kmem_free(sdp, sizeof (*sdp));
}
/*
 * Replay a TX_REMOVE or TX_RMDIR intent-log record: look up the named
 * entry under the recorded parent directory and remove it.
 *
 * Lock order: the parent directory vnode is locked first, then the
 * target vnode; both are dropped before returning.
 *
 * Returns 0 on success or an errno from lookup/remove.
 */
static int
zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap)
{
	char *name = (char *)(lr + 1);	/* name follows lr_remove_t */
	znode_t *dzp;
	struct componentname cn;
	vnode_t *vp;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	cn.cn_nameptr = name;
	cn.cn_namelen = strlen(name);
	cn.cn_nameiop = DELETE;
	cn.cn_flags = ISLASTCN;
	//cn.cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
	cn.cn_cred = kcred;

	vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
	error = VOP_LOOKUP(ZTOV(dzp), &vp, &cn);
	if (error != 0) {
		VOP_UNLOCK(ZTOV(dzp));
		goto fail;
	}

	/* Lock the target after the parent (normal remove lock order). */
	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0) {
		VOP_UNLOCK(ZTOV(dzp));
		vrele(vp);
		goto fail;
	}

	switch ((int)lr->lr_common.lrc_txtype) {
	case TX_REMOVE:
		error = VOP_REMOVE(ZTOV(dzp), vp, &cn /*,vflg*/);
		break;
	case TX_RMDIR:
		error = VOP_RMDIR(ZTOV(dzp), vp, &cn /*,vflg*/);
		break;
	default:
		error = ENOTSUP;
	}

	vput(vp);
	VOP_UNLOCK(ZTOV(dzp));

fail:
	VN_RELE(ZTOV(dzp));
	return (error);
}
/*
 * XFS_IOC_FSSETDM_BY_HANDLE ioctl backend: set the DMAPI event mask and
 * state on a file identified by a userspace-supplied handle.
 *
 * Requires CAP_MKNOD.  Immutable or append-only targets are rejected.
 * Returns 0 on success or a negated errno.
 */
STATIC int
xfs_fssetdm_by_handle(
	xfs_mount_t	*mp,
	void		__user *arg,
	struct file	*parfilp,
	struct inode	*parinode)
{
	int			error;
	struct fsdmidata	fsd;
	xfs_fsop_setdm_handlereq_t dmhreq;
	struct inode		*inode;
	bhv_desc_t		*bdp;
	vnode_t			*vp;

	if (!capable(CAP_MKNOD))
		return -XFS_ERROR(EPERM);

	/* Copy the handle request itself from userspace. */
	if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	/* Resolve the handle to a held vnode/inode pair. */
	error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq,
	    &vp, &inode);
	if (error)
		return -error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
		VN_RELE(vp);
		return -XFS_ERROR(EPERM);
	}

	/* The DM attribute payload is fetched separately. */
	if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
		VN_RELE(vp);
		return -XFS_ERROR(EFAULT);
	}

	bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
	error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate,
	    NULL);

	VN_RELE(vp);
	if (error)
		return -error;
	return 0;
}
/*
 * Replay a TX_LINK intent-log record: re-create a hard link named after
 * the record under the recorded parent directory.
 *
 * Lock order: parent directory first, then the link target; both are
 * unlocked and released before returning.
 *
 * Returns 0 on success or an errno from zfs_zget()/VOP_LINK().
 */
static int
zfs_replay_link(void *arg1, void *arg2, boolean_t byteswap)
{
	zfsvfs_t *zfsvfs = arg1;
	lr_link_t *lr = arg2;
	char *name = (char *)(lr + 1);	/* name follows lr_link_t */
	znode_t *dzp, *zp;
	struct componentname cn;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) {
		VN_RELE(ZTOV(dzp));
		return (error);
	}

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	cn.cn_nameptr = name;
	cn.cn_cred = kcred;
	cn.cn_thread = curthread;
	cn.cn_flags = SAVENAME;

	vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
	vn_lock(ZTOV(zp), LK_EXCLUSIVE | LK_RETRY);
	error = VOP_LINK(ZTOV(dzp), ZTOV(zp), &cn /*,vflg*/);
	VOP_UNLOCK(ZTOV(zp), 0);
	VOP_UNLOCK(ZTOV(dzp), 0);

	VN_RELE(ZTOV(zp));
	VN_RELE(ZTOV(dzp));

	return (error);
}
/*
 * Look up (or, with CREATE_XATTR_DIR, create) the extended-attribute
 * directory associated with dvp, returning its inode in *sip.
 *
 * Returns 0 on success, ENOENT when no attribute directory exists (and
 * creation was not requested), or an errno from ufs_iget()/
 * ufs_xattrmkdir().  On success the returned inode's vnode is marked
 * as an xattr directory.
 */
int
ufs_xattr_getattrdir(
	vnode_t *dvp,
	struct inode **sip,
	int flags,
	struct cred *cr)
{
	struct vfs	*vfsp;
	struct inode	*ip, *sdp;
	int		error;

	ip = VTOI(dvp);
	if (flags & LOOKUP_XATTR) {
		/* i_oeftflag holds the attr-dir inode number, if any. */
		if (ip && ((ip->i_oeftflag) != 0)) {
			vfsp = dvp->v_vfsp;

			error = ufs_iget(vfsp, ip->i_oeftflag, sip, cr);
			if (error)
				return (error);

			sdp = *sip;

			/*
			 * Make sure it really is an ATTRDIR
			 */
			if ((sdp->i_mode & IFMT) != IFATTRDIR) {
				cmn_err(CE_NOTE, "ufs_getattrdir: inode %d"
				    " points to attribute directory %d "
				    "which is not an attribute directory;"
				    "run fsck on file system",
				    (int)ip->i_number, (int)sdp->i_number);
				VN_RELE(ITOV(sdp));
				return (ENOENT);
			}
			ITOV(sdp)->v_type = VDIR;
			ITOV(sdp)->v_flag |= V_XATTRDIR;
			error = 0;
			goto out;
		} else if (flags & CREATE_XATTR_DIR) {
			error = ufs_xattrmkdir(ip, sip, 1, cr);
		} else {
			error = ENOENT;
			goto out;
		}
	} else if (flags & CREATE_XATTR_DIR) {
		error = ufs_xattrmkdir(ip, sip, 1, cr);
	} else {
		error = ENOENT;
	}
out:
	return (error);
}
/*
 * Replaying ACLs is complicated by FUID support.
 * The log record may contain some optional data
 * to be used for replaying FUID's. These pieces
 * are the actual FUIDs that were created initially.
 * The FUID table index may no longer be valid and
 * during zfs_create() a new index may be assigned.
 * Because of this the log will contain the original
 * domain+rid in order to create a new FUID.
 *
 * The individual ACEs may contain an ephemeral uid/gid which is no
 * longer valid and will need to be replaced with an actual FUID.
 *
 * This variant locks the vnode around zfs_setsecattr() as required on
 * this platform.  Returns 0 on success or an errno.
 */
static int
zfs_replay_acl(void *arg1, void *arg2, boolean_t byteswap)
{
	zfsvfs_t *zfsvfs = arg1;
	lr_acl_t *lr = arg2;
	ace_t *ace = (ace_t *)(lr + 1);		/* ACEs follow the record */
	vsecattr_t vsa;
	znode_t *zp;
	vnode_t *vp;
	int error;

	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
		if (lr->lr_fuidcnt) {
			/* Optional FUID array follows the ACE data. */
			byteswap_uint64_array((caddr_t)ace +
			    ZIL_ACE_LENGTH(lr->lr_acl_bytes),
			    lr->lr_fuidcnt * sizeof (uint64_t));
		}
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

	bzero(&vsa, sizeof (vsa));
	vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
	vsa.vsa_aclcnt = lr->lr_aclcnt;
	vsa.vsa_aclentp = ace;
	vsa.vsa_aclentsz = lr->lr_acl_bytes;
	vsa.vsa_aclflags = lr->lr_acl_flags;

	if (lr->lr_fuidcnt) {
		void *fuidstart = (caddr_t)ace +
		    ZIL_ACE_LENGTH(lr->lr_acl_bytes);

		/* Rebuild FUID mappings so ephemeral ids resolve again. */
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuids(fuidstart, &fuidstart,
		    lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
	}

	vp = ZTOV(zp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = zfs_setsecattr(vp, &vsa, 0, kcred, NULL);
	VOP_UNLOCK(vp, 0);

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;

	VN_RELE(vp);

	return (error);
}
/*
 * Look up a logical name in the global zone.
 * Provides the ability to map the global zone's device name
 * to an alternate name within a zone. The primary example
 * is the virtual console device /dev/zcons/[zonename]/zconsole
 * mapped to /[zonename]/root/dev/zconsole.
 *
 * On success a new sdev node named 'rename' is created under 'dir',
 * backed by the global /dev entry 'name' found relative to 'gdir'.
 * Failures are silent (this is a best-effort profile operation).
 */
static void
prof_lookup_globaldev(struct sdev_node *dir, struct sdev_node *gdir,
    char *name, char *rename)
{
	int error;
	struct vnode *avp, *gdv, *gddv;
	struct sdev_node *newdv;
	struct vattr vattr = {0};
	struct pathname pn;

	/* check if node already exists */
	newdv = sdev_cache_lookup(dir, rename);
	if (newdv) {
		/* node exists but deleted */
		ASSERT(newdv->sdev_state != SDEV_ZOMBIE);
		SDEV_SIMPLE_RELE(newdv);
		return;
	}

	/* sanity check arguments */
	if (!gdir || pn_get(name, UIO_SYSSPACE, &pn))
		return;

	/* perform a relative lookup of the global /dev instance */
	gddv = SDEVTOV(gdir);
	VN_HOLD(gddv);
	error = lookuppnvp(&pn, NULL, FOLLOW, NULLVPP, &gdv,
	    rootdir, gddv, kcred);
	pn_free(&pn);
	if (error) {
		sdcmn_err10(("prof_lookup_globaldev: %s not found\n", name));
		return;
	}
	ASSERT(gdv && gdv->v_type != VLNK);

	/*
	 * Found the entry in global /dev, figure out attributes
	 * by looking at backing store. Call into devfs for default.
	 * Note, mapped device is persisted under the new name
	 */
	prof_getattr(dir, rename, gdv, &vattr, &avp, NULL);

	if (gdv->v_type != VDIR) {
		VN_RELE(gdv);
		gdir = NULL;
	} else
		gdir = VTOSDEV(gdv);	/* directory hold passes to gdir */

	if (prof_mknode(dir, rename, &newdv, &vattr, avp,
	    (void *)gdir, kcred) == 0) {
		ASSERT(newdv->sdev_state != SDEV_ZOMBIE);
		SDEV_SIMPLE_RELE(newdv);
	}
}
STATIC void linvfs_unfreeze_fs( struct super_block *sb) { vfs_t *vfsp = LINVFS_GET_VFS(sb); vnode_t *vp; int error; VFS_ROOT(vfsp, &vp, error); VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, 0, XFS_IOC_THAW, 0, error); VN_RELE(vp); }
/*
 * Unmount entry point for the vmblock file system.
 *
 * Refuses the unmount (EBUSY) while the root vnode has more than one
 * reference, then releases the covered vnode and root, frees the mount
 * info, and marks the vfs unmounted.
 *
 * Returns 0 on success, EBUSY if busy, or an errno from the security
 * policy check.
 */
static int
VMBlockUnmount(struct vfs *vfsp,    // IN: This file system
               int flag,            // IN: Unmount flags
               struct cred *credp)  // IN: Credentials of caller
{
   VMBlockMountInfo *mip;
   int ret;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockUnmount: entry\n");

   ret = secpolicy_fs_unmount(credp, vfsp);
   if (ret) {
      return ret;
   }

   mip = (VMBlockMountInfo *)vfsp->vfs_data;

   /*
    * Only our own hold on the root may remain; otherwise the file
    * system is still in use.
    */
   mutex_enter(&mip->root->v_lock);
   if (mip->root->v_count > 1) {
      mutex_exit(&mip->root->v_lock);
      return EBUSY;
   }
   mutex_exit(&mip->root->v_lock);

   VN_RELE(vfsp->vfs_vnodecovered);

   /*
    * We don't need to VN_RELE() mip->redirectVnode since it's the realVnode
    * for mip->root.  That means when we VN_RELE() mip->root and
    * VMBlockInactive() is called, VMBlockVnodePut() will VN_RELE()
    * mip->redirectVnode for us.  It's like magic, but better.
    */
   VN_RELE(mip->root);

   pn_free(&mip->redirectPath);
   kmem_free(mip, sizeof *mip);

   vfsp->vfs_flag |= VFS_UNMOUNTED;

   return 0;
}
/*
 * Leaf driver close entry point. We make a vnode and go through specfs in
 * order to obtain open close exclusions guarantees. Note that we drop
 * OTYP_LYR if it was specified - we are going through specfs and it provides
 * last close semantics (FLKYR is provided to close(9E)).
 *
 * Returns the error from VOP_CLOSE().
 */
int
dev_lclose(dev_t dev, int flag, int otype, struct cred *cred)
{
	struct vnode	*vp;
	int		error;
	struct vnode	*cvp;
	char		*funcname;
	ulong_t		offset;

	vp = makespecvp(dev, (otype == OTYP_BLK) ? VBLK : VCHR);
	error = VOP_CLOSE(vp, flag | FKLYR, 1, (offset_t)0, cred, NULL);

	/*
	 * Release the extra dev_lopen hold on the common vnode. We inline a
	 * VN_RELE(cvp) call so that we can detect more dev_lclose calls than
	 * dev_lopen calls without panic. See vn_rele. If our inline of
	 * vn_rele called VOP_INACTIVE(cvp, CRED(), ...) we would panic on the
	 * "release the makespecvp vnode" VN_RELE(vp) that follows - so
	 * instead we diagnose this situation. Note that the driver has
	 * still seen a double close(9E), but that would have occurred with
	 * the old dev_close implementation too.
	 */
	cvp = STOV(VTOCS(vp));
	mutex_enter(&cvp->v_lock);
	switch (cvp->v_count) {
	default:
		/* Normal case: simply drop one reference. */
		cvp->v_count--;
		break;

	case 0:
		VTOS(vp)->s_commonvp = NULL;	/* avoid panic */
		/*FALLTHROUGH*/
	case 1:
		/*
		 * The following message indicates a serious problem in the
		 * identified driver, the driver should be fixed. If obtaining
		 * a panic dump is needed to diagnose the driver problem then
		 * adding "set dev_lclose_ce=3" to /etc/system will cause a
		 * panic when this occurs.
		 */
		funcname = modgetsymname((uintptr_t)caller(), &offset);
		cmn_err(dev_lclose_ce, "dev_lclose: extra close of dev_t 0x%lx "
		    "from %s`%s()", dev, mod_containing_pc(caller()),
		    funcname ? funcname : "unknown...");
		break;
	}
	mutex_exit(&cvp->v_lock);

	/* release the makespecvp vnode. */
	VN_RELE(vp);
	return (error);
}
/*
 * Add the given share in the specified server.
 * If the share is a disk share, smb_vfs_hold() is
 * invoked to ensure that there is a hold on the
 * corresponding file system before the share is
 * added to shares AVL.
 *
 * If the share is an Autohome share and it is
 * already in the AVL only a reference count for
 * that share is incremented.
 *
 * Returns 0 on success, EEXIST for a duplicate non-autohome share, or
 * an errno from vnode/VFS resolution or AVL insertion.
 */
static int
smb_kshare_export(smb_server_t *sv, smb_kshare_t *shr)
{
	smb_avl_t	*share_avl;
	smb_kshare_t	*auto_shr;
	vnode_t		*vp;
	int		rc = 0;

	share_avl = &sv->sv_export.e_share_avl;

	/* Non-disk shares need no file-system hold; just cache them. */
	if (!STYPE_ISDSK(shr->shr_type)) {
		if ((rc = smb_avl_add(share_avl, shr)) != 0) {
			cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
			    shr->shr_name, rc);
		}

		return (rc);
	}

	if ((auto_shr = smb_avl_lookup(share_avl, shr)) != NULL) {
		/* Duplicate: only autohome shares may be re-exported. */
		if ((auto_shr->shr_flags & SMB_SHRF_AUTOHOME) == 0) {
			smb_avl_release(share_avl, auto_shr);
			return (EEXIST);
		}

		mutex_enter(&auto_shr->shr_mutex);
		auto_shr->shr_autocnt++;
		mutex_exit(&auto_shr->shr_mutex);
		smb_avl_release(share_avl, auto_shr);
		return (0);
	}

	if ((rc = smb_server_sharevp(sv, shr->shr_path, &vp)) != 0) {
		cmn_err(CE_WARN, "export[%s(%s)]: failed obtaining vnode (%d)",
		    shr->shr_name, shr->shr_path, rc);
		return (rc);
	}

	/* Hold the VFS first; undo it if the AVL insert fails. */
	if ((rc = smb_vfs_hold(&sv->sv_export, vp->v_vfsp)) == 0) {
		if ((rc = smb_avl_add(share_avl, shr)) != 0) {
			cmn_err(CE_WARN, "export[%s]: failed caching (%d)",
			    shr->shr_name, rc);
			smb_vfs_rele(&sv->sv_export, vp->v_vfsp);
		}
	} else {
		cmn_err(CE_WARN, "export[%s(%s)]: failed holding VFS (%d)",
		    shr->shr_name, shr->shr_path, rc);
	}

	VN_RELE(vp);

	return (rc);
}
/*
 * Fetch the value of xattr 'name' on inode 'ip' via the hidden xattr
 * directory.  With size == 0, return the attribute's length; otherwise
 * copy up to 'size' bytes into 'value' (ERANGE if the buffer is too
 * small).  Returns the byte count read/required, or a negative errno.
 */
static int
zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
    size_t size, cred_t *cr)
{
	struct inode *xattr_dir = NULL;
	struct inode *xattr_file = NULL;
	loff_t offset = 0;
	int rc;

	/* Find the per-file xattr directory. */
	rc = -zfs_lookup(ip, NULL, &xattr_dir, LOOKUP_XATTR, cr, NULL, NULL);
	if (rc)
		goto out;

	/* Then the named attribute within it. */
	rc = -zfs_lookup(xattr_dir, (char *)name, &xattr_file, 0,
	    cr, NULL, NULL);
	if (rc)
		goto out;

	if (size == 0) {
		/* Size probe: report the attribute length. */
		rc = i_size_read(xattr_file);
		goto out;
	}

	if (size < i_size_read(xattr_file)) {
		rc = -ERANGE;
		goto out;
	}

	rc = zpl_read_common(xattr_file, value, size, &offset,
	    UIO_SYSSPACE, 0, cr);
out:
	if (xattr_file)
		VN_RELE(xattr_file);

	if (xattr_dir)
		VN_RELE(xattr_dir);

	return (rc);
}
/*
 * Swap the caller's shadow-vnode reference for a reference to the
 * master vnode of the same rnode: hold the master first, then drop the
 * original, so the rnode never becomes unreferenced in between.
 *
 * Fix: added the missing semicolon after VN_HOLD(mvp).  The old form
 * only compiled because VN_HOLD expands to a brace block; it was
 * fragile (an intervening if/else would mis-parse) and inconsistent
 * with every other call site.
 */
void
sv_exchange(vnode_t **vpp)
{
	vnode_t *mvp;

	sv_stats.sv_exchange++;

	/* RTOV always returns the master vnode */
	mvp = RTOV4(VTOR4(*vpp));
	VN_HOLD(mvp);
	VN_RELE(*vpp);
	*vpp = mvp;
}
/*
 * FUSE readlink handler: resolve inode 'ino' to a znode, read the
 * symlink target into a local buffer via VOP_READLINK(), and reply to
 * FUSE with the NUL-terminated result.
 *
 * Returns 0 on success (reply already sent) or an errno for the FUSE
 * layer to report.  ENOENT is substituted for the EEXIST that
 * dnode_hold_impl returns for recently deleted inodes.
 */
static int zfsfuse_readlink(fuse_req_t req, fuse_ino_t ino)
{
	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;

	ZFS_ENTER(zfsvfs);

	znode_t *znode;

	int error = zfs_zget(zfsvfs, ino, &znode, B_FALSE);
	if(error) {
		ZFS_EXIT(zfsvfs);
		/* If the inode we are trying to get was recently deleted
		   dnode_hold_impl will return EEXIST instead of ENOENT */
		return error == EEXIST ? ENOENT : error;
	}

	ASSERT(znode != NULL);
	vnode_t *vp = ZTOV(znode);
	ASSERT(vp != NULL);

	/* Reserve one byte for the NUL terminator added after the read. */
	char buffer[PATH_MAX + 1];

	iovec_t iovec;
	uio_t uio;
	uio.uio_iov = &iovec;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_fmode = 0;
	uio.uio_llimit = RLIM64_INFINITY;
	iovec.iov_base = buffer;
	iovec.iov_len = sizeof(buffer) - 1;
	uio.uio_resid = iovec.iov_len;
	uio.uio_loffset = 0;

	cred_t cred;
	zfsfuse_getcred(req, &cred);

	error = VOP_READLINK(vp, &uio, &cred, NULL);

	VN_RELE(vp);
	ZFS_EXIT(zfsvfs);

	if(!error) {
		/* uio_loffset is now the number of bytes read. */
		VERIFY(uio.uio_loffset < sizeof(buffer));
		buffer[uio.uio_loffset] = '\0';
		fuse_reply_readlink(req, buffer);
	}

	return error;
}
/*
 * Replaying ACLs is complicated by FUID support.
 * The log record may contain some optional data
 * to be used for replaying FUID's. These pieces
 * are the actual FUIDs that were created initially.
 * The FUID table index may no longer be valid and
 * during zfs_create() a new index may be assigned.
 * Because of this the log will contain the original
 * domain+rid in order to create a new FUID.
 *
 * The individual ACEs may contain an ephemeral uid/gid which is no
 * longer valid and will need to be replaced with an actual FUID.
 *
 * NOTE: the actual replay is compiled out (#ifdef TODO) on this
 * platform and EOPNOTSUPP is returned instead.
 */
static int
zfs_replay_acl(zfsvfs_t *zfsvfs, lr_acl_t *lr, boolean_t byteswap)
{
	ace_t *ace = (ace_t *)(lr + 1);		/* ACEs follow the record */
	vsecattr_t vsa;
	znode_t *zp;
	int error;

	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
		if (lr->lr_fuidcnt) {
			/* Optional FUID array follows the ACE data. */
			byteswap_uint64_array((caddr_t)ace +
			    ZIL_ACE_LENGTH(lr->lr_acl_bytes),
			    lr->lr_fuidcnt * sizeof (uint64_t));
		}
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
		return (error);

#ifdef TODO
	bzero(&vsa, sizeof (vsa));
	vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
	vsa.vsa_aclcnt = lr->lr_aclcnt;
	vsa.vsa_aclentp = ace;
	vsa.vsa_aclentsz = lr->lr_acl_bytes;
	vsa.vsa_aclflags = lr->lr_acl_flags;

	if (lr->lr_fuidcnt) {
		void *fuidstart = (caddr_t)ace +
		    ZIL_ACE_LENGTH(lr->lr_acl_bytes);

		zfsvfs->z_fuid_replay =
		    zfs_replay_fuids(fuidstart, &fuidstart,
		    lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
	}

	error = VOP_SETSECATTR(ZTOV(zp), &vsa, 0, kcred, NULL);

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);
#else
	error = EOPNOTSUPP;
#endif

	zfsvfs->z_fuid_replay = NULL;
	VN_RELE(ZTOV(zp));

	return (error);
}
/*
 * Free a singly linked list of visible-directory entries, releasing
 * each entry's vnode hold and security-info list before freeing the
 * node itself.
 */
void
free_visible(struct exp_visible *head)
{
	struct exp_visible *visp = head;

	while (visp != NULL) {
		/* Capture the successor before this node is freed. */
		struct exp_visible *next = visp->vis_next;

		if (visp->vis_vp != NULL)
			VN_RELE(visp->vis_vp);

		srv_secinfo_list_free(visp->vis_secinfo, visp->vis_seccnt);
		kmem_free(visp, sizeof (*visp));
		visp = next;
	}
}
/*
 * Last chance for a zone to see a node. If our parent dir is
 * SDEV_ZONED, then we look up the "zone" property for the node. If the
 * property is found and matches the current zone name, we allow it.
 * Note that this isn't quite correct for the global zone peeking inside
 * a zone's /dev - for that to work, we'd have to have a per-dev-mount
 * zone ref squirreled away.
 *
 * Returns 1 when the node's "zone" property matches the current zone,
 * 0 otherwise (including all lookup/property failures).
 */
static int
prof_zone_matched(char *name, struct sdev_node *dir)
{
	vnode_t *gvn = SDEVTOV(dir->sdev_origin);
	struct pathname pn;
	vnode_t *vn = NULL;
	char zonename[ZONENAME_MAX];
	int znlen = ZONENAME_MAX;
	int ret;

	ASSERT((dir->sdev_flags & SDEV_ZONED) != 0);

	sdcmn_err10(("sdev_node %p is zoned, looking for %s\n",
	    (void *)dir, name));

	if (pn_get(name, UIO_SYSSPACE, &pn))
		return (0);

	VN_HOLD(gvn);

	/* Resolve 'name' relative to the global /dev origin. */
	ret = lookuppnvp(&pn, NULL, FOLLOW, NULLVPP, &vn, rootdir, gvn, kcred);

	pn_free(&pn);

	if (ret != 0) {
		sdcmn_err10(("prof_zone_matched: %s not found\n", name));
		return (0);
	}

	/*
	 * VBLK doesn't matter, and the property name is in fact treated
	 * as a const char *.
	 */
	ret = e_ddi_getlongprop_buf(vn->v_rdev, VBLK, (char *)"zone",
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, (caddr_t)zonename, &znlen);

	/*
	 * NOTE(review): the debug messages below print vn's pointer value
	 * after it has been released; harmless as long as the pointer is
	 * never dereferenced, but worth confirming.
	 */
	VN_RELE(vn);

	if (ret == DDI_PROP_NOT_FOUND) {
		sdcmn_err10(("vnode %p: no zone prop\n", (void *)vn));
		return (0);
	} else if (ret != DDI_PROP_SUCCESS) {
		sdcmn_err10(("vnode %p: zone prop error: %d\n",
		    (void *)vn, ret));
		return (0);
	}

	sdcmn_err10(("vnode %p zone prop: %s\n", (void *)vn, zonename));
	return (strcmp(zonename, curproc->p_zone->zone_name) == 0);
}
/*
 * Free the storage obtained from lxpr_getnode(): drop any holds the
 * node carries on its backing /proc vnode and on its parent, then
 * return the node to its kmem cache.
 */
void
lxpr_freenode(lxpr_node_t *lxpnp)
{
	ASSERT(lxpnp != NULL);
	ASSERT(LXPTOV(lxpnp) != NULL);

	/* Drop the association with the real /proc vnode, if any. */
	if (lxpnp->lxpr_realvp != NULL)
		VN_RELE(lxpnp->lxpr_realvp);

	/* Drop the hold on the parent vnode, if any. */
	if (lxpnp->lxpr_parent != NULL)
		VN_RELE(lxpnp->lxpr_parent);

	kmem_cache_free(lxpr_node_cache, lxpnp);
}
/*
 * Persist the pool configuration nvlist 'nvl' to the cachefile named by
 * 'dp'.  A NULL nvlist removes the cachefile instead.  The write uses
 * the write-temp / fsync / rename protocol so the cachefile is always
 * seen either whole-old or whole-new, never partially written.
 */
static void
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
	size_t buflen;
	char *buf;
	vnode_t *vp;
	int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX;
	char tempname[128];

	/*
	 * If the nvlist is empty (NULL), then remove the old cachefile.
	 */
	if (nvl == NULL) {
		(void) vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE);
		return;
	}

	/*
	 * Pack the configuration into a buffer.
	 */
	VERIFY(nvlist_size(nvl, &buflen, NV_ENCODE_XDR) == 0);

	buf = kmem_alloc(buflen, KM_SLEEP);

	VERIFY(nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);

	/*
	 * Write the configuration to disk.  We need to do the traditional
	 * 'write to temporary file, sync, move over original' to make sure we
	 * always have a consistent view of the data.
	 */
	(void) snprintf(tempname, sizeof (tempname), "%s.tmp", dp->scd_path);

	if (vn_open(tempname, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0) != 0)
		goto out;

	/* Only rename over the real file once data and sync succeeded. */
	if (vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE,
	    0, RLIM64_INFINITY, kcred, NULL) == 0 &&
	    VOP_FSYNC(vp, FSYNC, kcred, NULL) == 0) {
		(void) vn_rename(tempname, dp->scd_path, UIO_SYSSPACE);
	}

	(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
	VN_RELE(vp);

out:
	/* Best-effort cleanup: the temp file may or may not still exist. */
	(void) vn_remove(tempname, UIO_SYSSPACE, RMFILE);
	kmem_free(buf, buflen);
}
/*
 * Read a POSIX ACL (access or default, per 'kind') from a vnode into
 * the xattr-format buffer 'acl' of length 'size'.  With size == 0 only
 * the maximum required buffer size is reported.
 *
 * Returns the (positive) byte count on the size probe, 0 on a
 * successful copy, or a negated errno.  Note the internal convention:
 * 'error' holds positive errnos (or a negative byte count), and the
 * final 'return -error' flips it into Linux form.
 */
int
xfs_acl_vget(
	xfs_vnode_t	*vp,
	void		*acl,
	size_t		size,
	int		kind)
{
	int			error;
	xfs_acl_t		*xfs_acl = NULL;
	posix_acl_xattr_header	*ext_acl = acl;
	int			flags = 0;

	VN_HOLD(vp);
	if(size) {
		if (!(_ACL_ALLOC(xfs_acl))) {
			error = ENOMEM;
			goto out;
		}
		memset(xfs_acl, 0, sizeof(xfs_acl_t));
	} else
		flags = ATTR_KERNOVAL;	/* size probe: fetch no value */

	xfs_acl_get_attr(vp, xfs_acl, kind, flags, &error);
	if (error)
		goto out;

	if (!size) {
		/* Negative here so 'return -error' yields a positive size. */
		error = -posix_acl_xattr_size(XFS_ACL_MAX_ENTRIES);
	} else {
		if (xfs_acl_invalid(xfs_acl)) {
			error = EINVAL;
			goto out;
		}
		if (kind == _ACL_TYPE_ACCESS) {
			xfs_vattr_t	va;

			/* Fold the current file mode into the access ACL. */
			va.va_mask = XFS_AT_MODE;
			XVOP_GETATTR(vp, &va, 0, sys_cred, error);
			if (error)
				goto out;
			xfs_acl_sync_mode(va.va_mode, xfs_acl);
		}
		error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
	}
out:
	VN_RELE(vp);
	if(xfs_acl)
		_ACL_FREE(xfs_acl);
	return -error;
}
/* ARGSUSED */
static void
xattr_dir_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	xattr_dir_t *xdp;

	/*
	 * gfs_dir_inactive() tracks v_count and returns the private data
	 * only when the vnode was actually freed; otherwise nothing to do.
	 */
	xdp = gfs_dir_inactive(vp);
	if (xdp == NULL)
		return;

	if (xdp->xattr_realvp != NULL)
		VN_RELE(xdp->xattr_realvp);

	kmem_free(xdp, ((gfs_file_t *)xdp)->gfs_size);
}
/*
 * Clean up any znodes that had no links when we either crashed or
 * (force) umounted the file system.
 */
void
zfs_unlinked_drain(zfsvfs_t *zfsvfs)
{
	zap_cursor_t	zc;
	zap_attribute_t zap;
	dmu_object_info_t doi;
	znode_t		*zp;
	int		error;

	printf("ZFS: unlinked drain\n");
	/*
	 * Iterate over the contents of the unlinked set.
	 */
	for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj);
	    zap_cursor_retrieve(&zc, &zap) == 0;
	    zap_cursor_advance(&zc)) {

		/*
		 * See what kind of object we have in list
		 */

		error = dmu_object_info(zfsvfs->z_os,
		    zap.za_first_integer, &doi);
		if (error != 0)
			continue;

		ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) ||
		    (doi.doi_type == DMU_OT_DIRECTORY_CONTENTS));
		/*
		 * We need to re-mark these list entries for deletion,
		 * so we pull them back into core and set zp->z_unlinked.
		 */
		error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);

		/*
		 * We may pick up znodes that are already marked for deletion.
		 * This could happen during the purge of an extended attribute
		 * directory. All we need to do is skip over them, since they
		 * are already in the system marked z_unlinked.
		 */
		if (error != 0)
			continue;

		zp->z_unlinked = B_TRUE;
		/* Dropping the last hold triggers the actual deletion. */
		VN_RELE(ZTOV(zp));
	}
	zap_cursor_fini(&zc);
	printf("ZFS: unlinked drain completed.\n");
}