/*
 * Pass a rename down to the lower layer.  The source and target
 * namecache entries must resolve to the same lower mount; a rename
 * that would cross layers is rejected with EINVAL.
 */
static int
null_nrename(struct vop_nrename_args *ap)
{
	struct mount *fromlmp = MOUNTTONULLMOUNT(ap->a_fnch->mount)->nullm_vfs;
	struct mount *tolmp = MOUNTTONULLMOUNT(ap->a_tnch->mount)->nullm_vfs;

	if (fromlmp != tolmp)
		return (EINVAL);

	/* redirect the ops vector at the lower mount and re-dispatch */
	ap->a_head.a_ops = fromlmp->mnt_vn_norm_ops;
	return vop_nrename_ap(ap);
}
static int nullfs_readdir(struct vnop_readdir_args * ap) { struct vnode *vp, *lvp; int error; struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); /* assumption is that any vp that comes through here had to go through lookup */ lck_mtx_lock(&null_mp->nullm_lock); if (nullfs_isspecialvp(ap->a_vp)) { error = nullfs_special_readdir(ap); lck_mtx_unlock(&null_mp->nullm_lock); return error; } lck_mtx_unlock(&null_mp->nullm_lock); vp = ap->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context); vnode_put(lvp); } return error; }
static int nullfs_getattr(struct vnop_getattr_args * args) { int error; struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); lck_mtx_lock(&null_mp->nullm_lock); if (nullfs_isspecialvp(args->a_vp)) { error = nullfs_special_getattr(args); lck_mtx_unlock(&null_mp->nullm_lock); return error; } lck_mtx_unlock(&null_mp->nullm_lock); /* this will return a different inode for third than read dir will */ struct vnode * lowervp = NULLVPTOLOWERVP(args->a_vp); error = vnode_getwithref(lowervp); if (error == 0) { error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context); vnode_put(lowervp); if (error == 0) { /* fix up fsid so it doesn't say the underlying fs*/ VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]); } } return error; }
/*
 * Mount null layer
 *
 * Copies the user-supplied null_args in from userspace.  For an update
 * of an existing mount (MNT_UPDATE) only the NFS export information is
 * reprocessed; the ro/rw update itself happens in sys_mount().
 */
static int
nullfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	int error = 0;
	struct null_args args;
	struct vnode *rootvp;
	struct null_mount *xmp;
	size_t size;
	struct nlookupdata nd;
	fhandle_t fh;

	NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);

	/*
	 * Get argument
	 */
	/* data is a user pointer; copyin validates the transfer */
	error = copyin(data, (caddr_t)&args, sizeof(struct null_args));
	if (error)
		return (error);

	/*
	 * XXX: Should we process mount export info ?
	 * If not, returning zero here is enough as the actual ro/rw update is
	 * being done in sys_mount().
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		xmp = MOUNTTONULLMOUNT(mp);
		error = vfs_export(mp, &xmp->export, &args.export);
		return (error);
	}
/*
 * Handle mount-control requests on the null layer.
 *
 * MOUNTCTL_SET_EXPORT updates the NFS export configuration for this
 * mount (the control buffer must be exactly an export_args structure);
 * MOUNTCTL_MOUNTFLAGS is delegated to the standard implementation.
 * Everything else is rejected with EOPNOTSUPP.
 */
static int
null_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	int error;

	mp = ap->a_head.a_ops->head.vv_mount;

	switch(ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		/* reject a control buffer of the wrong size before touching it */
		if (ap->a_ctllen != sizeof(struct export_args))
			error = EINVAL;
		else
			error = nullfs_export(mp, ap->a_op, (const void *)ap->a_ctl);
		break;
	case MOUNTCTL_MOUNTFLAGS:
		error = vop_stdmountctl(ap);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
	/*
	 * Disabled alternative: forward the whole operation to the lower
	 * mount's ops vector instead of handling it here.
	 */
#if 0
	ap->a_head.a_ops = MOUNTTONULLMOUNT(ap->a_nch->mount)->nullm_vfs->mnt_vn_norm_ops;
	return vop_mountctl_ap(ap);
#endif
}
static int null_nrmdir(struct vop_nrmdir_args *ap) { ap->a_head.a_ops = MOUNTTONULLMOUNT(ap->a_nch->mount)->nullm_vfs->mnt_vn_norm_ops; return vop_nrmdir_ap(ap); }
/*
 * Hand back the root vnode of the null mount with an iocount held
 * on behalf of the caller.
 */
static int
nullfs_root(struct mount * mp, struct vnode ** vpp, __unused vfs_context_t ctx)
{
	struct vnode * rvp = MOUNTTONULLMOUNT(mp)->nullm_rootvp;
	int error;

	NULLFSDEBUG("nullfs_root(mp = %p, vp = %p)\n", (void *)mp,
	    (void *)MOUNTTONULLMOUNT(mp)->nullm_rootvp);

	/*
	 * Return locked reference to root.
	 */
	error = vnode_get(rvp);
	if (error != 0) {
		return error;
	}

	*vpp = rvp;
	return 0;
}
/*
 * Reclaim a nullfs vnode: drop the usecount the node held on its lower
 * vnode, remove the node from the hash, clear any cached special-vnode
 * pointer in the mount, and free the per-node structure.
 */
static int
null_reclaim(struct vnop_reclaim_args * ap)
{
	struct vnode * vp;
	struct null_node * xp;
	struct vnode * lowervp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp = VTONULL(vp);
	lowervp = xp->null_lowervp;

	lck_mtx_lock(&null_mp->nullm_lock);

	vnode_removefsref(vp);

	if (lowervp != NULL) {
		/* root and second don't have a lowervp, so nothing to release and nothing
		 * got hashed */
		if (xp->null_flags & NULL_FLAG_HASHED) {
			/* only call this if we actually made it into the hash list. reclaim gets
			   called also to
			   clean up a vnode that got created when it didn't need to under race
			   conditions */
			null_hashrem(xp);
		}
		/* NOTE(review): iocount is taken around the rele — presumably so
		 * lowervp stays valid while the usecount is dropped; confirm */
		vnode_getwithref(lowervp);
		vnode_rele(lowervp);
		vnode_put(lowervp);
	}

	/* forget any cached pointer the mount keeps to this vnode */
	if (vp == null_mp->nullm_rootvp) {
		null_mp->nullm_rootvp = NULL;
	} else if (vp == null_mp->nullm_secondvp) {
		null_mp->nullm_secondvp = NULL;
	} else if (vp == null_mp->nullm_thirdcovervp) {
		null_mp->nullm_thirdcovervp = NULL;
	}

	lck_mtx_unlock(&null_mp->nullm_lock);

	cache_purge(vp);
	vnode_clearfsnode(vp);

	FREE(xp, M_TEMP);

	return 0;
}
/* the mountpoint lock should be held going into this function */
static int
nullfs_isspecialvp(struct vnode * vp)
{
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));

	/* only check for root and second here; third is special in a different
	 * way, related only to lookup and readdir */
	return (vp && (vp == null_mp->nullm_rootvp ||
	    vp == null_mp->nullm_secondvp)) ? 1 : 0;
}
/* helper function to handle locking where possible */
static int
nullfs_checkspecialvp(struct vnode* vp)
{
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
	int special;

	lck_mtx_lock(&null_mp->nullm_lock);
	special = nullfs_isspecialvp(vp);
	lck_mtx_unlock(&null_mp->nullm_lock);

	return special;
}
/*
 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
 * since the reference count on the lower vnode is not related to
 * ours.
 */
static int
null_inactive(struct vop_inactive_args *ap __unused)
{
	struct vnode *vp = ap->a_vp;
	struct null_mount *xmp = MOUNTTONULLMOUNT(vp->v_mount);

	if ((xmp->nullm_flags & NULLM_CACHE) == 0) {
		/*
		 * If this is the last reference and caching of the
		 * nullfs vnodes is not enabled, then free up the
		 * vnode so as not to tie up the lower vnodes.
		 */
		vp->v_object = NULL;
		vrecycle(vp);
	}

	return (0);
}

/*
 * Now, the nullfs vnode and, due to the sharing lock, the lower
 * vnode, are exclusively locked, and we shall destroy the null vnode.
/*
 * VFS-level getattr for the null mount.
 *
 * Volume capabilities are copied from the lower file system when it is
 * still reachable, with persistent-object-id/vget and all interface
 * capabilities masked off; space counters come from the cached
 * vfsstatfs, timestamps are reported as zero, and the volume name is
 * the name of the covered vnode.
 */
static int
nullfs_vfs_getattr(struct mount * mp, struct vfs_attr * vfap, vfs_context_t ctx)
{
	struct vnode * coveredvp = NULL;
	struct vfs_attr vfa;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
	vol_capabilities_attr_t capabilities;
	struct vfsstatfs * sp = vfs_statfs(mp);

	struct timespec tzero = {0, 0};

	NULLFSDEBUG("%s\n", __FUNCTION__);

	/* Set default capabilities in case the lower file system is gone */
	memset(&capabilities, 0, sizeof(capabilities));
	capabilities.capabilities[VOL_CAPABILITIES_FORMAT] =
	    VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_HIDDEN_FILES;
	capabilities.valid[VOL_CAPABILITIES_FORMAT] =
	    VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_HIDDEN_FILES;

	if (nullfs_vfs_getlowerattr(vnode_mount(null_mp->nullm_lowerrootvp), &vfa, ctx) == 0) {
		if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) {
			memcpy(&capabilities, &vfa.f_capabilities, sizeof(capabilities));
			/* don't support vget */
			capabilities.capabilities[VOL_CAPABILITIES_FORMAT] &=
			    ~(VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_PATH_FROM_ID);

			capabilities.capabilities[VOL_CAPABILITIES_FORMAT] |=
			    VOL_CAP_FMT_HIDDEN_FILES; /* Always support UF_HIDDEN */

			capabilities.valid[VOL_CAPABILITIES_FORMAT] &=
			    ~(VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_PATH_FROM_ID);

			capabilities.valid[VOL_CAPABILITIES_FORMAT] |=
			    VOL_CAP_FMT_HIDDEN_FILES; /* Always support UF_HIDDEN */

			/* don't support interfaces that only make sense on a writable file system
			 * or one with specific vnops implemented */
			capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] = 0;

			capabilities.valid[VOL_CAPABILITIES_INTERFACES] &=
			    ~(VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST |
			      VOL_CAP_INT_READDIRATTR | VOL_CAP_INT_EXCHANGEDATA |
			      VOL_CAP_INT_COPYFILE | VOL_CAP_INT_ALLOCATE |
			      VOL_CAP_INT_VOL_RENAME | VOL_CAP_INT_ADVLOCK |
			      VOL_CAP_INT_FLOCK);
		}
	}

	/* timestamps are synthesized as zero for the null layer */
	if (VFSATTR_IS_ACTIVE(vfap, f_create_time))
		VFSATTR_RETURN(vfap, f_create_time, tzero);
	if (VFSATTR_IS_ACTIVE(vfap, f_modify_time))
		VFSATTR_RETURN(vfap, f_modify_time, tzero);
	if (VFSATTR_IS_ACTIVE(vfap, f_access_time))
		VFSATTR_RETURN(vfap, f_access_time, tzero);

	/* size/space counters come straight from the cached vfsstatfs */
	if (VFSATTR_IS_ACTIVE(vfap, f_bsize))
		VFSATTR_RETURN(vfap, f_bsize, sp->f_bsize);
	if (VFSATTR_IS_ACTIVE(vfap, f_iosize))
		VFSATTR_RETURN(vfap, f_iosize, sp->f_iosize);
	if (VFSATTR_IS_ACTIVE(vfap, f_owner))
		VFSATTR_RETURN(vfap, f_owner, 0);
	if (VFSATTR_IS_ACTIVE(vfap, f_blocks))
		VFSATTR_RETURN(vfap, f_blocks, sp->f_blocks);
	if (VFSATTR_IS_ACTIVE(vfap, f_bfree))
		VFSATTR_RETURN(vfap, f_bfree, sp->f_bfree);
	if (VFSATTR_IS_ACTIVE(vfap, f_bavail))
		VFSATTR_RETURN(vfap, f_bavail, sp->f_bavail);
	if (VFSATTR_IS_ACTIVE(vfap, f_bused))
		VFSATTR_RETURN(vfap, f_bused, sp->f_bused);
	if (VFSATTR_IS_ACTIVE(vfap, f_files))
		VFSATTR_RETURN(vfap, f_files, sp->f_files);
	if (VFSATTR_IS_ACTIVE(vfap, f_ffree))
		VFSATTR_RETURN(vfap, f_ffree, sp->f_ffree);
	if (VFSATTR_IS_ACTIVE(vfap, f_fssubtype))
		VFSATTR_RETURN(vfap, f_fssubtype, 0);

	if (VFSATTR_IS_ACTIVE(vfap, f_capabilities)) {
		memcpy(&vfap->f_capabilities, &capabilities, sizeof(vol_capabilities_attr_t));
		VFSATTR_SET_SUPPORTED(vfap, f_capabilities);
	}

	if (VFSATTR_IS_ACTIVE(vfap, f_attributes)) {
		vol_attributes_attr_t * volattr = &vfap->f_attributes;

		/* only the volume name, capabilities and attributes are valid */
		volattr->validattr.commonattr = 0;
		volattr->validattr.volattr =
		    ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
		volattr->validattr.dirattr = 0;
		volattr->validattr.fileattr = 0;
		volattr->validattr.forkattr = 0;

		volattr->nativeattr.commonattr = 0;
		volattr->nativeattr.volattr =
		    ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES;
		volattr->nativeattr.dirattr = 0;
		volattr->nativeattr.fileattr = 0;
		volattr->nativeattr.forkattr = 0;

		VFSATTR_SET_SUPPORTED(vfap, f_attributes);
	}

	if (VFSATTR_IS_ACTIVE(vfap, f_vol_name)) {
		/* The name of the volume is the same as the directory we mounted on */
		coveredvp = vfs_vnodecovered(mp);
		if (coveredvp) {
			const char * name = vnode_getname_printable(coveredvp);
			strlcpy(vfap->f_vol_name, name, MAXPATHLEN);
			vnode_putname_printable(name);

			VFSATTR_SET_SUPPORTED(vfap, f_vol_name);
			vnode_put(coveredvp);
		}
	}

	return 0;
}
/*
 * Free reference to null layer
 *
 * Flushes all vnodes of the mount, drops the references held on the
 * root vnode and the lower root vnode, then tears down the mount
 * private data and clears MNT_LOCAL.
 */
static int
nullfs_unmount(struct mount * mp, int mntflags, __unused vfs_context_t ctx)
{
	struct null_mount * mntdata;
	struct vnode * vp;
	int error, flags;

	NULLFSDEBUG("nullfs_unmount: mp = %p\n", (void *)mp);

	/* check entitlement or superuser*/
	/* NOTE(review): ctx is annotated __unused but is read by
	 * vfs_context_suser() below — the annotation looks stale */
	if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT) &&
	    vfs_context_suser(ctx) != 0) {
		return EPERM;
	}

	if (mntflags & MNT_FORCE) {
		flags = FORCECLOSE;
	} else {
		flags = 0;
	}

	mntdata = MOUNTTONULLMOUNT(mp);
	vp = mntdata->nullm_rootvp;

	// release our reference on the root before flushing.
	// it will get pulled out of the mount structure by reclaim
	vnode_getalways(vp);

	error = vflush(mp, vp, flags);
	if (error) {
		vnode_put(vp);
		return (error);
	}

	/* without MNT_FORCE a still-busy root aborts the unmount */
	if (vnode_isinuse(vp,1) && flags == 0) {
		vnode_put(vp);
		return EBUSY;
	}

	vnode_rele(vp); // Drop reference taken by nullfs_mount
	vnode_put(vp);  // Drop ref taken above

	//Force close to get rid of the last vnode
	(void)vflush(mp, NULL, FORCECLOSE);

	/* no more vnodes, so tear down the mountpoint */
	lck_mtx_lock(&mntdata->nullm_lock);
	vfs_setfsprivate(mp, NULL);
	vnode_getalways(mntdata->nullm_lowerrootvp);
	vnode_rele(mntdata->nullm_lowerrootvp);
	vnode_put(mntdata->nullm_lowerrootvp);
	lck_mtx_unlock(&mntdata->nullm_lock);
	nullfs_destroy_lck(&mntdata->nullm_lock);
	FREE(mntdata, M_TEMP);

	uint64_t vflags = vfs_flags(mp);
	vfs_setflags(mp, vflags & ~MNT_LOCAL);

	return (0);
}
static int nullfs_special_getattr(struct vnop_getattr_args * args) { mount_t mp = vnode_mount(args->a_vp); struct null_mount * null_mp = MOUNTTONULLMOUNT(mp); ino_t ino = NULL_ROOT_INO; struct vnode_attr covered_rootattr; vnode_t checkvp = null_mp->nullm_lowerrootvp; VATTR_INIT(&covered_rootattr); VATTR_WANTED(&covered_rootattr, va_uid); VATTR_WANTED(&covered_rootattr, va_gid); VATTR_WANTED(&covered_rootattr, va_create_time); VATTR_WANTED(&covered_rootattr, va_modify_time); VATTR_WANTED(&covered_rootattr, va_access_time); /* prefer to get this from the lower root vp, but if not (i.e. forced unmount * of lower fs) try the mount point covered vnode */ if (vnode_getwithvid(checkvp, null_mp->nullm_lowerrootvid)) { checkvp = vfs_vnodecovered(mp); if (checkvp == NULL) { return EIO; } } int error = vnode_getattr(checkvp, &covered_rootattr, args->a_context); vnode_put(checkvp); if (error) { /* we should have been able to get attributes fore one of the two choices so * fail if we didn't */ return error; } /* we got the attributes of the vnode we cover so plow ahead */ if (args->a_vp == null_mp->nullm_secondvp) { ino = NULL_SECOND_INO; } VATTR_RETURN(args->a_vap, va_type, vnode_vtype(args->a_vp)); VATTR_RETURN(args->a_vap, va_rdev, 0); VATTR_RETURN(args->a_vap, va_nlink, 3); /* always just ., .., and the child */ VATTR_RETURN(args->a_vap, va_total_size, 0); // hoping this is ok VATTR_RETURN(args->a_vap, va_data_size, 0); // hoping this is ok VATTR_RETURN(args->a_vap, va_data_alloc, 0); VATTR_RETURN(args->a_vap, va_iosize, vfs_statfs(mp)->f_iosize); VATTR_RETURN(args->a_vap, va_fileid, ino); VATTR_RETURN(args->a_vap, va_linkid, ino); VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]); // return the fsid of the mount point VATTR_RETURN(args->a_vap, va_filerev, 0); VATTR_RETURN(args->a_vap, va_gen, 0); VATTR_RETURN(args->a_vap, va_flags, UF_HIDDEN); /* mark our fake directories as hidden. 
People shouldn't be enocouraged to poke around in them */ if (ino == NULL_SECOND_INO) { VATTR_RETURN(args->a_vap, va_parentid, NULL_ROOT_INO); /* no parent at the root, so the only other vnode that goes through this path is second and its parent is 1.*/ } if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) { /* force dr_xr_xr_x */ VATTR_RETURN(args->a_vap, va_mode, S_IFDIR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); } if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) { VATTR_RETURN(args->a_vap, va_uid, covered_rootattr.va_uid); } if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) { VATTR_RETURN(args->a_vap, va_gid, covered_rootattr.va_gid); } if (VATTR_IS_ACTIVE(args->a_vap, va_create_time)) { VATTR_SET_SUPPORTED(args->a_vap, va_create_time); args->a_vap->va_create_time.tv_sec = covered_rootattr.va_create_time.tv_sec; args->a_vap->va_create_time.tv_nsec = covered_rootattr.va_create_time.tv_nsec; } if (VATTR_IS_ACTIVE(args->a_vap, va_modify_time)) { VATTR_SET_SUPPORTED(args->a_vap, va_modify_time); args->a_vap->va_modify_time.tv_sec = covered_rootattr.va_modify_time.tv_sec; args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_modify_time.tv_nsec; } if (VATTR_IS_ACTIVE(args->a_vap, va_access_time)) { VATTR_SET_SUPPORTED(args->a_vap, va_access_time); args->a_vap->va_modify_time.tv_sec = covered_rootattr.va_access_time.tv_sec; args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_access_time.tv_nsec; } return 0; }
/* the mountpoint lock should be held going into this function */
/*
 * Lookup inside the synthetic directories.  In the root only ".", ".."
 * and the wrapper directory "d" (or "D" on a case-insensitive lower fs)
 * exist; inside "d" a lookup succeeds only for the name of the lower
 * root vnode, producing (and caching) the "third" cover vnode.
 */
static int
null_special_lookup(struct vnop_lookup_args * ap)
{
	struct componentname * cnp = ap->a_cnp;
	struct vnode * dvp = ap->a_dvp;
	struct vnode * ldvp = NULL;
	struct vnode * lvp = NULL;
	struct vnode * vp = NULL;
	struct mount * mp = vnode_mount(dvp);
	struct null_mount * null_mp = MOUNTTONULLMOUNT(mp);
	int error = ENOENT;

	if (dvp == null_mp->nullm_rootvp) {
		/* handle . and .. */
		if (cnp->cn_nameptr[0] == '.') {
			if (cnp->cn_namelen == 1 ||
			    (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.')) {
				/* this is the root so both . and .. give back the root */
				vp = dvp;
				error = vnode_get(vp);
				goto end;
			}
		}

		/* our virtual wrapper directory should be d but D is acceptable if the
		 * lower file system is case insensitive */
		if (cnp->cn_namelen == 1 &&
		    (cnp->cn_nameptr[0] == 'd' ||
		     (null_mp->nullm_flags & NULLM_CASEINSENSITIVE
		          ? cnp->cn_nameptr[0] == 'D'
		          : 0))) {
			error = 0;
			/* second vnode is created lazily on first lookup and cached in
			 * the mount (mount mutex held per the contract above) */
			if (null_mp->nullm_secondvp == NULL) {
				error = null_getnewvnode(mp, NULL, dvp, &vp, cnp, 0);
				if (error) {
					goto end;
				}

				null_mp->nullm_secondvp = vp;
			} else {
				vp = null_mp->nullm_secondvp;
				error = vnode_get(vp);
			}
		}
	} else if (dvp == null_mp->nullm_secondvp) {
		/* handle . and .. */
		if (cnp->cn_nameptr[0] == '.') {
			if (cnp->cn_namelen == 1) {
				vp = dvp;
				error = vnode_get(vp);
				goto end;
			} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
				/* parent here is the root vp */
				vp = null_mp->nullm_rootvp;
				error = vnode_get(vp);
				goto end;
			}
		}
		/* nullmp->nullm_lowerrootvp was set at mount time so don't need to lock to
		 * access it */
		/* v_name should be null terminated but cn_nameptr is not necessarily.
		   cn_namelen is the number of characters before the null in either case */
		error = vnode_getwithvid(null_mp->nullm_lowerrootvp,
		    null_mp->nullm_lowerrootvid);
		if (error) {
			goto end;
		}

		/* We don't want to mess with case insensitivity and unicode, so the plan to
		   check here is
		   1. try to get the lower root's parent
		   2. If we get a parent, then perform a lookup on the lower file system
		      using the parent and the passed in cnp
		   3. If that worked and we got a vp, then see if the vp is lowerrootvp. If
		      so we got a match
		   4. Anything else results in ENOENT. */
		error = null_get_lowerparent(null_mp->nullm_lowerrootvp, &ldvp,
		    ap->a_context);

		if (error == 0) {
			error = VNOP_LOOKUP(ldvp, &lvp, cnp, ap->a_context);
			vnode_put(ldvp);

			if (error == 0) {
				if (lvp == null_mp->nullm_lowerrootvp) {
					/* always check the hashmap for a vnode for this, the root of the
					 * mirrored system */
					error = null_nodeget(mp, lvp, dvp, &vp, cnp, 0);

					if (error == 0 && null_mp->nullm_thirdcovervp == NULL) {
						/* if nodeget succeeded then vp has an iocount*/
						null_mp->nullm_thirdcovervp = vp;
					}
				} else {
					error = ENOENT;
				}
				vnode_put(lvp);
			}
		}
		vnode_put(null_mp->nullm_lowerrootvp);
	}

end:
	if (error == 0) {
		*ap->a_vpp = vp;
	}
	return error;
}
/*
 * Emit directory entries for the synthetic root/second directories.
 *
 * The uio offset acts as a cursor: 0 = ".", 1 = "..", 2 = the single
 * child ("d" in the root, or the lower root's name inside "d").
 * Running out of user buffer space (EMSGSIZE) is reported as success
 * with offset/numdirent reflecting only what was actually stored.
 */
static int
nullfs_special_readdir(struct vnop_readdir_args * ap)
{
	struct vnode * vp = ap->a_vp;
	struct uio * uio = ap->a_uio;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
	off_t offset = uio_offset(uio);
	int error = ERANGE;
	int items = 0;
	ino_t ino = 0;
	const char * name = NULL;

	/* extended/seek-offset readdir formats are not supported here */
	if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF))
		return (EINVAL);

	if (offset == 0) {
		/* . case */
		if (vp == null_mp->nullm_rootvp) {
			ino = NULL_ROOT_INO;
		} else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */
			ino = NULL_SECOND_INO;
		}
		error = store_entry_special(ino, ".", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 1) {
		/* .. case */
		/* only get here if vp matches nullm_rootvp or nullm_secondvp */
		ino = NULL_ROOT_INO;
		error = store_entry_special(ino, "..", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 2) {
		/* the directory case */
		if (vp == null_mp->nullm_rootvp) {
			ino = NULL_SECOND_INO;
			name = "d";
		} else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */
			ino = NULL_THIRD_INO;
			if (vnode_getwithvid(null_mp->nullm_lowerrootvp,
			        null_mp->nullm_lowerrootvid)) {
				/* In this case the lower file system has been ripped out from under us,
				   but we don't want to error out
				   Instead we just want d to look empty. */
				error = 0;
				goto out;
			}
			name = vnode_getname_printable(null_mp->nullm_lowerrootvp);
		}
		error = store_entry_special(ino, name, uio);

		if (ino == NULL_THIRD_INO) {
			vnode_putname_printable(name);
			vnode_put(null_mp->nullm_lowerrootvp);
		}

		if (error) {
			goto out;
		}
		offset++;
		items++;
	}

out:
	if (error == EMSGSIZE) {
		error = 0; /* return success if we ran out of space, but we wanted to make
		              sure that we didn't update offset and items incorrectly */
	}
	uio_setoffset(uio, offset);
	if (ap->a_numdirent) {
		*ap->a_numdirent = items;
	}
	return error;
}
/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vnop_lookup_args * ap)
{
	struct componentname * cnp = ap->a_cnp;
	struct vnode * dvp = ap->a_dvp;
	struct vnode *vp, *ldvp, *lvp;
	struct mount * mp;
	struct null_mount * null_mp;
	int error;

	NULLFSDEBUG("%s parent: %p component: %.*s\n", __FUNCTION__, ap->a_dvp,
	    cnp->cn_namelen, cnp->cn_nameptr);

	mp = vnode_mount(dvp);
	/* rename and delete are not allowed. this is a read only file system */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME ||
	    cnp->cn_nameiop == CREATE) {
		return (EROFS);
	}
	null_mp = MOUNTTONULLMOUNT(mp);

	/* the synthetic directories get their own lookup path */
	lck_mtx_lock(&null_mp->nullm_lock);
	if (nullfs_isspecialvp(dvp)) {
		error = null_special_lookup(ap);
		lck_mtx_unlock(&null_mp->nullm_lock);
		return error;
	}
	lck_mtx_unlock(&null_mp->nullm_lock);

	// . and .. handling
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			vp = dvp;
		} else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			/* mount point crossing is handled in null_special_lookup */
			vp = vnode_parent(dvp);
		} else {
			goto notdot;
		}

		error = vp ? vnode_get(vp) : ENOENT;

		if (error == 0) {
			*ap->a_vpp = vp;
		}

		return error;
	}

notdot:
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;

	/*
	 * Hold ldvp. The reference on it, owned by dvp, is lost in
	 * case of dvp reclamation.
	 */
	error = vnode_getwithref(ldvp);
	if (error) {
		return error;
	}

	error = VNOP_LOOKUP(ldvp, &lvp, cnp, ap->a_context);

	vnode_put(ldvp);

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			/* lower lookup resolved back to the directory itself */
			vp = dvp;
			error = vnode_get(vp);
		} else {
			error = null_nodeget(mp, lvp, dvp, &vp, cnp, 0);
		}
		if (error == 0) {
			*ap->a_vpp = vp;
		}
	}

	/* if we got lvp, drop the iocount from VNOP_LOOKUP */
	if (lvp != NULL) {
		vnode_put(lvp);
	}

	return (error);
}