/*
 * layerfs_vget: get the vnode for the given inode number from the
 * underlying file system and return a locked layered vnode on top of it.
 */
int
layerfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = VFS_VGET(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, ino, &vp);
	if (error) {
		*vpp = NULL;
		return error;
	}
	VOP_UNLOCK(vp);
	error = layer_node_create(mp, vp, vpp);
	if (error) {
		vrele(vp);
		*vpp = NULL;
		return error;
	}
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}
	return 0;
}
/*
 * layerfs_fhtovp: get the vnode for the given file handle from the
 * underlying file system and return a locked layered vnode on top of it.
 */
int
layerfs_fhtovp(struct mount *mp, struct fid *fidp, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = VFS_FHTOVP(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, fidp, &vp);
	if (error) {
		*vpp = NULL;
		return error;
	}
	VOP_UNLOCK(vp);
	error = layer_node_create(mp, vp, vpp);
	if (error) {
		/* Note: vp was unlocked above, so use vrele(), not vput(). */
		vrele(vp);
		*vpp = NULL;
		return error;
	}
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
		return error;
	}
	return 0;
}
int
layerfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	struct statvfs *sbuf;
	int error;

	sbuf = kmem_zalloc(sizeof(*sbuf), KM_SLEEP);
	if (sbuf == NULL) {
		return ENOMEM;
	}
	error = VFS_STATVFS(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, sbuf);
	if (error) {
		goto done;
	}
	/* Copy across the relevant data and fake the rest. */
	sbp->f_flag = sbuf->f_flag;
	sbp->f_bsize = sbuf->f_bsize;
	sbp->f_frsize = sbuf->f_frsize;
	sbp->f_iosize = sbuf->f_iosize;
	sbp->f_blocks = sbuf->f_blocks;
	sbp->f_bfree = sbuf->f_bfree;
	sbp->f_bavail = sbuf->f_bavail;
	sbp->f_bresvd = sbuf->f_bresvd;
	sbp->f_files = sbuf->f_files;
	sbp->f_ffree = sbuf->f_ffree;
	sbp->f_favail = sbuf->f_favail;
	sbp->f_fresvd = sbuf->f_fresvd;
	sbp->f_namemax = sbuf->f_namemax;
	copy_statvfs_info(sbp, mp);
done:
	kmem_free(sbuf, sizeof(*sbuf));
	return error;
}
/*
 * layerfs_loadvnode: initialise a layered vnode created by the vnode
 * cache.  The cache key is the address of the lower vnode.
 */
int
layerfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct vnode *lowervp;
	struct layer_node *xp;

	KASSERT(key_len == sizeof(struct vnode *));
	memcpy(&lowervp, key, key_len);

	xp = kmem_alloc(lmp->layerm_size, KM_SLEEP);
	if (xp == NULL)
		return ENOMEM;

	/* Share the interlock with the lower node. */
	mutex_obj_hold(lowervp->v_interlock);
	uvm_obj_setlock(&vp->v_uobj, lowervp->v_interlock);

	vp->v_tag = lmp->layerm_tag;
	vp->v_type = lowervp->v_type;
	vp->v_op = lmp->layerm_vnodeop_p;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_init(vp, lowervp->v_rdev);
	vp->v_data = xp;
	xp->layer_vnode = vp;
	xp->layer_lowervp = lowervp;
	xp->layer_flags = 0;
	uvm_vnp_setsize(vp, 0);

	/* Add a reference to the lower node. */
	vref(lowervp);
	*new_key = &xp->layer_lowervp;
	return 0;
}
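/*
 * Illustrative sketch (not from this file): layerfs_loadvnode() is not
 * called directly.  A layered file system asks the vnode cache for the
 * upper vnode, keyed by the lower vnode's address, and the cache invokes
 * the loadvnode hook only on a miss.  The helper name below is
 * hypothetical; vcache_get() returns the vnode referenced and unlocked.
 */
static int
example_layer_getnode(struct mount *mp, struct vnode *lowervp,
    struct vnode **vpp)
{

	/* The key bytes are copied out by layerfs_loadvnode() above. */
	return vcache_get(mp, &lowervp, sizeof(lowervp), vpp);
}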
/*
 * VFS start.  Nothing needed here - the start routine on the underlying
 * filesystem will have been called when that filesystem was mounted.
 */
int
layerfs_start(struct mount *mp, int flags)
{

#ifdef notyet
	return VFS_START(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, flags);
#else
	return 0;
#endif
}
/*
 * layer_node_create: try to find an existing layerfs vnode referring to
 * the given lower vnode; otherwise make a new vnode which contains a
 * reference to the lower vnode.
 *
 * => Caller must pass the lower node referenced and unlocked; on success
 *    that reference is consumed by the layer node.
 */
int
layer_node_create(struct mount *mp, struct vnode *lowervp, struct vnode **nvpp)
{
	struct vnode *aliasvp;
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);

	mutex_enter(&lmp->layerm_hashlock);
	aliasvp = layer_node_find(mp, lowervp);
	if (aliasvp != NULL) {
		/*
		 * Note: layer_node_find() has taken another reference to
		 * the alias vnode and released the hash lock for us.
		 */
#ifdef LAYERFS_DIAGNOSTIC
		if (layerfs_debug)
			vprint("layer_node_create: exists", aliasvp);
#endif
	} else {
		int error;

		mutex_exit(&lmp->layerm_hashlock);
		/*
		 * Get a new vnode.  Make it reference the layer_node.
		 * Note: aliasvp will be returned with a reference held.
		 */
		error = (lmp->layerm_alloc)(mp, lowervp, &aliasvp);
		if (error) {
			return error;
		}
#ifdef LAYERFS_DIAGNOSTIC
		if (layerfs_debug)
			printf("layer_node_create: create new alias vnode\n");
#endif
	}

	/*
	 * Now that we acquired a reference on the upper vnode, release one
	 * on the lower node.  The existence of the layer_node retains one
	 * reference to the lower node.
	 */
	vrele(lowervp);
	KASSERT(lowervp->v_usecount > 0);
#ifdef LAYERFS_DIAGNOSTIC
	if (layerfs_debug)
		vprint("layer_node_create: alias", aliasvp);
#endif
	*nvpp = aliasvp;
	return 0;
}
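/*
 * Illustrative sketch (not from this file): the reference protocol for
 * layer_node_create() in one place, mirroring layerfs_vget() above.
 * The lower vnode is passed referenced; on success that reference is
 * consumed by the layer node, on failure it remains the caller's to
 * drop.  The helper name is hypothetical.
 */
static int
example_wrap_lower(struct mount *mp, struct vnode *lowervp,
    struct vnode **vpp)
{
	int error;

	/* lowervp: referenced and locked, e.g. fresh from VFS_VGET(). */
	VOP_UNLOCK(lowervp);
	error = layer_node_create(mp, lowervp, vpp);
	if (error) {
		/* Failure: the lower reference is still ours to drop. */
		vrele(lowervp);
		return error;
	}
	/* Success: lock the alias; it is already referenced. */
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
	}
	return error;
}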
int
layerfs_root(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;

	vp = MOUNTTOLAYERMOUNT(mp)->layerm_rootvp;
	if (vp == NULL) {
		*vpp = NULL;
		return EINVAL;
	}
	/*
	 * Return the root vnode locked and with a reference held.
	 */
	vref(vp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	*vpp = vp;
	return 0;
}
/*
 * layer_node_find: find and return the alias for the lower vnode, or NULL.
 *
 * => Returns the alias vnode with a reference held, if it already exists.
 * => The layer mount's hashlock must be held on entry; we unlock it
 *    on success.
 */
struct vnode *
layer_node_find(struct mount *mp, struct vnode *lowervp)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct layer_node_hashhead *hd;
	struct layer_node *a;
	struct vnode *vp;
	int error;

	/*
	 * Find the hash bucket and search its (doubly) linked list for a
	 * layerfs node structure which references the lower vnode.  If one
	 * is found, take a reference to the layer vnode, but do NOT touch
	 * the lower vnode's reference count.
	 */
	KASSERT(mutex_owned(&lmp->layerm_hashlock));
	hd = LAYER_NHASH(lmp, lowervp);
loop:
	LIST_FOREACH(a, hd, layer_hash) {
		if (a->layer_lowervp != lowervp) {
			continue;
		}
		vp = LAYERTOV(a);
		if (vp->v_mount != mp) {
			continue;
		}
		mutex_enter(vp->v_interlock);
		mutex_exit(&lmp->layerm_hashlock);
		error = vget(vp, 0);
		if (error) {
			mutex_enter(&lmp->layerm_hashlock);
			goto loop;
		}
		return vp;
	}
	return NULL;
}
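/*
 * Illustrative note (assumption, not from this file): LAYER_NHASH() is
 * expected to hash the lower vnode's address into a bucket of the
 * per-mount table.  With hypothetical field names layerm_node_hashtbl
 * and layerm_node_hash (the mask), a plausible definition would be:
 *
 *	#define LAYER_NHASH(lmp, vp)					\
 *	    (&((lmp)->layerm_node_hashtbl[				\
 *	    ((unsigned long)(vp) >> 9) & (lmp)->layerm_node_hash]))
 *
 * The low bits of a heap-allocated vnode address carry little entropy,
 * hence the right shift before masking.
 */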
int
layerfs_quotactl(struct mount *mp, struct quotactl_args *args)
{

	return VFS_QUOTACTL(MOUNTTOLAYERMOUNT(mp)->layerm_vfs, args);
}
/*
 * layer_node_alloc: make a new layerfs vnode.
 *
 * => vp is the alias vnode, lowervp is the lower vnode.
 * => We will hold a reference to lowervp.
 */
int
layer_node_alloc(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct layer_node_hashhead *hd;
	struct layer_node *xp;
	struct vnode *vp, *nvp;
	int error;

	/* Get a new vnode and share its interlock with the underlying vnode. */
	error = getnewvnode(lmp->layerm_tag, mp, lmp->layerm_vnodeop_p,
	    lowervp->v_interlock, &vp);
	if (error) {
		return error;
	}
	vp->v_type = lowervp->v_type;
	mutex_enter(vp->v_interlock);
	vp->v_iflag |= VI_LAYER;
	mutex_exit(vp->v_interlock);

	xp = kmem_alloc(lmp->layerm_size, KM_SLEEP);
	if (xp == NULL) {
		ungetnewvnode(vp);
		return ENOMEM;
	}
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		spec_node_init(vp, lowervp->v_rdev);
	}

	/*
	 * Before inserting the node into the hash, check whether another
	 * thread raced with us.  If so, return that node and destroy ours.
	 */
	mutex_enter(&lmp->layerm_hashlock);
	if ((nvp = layer_node_find(mp, lowervp)) != NULL) {
		/* Note: layer_node_find() released the hash lock for us. */
		ungetnewvnode(vp);
		kmem_free(xp, lmp->layerm_size);
		*vpp = nvp;
		return 0;
	}

	vp->v_data = xp;
	vp->v_vflag = (vp->v_vflag & ~VV_MPSAFE) |
	    (lowervp->v_vflag & VV_MPSAFE);
	xp->layer_vnode = vp;
	xp->layer_lowervp = lowervp;
	xp->layer_flags = 0;

	/*
	 * Insert the new node into the hash.
	 * Add a reference to the lower node.
	 */
	vref(lowervp);
	hd = LAYER_NHASH(lmp, lowervp);
	LIST_INSERT_HEAD(hd, xp, layer_hash);
	uvm_vnp_setsize(vp, 0);
	mutex_exit(&lmp->layerm_hashlock);

	*vpp = vp;
	return 0;
}
/*
 * Mount overlay layer.
 */
int
ov_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	int error = 0;
	struct overlay_args *args = data;
	struct vnode *lowerrootvp, *vp;
	struct overlay_mount *nmp;
	struct layer_mount *lmp;

#ifdef OVERLAYFS_DIAGNOSTIC
	printf("ov_mount(mp = %p)\n", mp);
#endif

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		lmp = MOUNTTOLAYERMOUNT(mp);
		if (lmp == NULL)
			return EIO;
		args->la.target = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	/*
	 * Update is not supported.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
		return EOPNOTSUPP;

	/*
	 * Find the lower node.
	 */
	lowerrootvp = mp->mnt_vnodecovered;
	vref(lowerrootvp);
	if ((error = vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY))) {
		vrele(lowerrootvp);
		return error;
	}

	/*
	 * First cut at fixing up the upper mount point.
	 */
	nmp = kmem_zalloc(sizeof(struct overlay_mount), KM_SLEEP);
	mp->mnt_data = nmp;
	nmp->ovm_vfs = lowerrootvp->v_mount;
	if (nmp->ovm_vfs->mnt_flag & MNT_LOCAL)
		mp->mnt_flag |= MNT_LOCAL;

	/*
	 * Make sure that the mount point is sufficiently initialized
	 * that the node create call will work.
	 */
	vfs_getnewfsid(mp);

	nmp->ovm_size = sizeof(struct overlay_node);
	nmp->ovm_tag = VT_OVERLAY;
	nmp->ovm_bypass = layer_bypass;
	/* layer_node_create() calls through this hook, so it must be set. */
	nmp->ovm_alloc = layer_node_alloc;
	nmp->ovm_vnodeop_p = overlay_vnodeop_p;

	/*
	 * Fix up the overlay node for the root vnode.
	 */
	VOP_UNLOCK(lowerrootvp);
	error = layer_node_create(mp, lowerrootvp, &vp);
	/*
	 * Make sure the fixup worked.
	 */
	if (error) {
		vrele(lowerrootvp);
		kmem_free(nmp, sizeof(struct overlay_mount));
		return error;
	}

	/*
	 * Keep a held reference to the root vnode.  It is vrele'd in
	 * ov_unmount.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_ROOT;
	nmp->ovm_rootvp = vp;
	VOP_UNLOCK(vp);

	error = set_statvfs_info(path, UIO_USERSPACE, args->la.target,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
#ifdef OVERLAYFS_DIAGNOSTIC
	printf("ov_mount: lower %s, alias at %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif
	return error;
}
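/*
 * Illustrative sketch (not from this file, abridged): how these entry
 * points are typically wired into a vfsops table.  The exact member set
 * of struct vfsops varies between NetBSD versions, so this only shows
 * the shape of the glue; ov_unmount (referenced above but not shown)
 * would fill vfs_unmount.
 */
extern const struct vnodeopv_desc overlay_vnodeop_opv_desc;

const struct vnodeopv_desc * const overlay_vnodeopv_descs[] = {
	&overlay_vnodeop_opv_desc,
	NULL,
};

struct vfsops overlay_vfsops = {
	.vfs_name	= MOUNT_OVERLAY,
	.vfs_mount	= ov_mount,
	.vfs_start	= layerfs_start,
	.vfs_root	= layerfs_root,
	.vfs_quotactl	= layerfs_quotactl,
	.vfs_statvfs	= layerfs_statvfs,
	.vfs_vget	= layerfs_vget,
	.vfs_fhtovp	= layerfs_fhtovp,
	.vfs_loadvnode	= layerfs_loadvnode,
	.vfs_opv_descs	= overlay_vnodeopv_descs,
};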