/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	/*
	 * Record how the interlock was held on entry:
	 * 1 - the caller passed it to us via LK_INTERLOCK;
	 * 2 - we had to take it ourselves;
	 * 0 - this thread already owned it independently and
	 *     expects to still own it when we return.
	 */
	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);
		error = vop_stdunlock(ap);
	}

	return (error);
}
/*
 * Look up and return the vnode cached for the given path under dvp,
 * or NULLVP on a miss or when the cached vnode is being destroyed.
 */
static struct vnode *
unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp,
    struct vnode *dvp, char *path)
{
	struct unionfs_node_hashhead *hd;
	struct unionfs_node *unp;
	struct vnode *vp;

	KASSERT((uvp == NULLVP || uvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));
	KASSERT((lvp == NULLVP || lvp->v_type == VDIR),
	    ("unionfs_get_cached_vnode: v_type != VDIR"));

	VI_LOCK(dvp);
	hd = unionfs_get_hashhead(dvp, path);
	LIST_FOREACH(unp, hd, un_hash) {
		if (!strcmp(unp->un_path, path)) {
			vp = UNIONFSTOV(unp);
			VI_LOCK_FLAGS(vp, MTX_DUPOK);
			VI_UNLOCK(dvp);
			vp->v_iflag &= ~VI_OWEINACT;
			/*
			 * A doomed or inactivating vnode cannot be
			 * returned; report a cache miss instead.
			 */
			if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
				VI_UNLOCK(vp);
				vp = NULLVP;
			} else
				VI_UNLOCK(vp);
			return (vp);
		}
	}
	VI_UNLOCK(dvp);

	return (NULLVP);
}
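/*
 * Context (a hedged sketch, not part of the original file): a caller
 * such as unionfs' node-allocation path would probe this cache before
 * building a new node.  The function name below and the elided
 * allocation step are hypothetical; reference and lock handling on a
 * cache hit is deliberately left out.
 */
static struct vnode *
unionfs_lookup_dir_sketch(struct vnode *uvp, struct vnode *lvp,
    struct vnode *dvp, char *path)
{
	struct vnode *vp;

	/* Probe the per-directory hash first. */
	vp = unionfs_get_cached_vnode(uvp, lvp, dvp, path);
	if (vp != NULLVP)
		return (vp);	/* cache hit: reuse the existing node */

	/*
	 * Cache miss: the real caller would allocate a unionfs node
	 * here and insert it under dvp's hash.
	 */
	return (NULLVP);
}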
/* ARGSUSED */
static int
null_getwritemount(struct vop_getwritemount_args *ap)
{
	struct null_node *xp;
	struct vnode *lowervp;
	struct vnode *vp;

	vp = ap->a_vp;
	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp && (lowervp = xp->null_lowervp)) {
		VI_LOCK_FLAGS(lowervp, MTX_DUPOK);
		VI_UNLOCK(vp);
		vholdl(lowervp);
		VI_UNLOCK(lowervp);
		VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
		vdrop(lowervp);
	} else {
		VI_UNLOCK(vp);
		*(ap->a_mpp) = NULL;
	}
	return (0);
}
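/*
 * Consumer sketch (hypothetical; modeled loosely on the kernel's
 * vn_start_write(), with reference handling on the returned mount
 * elided): ask a vnode which mount an impending write should be
 * accounted against.  Through null_getwritemount() a nullfs vnode
 * answers with the mount of its lower vnode, so write-suspension
 * bookkeeping lands on the filesystem that really backs the data.
 */
static struct mount *
write_target_mount_sketch(struct vnode *vp)
{
	struct mount *mp;

	VOP_GETWRITEMOUNT(vp, &mp);
	return (mp);	/* NULL when the vnode has no usable mount */
}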
/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_lock(struct vop_lock1_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) == 0) {
		VI_LOCK(vp);
		ap->a_flags = flags |= LK_INTERLOCK;
	}
	nn = VTONULL(vp);
	/*
	 * If we're still active we must ask the lower layer to
	 * lock as ffs has special lock considerations in its
	 * vop lock.
	 */
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		VI_UNLOCK(vp);
		/*
		 * We have to hold the vnode here to solve a potential
		 * reclaim race.  If we're forcibly vgone'd while we
		 * still have refs, a thread could be sleeping inside
		 * the lowervp's vop_lock routine.  When we vgone we will
		 * drop our last ref to the lowervp, which would allow it
		 * to be reclaimed.  The lowervp could then be recycled,
		 * in which case it is not legal to be sleeping in its VOP.
		 * We prevent it from being recycled by holding the vnode
		 * here.
		 */
		vholdl(lvp);
		error = VOP_LOCK(lvp, flags);

		/*
		 * We might have slept to get the lock and someone might
		 * have cleaned our vnode already, switching the vnode
		 * lock from the one in lowervp to v_lock in our own
		 * vnode structure.  Handle this case by reacquiring the
		 * correct lock in the requested mode.
		 */
		if (VTONULL(vp) == NULL && error == 0) {
			ap->a_flags &= ~(LK_TYPE_MASK | LK_INTERLOCK);
			switch (flags & LK_TYPE_MASK) {
			case LK_SHARED:
				ap->a_flags |= LK_SHARED;
				break;
			case LK_UPGRADE:
			case LK_EXCLUSIVE:
				ap->a_flags |= LK_EXCLUSIVE;
				break;
			default:
				panic("Unsupported lock request %d\n",
				    ap->a_flags);
			}
			VOP_UNLOCK(lvp, 0);
			error = vop_stdlock(ap);
		}
		vdrop(lvp);
	} else
		error = vop_stdlock(ap);

	return (error);
}
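/*
 * For reference (an abbreviated sketch; the real null_vnodeops table
 * in nullfs sets many more entries, and the exact set shown here is
 * assumed): the methods above take effect once installed in the
 * layer's vop_vector.  Operations without an explicit entry are
 * forwarded to the lower layer through the bypass routine.
 */
struct vop_vector null_vnodeops = {
	.vop_bypass =		null_bypass,
	.vop_getwritemount =	null_getwritemount,
	.vop_lock1 =		null_lock,
	.vop_unlock =		null_unlock,
};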