static void
union_renamelock_exit(struct mount *mp)
{
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);

        VFS_RENAMELOCK_EXIT(um->um_uppervp->v_mount);
}
/*
 * union_rmdir(struct vnode *a_dvp, struct vnode *a_vp,
 *             struct componentname *a_cnp)
 */
static int
union_rmdir(struct vop_old_rmdir_args *ap)
{
        struct union_node *dun = VTOUNION(ap->a_dvp);
        struct union_node *un = VTOUNION(ap->a_vp);
        struct componentname *cnp = ap->a_cnp;
        struct thread *td = cnp->cn_td;
        struct vnode *upperdvp;
        struct vnode *uppervp;
        int error;

        if ((upperdvp = union_lock_upper(dun, td)) == NULLVP)
                panic("union rmdir: null upper vnode");

        if ((uppervp = union_lock_upper(un, td)) != NULLVP) {
                if (union_dowhiteout(un, cnp->cn_cred, td))
                        cnp->cn_flags |= CNP_DOWHITEOUT;
                error = VOP_RMDIR(upperdvp, uppervp, ap->a_cnp);
                union_unlock_upper(uppervp, td);
        } else {
                error = union_mkwhiteout(
                            MOUNTTOUNIONMOUNT(ap->a_dvp->v_mount),
                            dun->un_uppervp, ap->a_cnp, un->un_path);
        }
        union_unlock_upper(upperdvp, td);
        return (error);
}
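/*
 * For illustration, the two cases union_rmdir() distinguishes above:
 * if the directory exists in the upper layer it is removed there with
 * VOP_RMDIR() (and CNP_DOWHITEOUT is set when a lower copy would
 * otherwise show through again); if it exists only in the lower layer,
 * no physical removal is possible, so a whiteout entry is created in
 * the upper directory instead.
 */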
int
union_root(struct mount *mp, struct vnode **vpp)
{
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
        int error;

        /*
         * Return a locked reference to the root vnode.
         */
        vref(um->um_uppervp);
        if (um->um_lowervp)
                vref(um->um_lowervp);
        error = union_allocvp(vpp, mp, NULL, NULL, NULL,
                              um->um_uppervp, um->um_lowervp, 1);
        if (error) {
                vrele(um->um_uppervp);
                if (um->um_lowervp)
                        vrele(um->um_lowervp);
                return error;
        }
        vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
        return 0;
}
static int
union_renamelock_enter(struct mount *mp)
{
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);

        /* Lock just the upper fs, where the action happens. */
        return VFS_RENAMELOCK_ENTER(um->um_uppervp->v_mount);
}
/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 *
 * union_access(struct vnode *a_vp, int a_mode,
 *              struct ucred *a_cred, struct thread *a_td)
 */
static int
union_access(struct vop_access_args *ap)
{
        struct union_node *un = VTOUNION(ap->a_vp);
        struct thread *td = ap->a_td;
        int error = EACCES;
        struct vnode *vp;

        /*
         * Disallow write attempts on filesystems mounted read-only.
         */
        if ((ap->a_mode & VWRITE) &&
            (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
                switch (ap->a_vp->v_type) {
                case VREG:
                case VDIR:
                case VLNK:
                        return (EROFS);
                default:
                        break;
                }
        }

        if ((vp = union_lock_upper(un, td)) != NULLVP) {
                ap->a_head.a_ops = *vp->v_ops;
                ap->a_vp = vp;
                error = vop_access_ap(ap);
                union_unlock_upper(vp, td);
                return (error);
        }

        if ((vp = un->un_lowervp) != NULLVP) {
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                ap->a_head.a_ops = *vp->v_ops;
                ap->a_vp = vp;

                /*
                 * Remove VWRITE from a_mode if our mount point is RW,
                 * because we want to allow writes and lowervp may be
                 * read-only.
                 */
                if ((un->un_vnode->v_mount->mnt_flag & MNT_RDONLY) == 0)
                        ap->a_mode &= ~VWRITE;

                error = vop_access_ap(ap);
                if (error == 0) {
                        struct union_mount *um;

                        um = MOUNTTOUNIONMOUNT(un->un_vnode->v_mount);

                        if (um->um_op == UNMNT_BELOW) {
                                ap->a_cred = um->um_cred;
                                error = vop_access_ap(ap);
                        }
                }
                vn_unlock(vp);
        }
        return (error);
}
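/*
 * For illustration: on a read-write union whose lower layer is mounted
 * read-only, a VWRITE check against a file that exists only in the
 * lower layer would always fail if passed through verbatim.  Masking
 * VWRITE out of a_mode above lets the access succeed, since an actual
 * write will be redirected to a copied-up upper vnode anyway.
 */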
int
union_statvfs(struct mount *mp, struct statvfs *sbp)
{
        int error;
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
        struct statvfs *sbuf = malloc(sizeof(*sbuf), M_TEMP,
            M_WAITOK | M_ZERO);
        unsigned long lbsize;

#ifdef UNION_DIAGNOSTIC
        printf("union_statvfs(mp = %p, lvp = %p, uvp = %p)\n", mp,
            um->um_lowervp, um->um_uppervp);
#endif

        if (um->um_lowervp) {
                error = VFS_STATVFS(um->um_lowervp->v_mount, sbuf);
                if (error)
                        goto done;
        }

        /* now copy across the "interesting" information and fake the rest */
        lbsize = sbuf->f_bsize;
        sbp->f_blocks = sbuf->f_blocks - sbuf->f_bfree;
        sbp->f_files = sbuf->f_files - sbuf->f_ffree;

        error = VFS_STATVFS(um->um_uppervp->v_mount, sbuf);
        if (error)
                goto done;

        sbp->f_flag = sbuf->f_flag;
        sbp->f_bsize = sbuf->f_bsize;
        sbp->f_frsize = sbuf->f_frsize;
        sbp->f_iosize = sbuf->f_iosize;

        /*
         * The "total" fields count total resources in all layers,
         * the "free" fields count only those resources which are
         * free in the upper layer (since only the upper layer
         * is writable).
         */
        if (sbuf->f_bsize != lbsize)
                sbp->f_blocks = sbp->f_blocks * lbsize / sbuf->f_bsize;
        sbp->f_blocks += sbuf->f_blocks;
        sbp->f_bfree = sbuf->f_bfree;
        sbp->f_bavail = sbuf->f_bavail;
        sbp->f_bresvd = sbuf->f_bresvd;
        sbp->f_files += sbuf->f_files;
        sbp->f_ffree = sbuf->f_ffree;
        sbp->f_favail = sbuf->f_favail;
        sbp->f_fresvd = sbuf->f_fresvd;

        copy_statvfs_info(sbp, mp);
done:
        free(sbuf, M_TEMP);
        return error;
}
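/*
 * For illustration of the rescaling above (hypothetical numbers): if
 * the lower layer reports f_bsize 512 with 800000 blocks in use and
 * the upper layer reports f_bsize 4096, the lower usage is rescaled to
 * 800000 * 512 / 4096 = 100000 upper-sized blocks before the upper
 * layer's f_blocks is added, so f_blocks ends up expressed uniformly
 * in upper-layer block units.
 */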
/*
 * Free reference to union layer
 */
int
union_unmount(struct mount *mp, int mntflags)
{
        struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
        int freeing;
        int error;

#ifdef UNION_DIAGNOSTIC
        printf("union_unmount(mp = %p)\n", mp);
#endif

        /*
         * Keep flushing vnodes from the mount list.
         * This is needed because of the un_pvp held
         * reference to the parent vnode.
         * If more vnodes have been freed on a given pass,
         * then try again.  The loop will iterate at most
         * (d) times, where (d) is the maximum tree depth
         * in the filesystem.
         */
        for (freeing = 0; (error = vflush(mp, NULL, 0)) != 0;) {
                struct vnode *vp;
                int n;

                /* count #vnodes held on mount list */
                n = 0;
                TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes)
                        n++;

                /* if this is unchanged then stop */
                if (n == freeing)
                        break;

                /* otherwise try one more time */
                freeing = n;
        }

        /*
         * Ok, now that we've tried doing it gently, get out the hammer.
         */
        if (mntflags & MNT_FORCE)
                error = vflush(mp, NULL, FORCECLOSE);

        if (error)
                return error;

        /*
         * Discard references to upper and lower target vnodes.
         */
        if (um->um_lowervp)
                vrele(um->um_lowervp);
        vrele(um->um_uppervp);
        kauth_cred_free(um->um_cred);

        /*
         * Finally, throw away the union_mount structure
         */
        kmem_free(um, sizeof(struct union_mount));
        mp->mnt_data = NULL;
        return 0;
}
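/*
 * For illustration (hypothetical layout): with union nodes for a/,
 * a/b/ and a/b/c/, each child's un_pvp holds a reference on its
 * parent, so the first vflush() pass can only reclaim the leaf a/b/c/,
 * the second reclaims a/b/, and the third reclaims a/ -- one pass per
 * level of depth, which is the (d) bound described in the comment
 * above.
 */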
/*
 * Mount union filesystem
 */
int
union_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
        struct lwp *l = curlwp;
        int error = 0;
        struct union_args *args = data;
        struct vnode *lowerrootvp = NULLVP;
        struct vnode *upperrootvp = NULLVP;
        struct union_mount *um = NULL;
        const char *cp;
        char *xp;
        int len;
        size_t size;

        if (*data_len < sizeof *args)
                return EINVAL;

#ifdef UNION_DIAGNOSTIC
        printf("union_mount(mp = %p)\n", mp);
#endif

        if (mp->mnt_flag & MNT_GETARGS) {
                um = MOUNTTOUNIONMOUNT(mp);
                if (um == NULL)
                        return EIO;
                args->target = NULL;
                args->mntflags = um->um_op;
                *data_len = sizeof *args;
                return 0;
        }

        /*
         * Update is a no-op
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                /*
                 * Need to provide:
                 * 1. a way to convert between rdonly and rdwr mounts.
                 * 2. support for nfs exports.
                 */
                error = EOPNOTSUPP;
                goto bad;
        }

        lowerrootvp = mp->mnt_vnodecovered;
        vref(lowerrootvp);

        /*
         * Find upper node.
         */
        error = namei_simple_user(args->target,
            NSM_FOLLOW_NOEMULROOT, &upperrootvp);
        if (error != 0)
                goto bad;

        if (upperrootvp->v_type != VDIR) {
                error = EINVAL;
                goto bad;
        }

        um = kmem_zalloc(sizeof(struct union_mount), KM_SLEEP);

        /*
         * Keep a held reference to the target vnodes.
         * They are vrele'd in union_unmount.
         *
         * Depending on the _BELOW flag, the filesystems are
         * viewed in a different order.  In effect, this is the
         * same as providing a mount-under option to the mount syscall.
         */

        um->um_op = args->mntflags & UNMNT_OPMASK;
        switch (um->um_op) {
        case UNMNT_ABOVE:
                um->um_lowervp = lowerrootvp;
                um->um_uppervp = upperrootvp;
                break;

        case UNMNT_BELOW:
                um->um_lowervp = upperrootvp;
                um->um_uppervp = lowerrootvp;
                break;

        case UNMNT_REPLACE:
                vrele(lowerrootvp);
                lowerrootvp = NULLVP;
                um->um_uppervp = upperrootvp;
                um->um_lowervp = lowerrootvp;
                break;

        default:
                error = EINVAL;
                goto bad;
        }

        mp->mnt_iflag |= IMNT_MPSAFE;

        /*
         * Unless the mount is readonly, ensure that the top layer
         * supports whiteout operations
         */
        if ((mp->mnt_flag & MNT_RDONLY) == 0) {
                vn_lock(um->um_uppervp, LK_EXCLUSIVE | LK_RETRY);
                error = VOP_WHITEOUT(um->um_uppervp,
                    (struct componentname *) 0, LOOKUP);
                VOP_UNLOCK(um->um_uppervp);
                if (error)
                        goto bad;
        }

        um->um_cred = l->l_cred;
        kauth_cred_hold(um->um_cred);
        um->um_cmode = UN_DIRMODE & ~l->l_proc->p_cwdi->cwdi_cmask;

        /*
         * Depending on what you think the MNT_LOCAL flag might mean,
         * you may want the && to be || on the conditional below.
         * At the moment it has been defined that the filesystem is
         * only local if it is all local, ie the MNT_LOCAL flag implies
         * that the entire namespace is local.  If you think the MNT_LOCAL
         * flag implies that some of the files might be stored locally
         * then you will want to change the conditional.
         */
        if (um->um_op == UNMNT_ABOVE) {
                if (((um->um_lowervp == NULLVP) ||
                     (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
                    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
                        mp->mnt_flag |= MNT_LOCAL;
        }

        /*
         * Copy in the upper layer's RDONLY flag.  This is for the benefit
         * of lookup() which explicitly checks the flag, rather than asking
         * the filesystem for its own opinion.  This means that an update
         * mount of the underlying filesystem to go from rdonly to rdwr
         * will leave the unioned view as read-only.
         */
        mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

        mp->mnt_data = um;
        vfs_getnewfsid(mp);

        error = set_statvfs_info(path, UIO_USERSPACE, NULL, UIO_USERSPACE,
            mp->mnt_op->vfs_name, mp, l);
        if (error)
                goto bad;

        switch (um->um_op) {
        case UNMNT_ABOVE:
                cp = "<above>:";
                break;
        case UNMNT_BELOW:
                cp = "<below>:";
                break;
        case UNMNT_REPLACE:
                cp = "";
                break;
        default:
                cp = "<invalid>:";
#ifdef DIAGNOSTIC
                panic("union_mount: bad um_op");
#endif
                break;
        }
        len = strlen(cp);
        memcpy(mp->mnt_stat.f_mntfromname, cp, len);

        xp = mp->mnt_stat.f_mntfromname + len;
        len = MNAMELEN - len;

        (void) copyinstr(args->target, xp, len - 1, &size);
        memset(xp + size, 0, len - size);

#ifdef UNION_DIAGNOSTIC
        printf("union_mount: from %s, on %s\n",
            mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif

        /* Setup the readdir hook if it's not set already */
        if (!vn_union_readdir_hook)
                vn_union_readdir_hook = union_readdirhook;

        return (0);

bad:
        if (um)
                kmem_free(um, sizeof(struct union_mount));
        if (upperrootvp)
                vrele(upperrootvp);
        if (lowerrootvp)
                vrele(lowerrootvp);
        return (error);
}
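/*
 * Summary of the layer orderings established by the um_op switch in
 * union_mount() above (a restatement for reference, not additional
 * behavior):
 *
 *      um_op           lower layer             upper layer
 *      UNMNT_ABOVE     covered vnode           args->target
 *      UNMNT_BELOW     args->target            covered vnode
 *      UNMNT_REPLACE   (none)                  args->target
 */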
/*
 * union_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
 *              struct componentname *a_cnp)
 */
static int
union_lookup(struct vop_old_lookup_args *ap)
{
        int error;
        int uerror, lerror;
        struct vnode *uppervp, *lowervp;
        struct vnode *upperdvp, *lowerdvp;
        struct vnode *dvp = ap->a_dvp;          /* starting dir */
        struct union_node *dun = VTOUNION(dvp); /* associated union node */
        struct componentname *cnp = ap->a_cnp;
        struct thread *td = cnp->cn_td;
        int lockparent = cnp->cn_flags & CNP_LOCKPARENT;
        struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
        struct ucred *saved_cred = NULL;
        int iswhiteout;
        struct vattr va;

        *ap->a_vpp = NULLVP;

        /*
         * Disallow write attempts to the filesystem mounted read-only.
         */
        if ((dvp->v_mount->mnt_flag & MNT_RDONLY) &&
            (cnp->cn_nameiop == NAMEI_DELETE ||
             cnp->cn_nameiop == NAMEI_RENAME)) {
                return (EROFS);
        }

        /*
         * For any lookups we do, always return with the parent locked.
         */
        cnp->cn_flags |= CNP_LOCKPARENT;

        lowerdvp = dun->un_lowervp;
        uppervp = NULLVP;
        lowervp = NULLVP;
        iswhiteout = 0;

        uerror = ENOENT;
        lerror = ENOENT;

        /*
         * Get a private lock on uppervp and a reference, effectively
         * taking it out of the union_node's control.
         *
         * We must lock upperdvp while holding our lock on dvp
         * to avoid a deadlock.
         */
        upperdvp = union_lock_upper(dun, td);

        /*
         * Do the lookup in the upper level.
         * If that level consumes additional pathnames,
         * then assume that something special is going
         * on and just return that vnode.
         */
        if (upperdvp != NULLVP) {
                /*
                 * We do not have to worry about the DOTDOT case, we've
                 * already unlocked dvp.
                 */
                UDEBUG(("A %p\n", upperdvp));

                /*
                 * Do the lookup.  We must supply a locked and referenced
                 * upperdvp to the function and will get a new locked and
                 * referenced upperdvp back, with the old having been
                 * dereferenced.
                 *
                 * If an error is returned, uppervp will be NULLVP.  If no
                 * error occurs, uppervp will be the locked and referenced
                 * return vnode, or possibly NULL, depending on what is
                 * being requested.  It is possible that the returned
                 * uppervp will be the same as upperdvp.
                 */
                uerror = union_lookup1(um->um_uppervp, &upperdvp,
                    &uppervp, cnp);
                UDEBUG((
                    "uerror %d upperdvp %p %d/%d, uppervp %p ref=%d/lck=%d\n",
                    uerror, upperdvp, upperdvp->v_sysref.refcnt,
                    vn_islocked(upperdvp), uppervp,
                    (uppervp ? uppervp->v_sysref.refcnt : -99),
                    (uppervp ? vn_islocked(uppervp) : -99)));

                /*
                 * Disallow write attempts to the filesystem mounted
                 * read-only.
                 */
                if (uerror == EJUSTRETURN &&
                    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
                    (cnp->cn_nameiop == NAMEI_CREATE ||
                     cnp->cn_nameiop == NAMEI_RENAME)) {
                        error = EROFS;
                        goto out;
                }

                /*
                 * Special case.  If cn_consume != 0 skip out.  The result
                 * of the lookup is transferred to our return variable.  If
                 * an error occurred we have to throw away the results.
                 */
                if (cnp->cn_consume != 0) {
                        if ((error = uerror) == 0) {
                                *ap->a_vpp = uppervp;
                                uppervp = NULL;
                        }
                        goto out;
                }

                /*
                 * Calculate whiteout, fall through.
                 */
                if (uerror == ENOENT || uerror == EJUSTRETURN) {
                        if (cnp->cn_flags & CNP_ISWHITEOUT) {
                                iswhiteout = 1;
                        } else if (lowerdvp != NULLVP) {
                                int terror;

                                terror = VOP_GETATTR(upperdvp, &va);
                                if (terror == 0 && (va.va_flags & OPAQUE))
                                        iswhiteout = 1;
                        }
                }
        }

        /*
         * In a similar way to the upper layer, do the lookup
         * in the lower layer.  This time, if there is some
         * component magic going on, then vput whatever we got
         * back from the upper layer and return the lower vnode
         * instead.
         */
        if (lowerdvp != NULLVP && !iswhiteout) {
                int nameiop;

                UDEBUG(("B %p\n", lowerdvp));

                /*
                 * Force only LOOKUPs on the lower node, since
                 * we won't be making changes to it anyway.
                 */
                nameiop = cnp->cn_nameiop;
                cnp->cn_nameiop = NAMEI_LOOKUP;
                if (um->um_op == UNMNT_BELOW) {
                        saved_cred = cnp->cn_cred;
                        cnp->cn_cred = um->um_cred;
                }

                /*
                 * We shouldn't have to worry about locking interactions
                 * between the lower layer and our union layer (w.r.t.
                 * `..' processing) because we don't futz with lowervp
                 * locks in the union-node instantiation code path.
                 *
                 * union_lookup1() requires lowervp to be locked on entry,
                 * and it will be unlocked on return.  The ref count will
                 * not change.  On return lowervp doesn't represent anything
                 * to us, so we NULL it out.
                 */
                vref(lowerdvp);
                vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY);
                lerror = union_lookup1(um->um_lowervp, &lowerdvp,
                    &lowervp, cnp);
                if (lowerdvp == lowervp)
                        vrele(lowerdvp);
                else
                        vput(lowerdvp);
                lowerdvp = NULL;        /* lowerdvp invalid after vput */

                if (um->um_op == UNMNT_BELOW)
                        cnp->cn_cred = saved_cred;
                cnp->cn_nameiop = nameiop;

                if (cnp->cn_consume != 0 || lerror == EACCES) {
                        if ((error = lerror) == 0) {
                                *ap->a_vpp = lowervp;
                                lowervp = NULL;
                        }
                        goto out;
                }
        } else {
                UDEBUG(("C %p\n", lowerdvp));
                if ((cnp->cn_flags & CNP_ISDOTDOT) &&
                    dun->un_pvp != NULLVP) {
                        if ((lowervp = LOWERVP(dun->un_pvp)) != NULL) {
                                vref(lowervp);
                                vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY);
                                lerror = 0;
                        }
                }
        }

        /*
         * Ok.  Now we have uerror, uppervp, upperdvp, lerror, and lowervp.
         *
         * 1. If both layers returned an error, select the upper layer.
         *
         * 2. If the upper layer failed and the bottom layer succeeded,
         *    two subcases occur:
         *
         *      a.  The bottom vnode is not a directory, in which case
         *          just return a new union vnode referencing an
         *          empty top layer and the existing bottom layer.
         *
         *      b.  The bottom vnode is a directory, in which case
         *          create a new directory in the top layer and
         *          fall through to case 3.
         *
         * 3. If the top layer succeeded, then return a new union
         *    vnode referencing whatever the new top layer and
         *    whatever the bottom layer returned.
         */

        /* case 1. */
        if ((uerror != 0) && (lerror != 0)) {
                error = uerror;
                goto out;
        }

        /* case 2. */
        if (uerror != 0 /* && (lerror == 0) */ ) {
                if (lowervp->v_type == VDIR) { /* case 2b. */
                        KASSERT(uppervp == NULL,
                            ("uppervp unexpectedly non-NULL"));
                        /*
                         * Oops, uppervp has a problem, we may have
                         * to shadow.
                         */
                        uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
                        if (uerror) {
                                error = uerror;
                                goto out;
                        }
                }
        }

        /*
         * Must call union_allocvp() with both the upper and lower vnodes
         * referenced and the upper vnode locked.  ap->a_vpp is returned
         * referenced and locked.  lowervp, uppervp, and upperdvp are
         * absorbed by union_allocvp() whether it succeeds or fails.
         *
         * upperdvp is the parent directory of uppervp which may be
         * different, depending on the path, from dun->un_uppervp.  That's
         * why it is a separate argument.  Note that it must be unlocked.
         *
         * dvp must be locked on entry to the call and will be locked on
         * return.
         */
        if (uppervp && uppervp != upperdvp)
                vn_unlock(uppervp);
        if (lowervp)
                vn_unlock(lowervp);
        if (upperdvp)
                vn_unlock(upperdvp);

        error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
            uppervp, lowervp, 1);

        UDEBUG(("Create %p = %p %p refs=%d\n", *ap->a_vpp, uppervp, lowervp,
            (*ap->a_vpp) ? ((*ap->a_vpp)->v_sysref.refcnt) : -99));

        uppervp = NULL;
        upperdvp = NULL;
        lowervp = NULL;

        /*
         * Termination Code
         *
         * - put away any extra junk lying around.  Note that lowervp
         *   (if not NULL) will never be the same as *ap->a_vp and
         *   neither will uppervp, because when we set that state we
         *   NULL-out lowervp or uppervp.  On the other hand, upperdvp
         *   may match uppervp or *ap->a_vpp.
         *
         * - relock/unlock dvp if appropriate.
         */
out:
        if (upperdvp) {
                if (upperdvp == uppervp || upperdvp == *ap->a_vpp)
                        vrele(upperdvp);
                else
                        vput(upperdvp);
        }
        if (uppervp)
                vput(uppervp);
        if (lowervp)
                vput(lowervp);

        /*
         * Restore LOCKPARENT state
         */
        if (!lockparent)
                cnp->cn_flags &= ~CNP_LOCKPARENT;

        UDEBUG(("Out %d vpp %p/%d lower %p upper %p\n", error, *ap->a_vpp,
            ((*ap->a_vpp) ? (*ap->a_vpp)->v_sysref.refcnt : -99),
            lowervp, uppervp));

        /*
         * dvp lock state, determine whether to relock dvp.  dvp is
         * expected to be locked on return if:
         *
         *      - there was an error (except EJUSTRETURN), or
         *      - we hit the last component and lockparent is true
         *
         * This does not count the possibility that *ap->a_vpp == dvp
         * (in which case dvp is locked anyway).  Note that
         * *ap->a_vpp == dvp only if no error occurred.
         */
        if (*ap->a_vpp != dvp) {
                if ((error == 0 || error == EJUSTRETURN) && !lockparent) {
                        vn_unlock(dvp);
                }
        }

        /*
         * Diagnostics
         */
#ifdef DIAGNOSTIC
        if (cnp->cn_namelen == 1 &&
            cnp->cn_nameptr[0] == '.' &&
            *ap->a_vpp != dvp) {
                panic("union_lookup returning . (%p) not same as startdir (%p)",
                    *ap->a_vpp, dvp);
        }
#endif

        return (error);
}
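/*
 * For illustration of case 2b above: a lookup of "doc" that misses in
 * the upper layer but finds a directory in the lower layer calls
 * union_mkshadow() to create an empty shadow directory in the upper
 * layer, so the resulting union node has both layers and future
 * modifications under "doc" have an upper directory to land in.
 * ("doc" is a hypothetical name used only for this example.)
 */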
/*
 * union_rename(struct vnode *a_fdvp, struct vnode *a_fvp,
 *              struct componentname *a_fcnp, struct vnode *a_tdvp,
 *              struct vnode *a_tvp, struct componentname *a_tcnp)
 */
static int
union_rename(struct vop_old_rename_args *ap)
{
        int error;
        struct vnode *fdvp = ap->a_fdvp;
        struct vnode *fvp = ap->a_fvp;
        struct vnode *tdvp = ap->a_tdvp;
        struct vnode *tvp = ap->a_tvp;

        /*
         * Figure out what fdvp to pass to our upper or lower vnode.  If we
         * replace the fdvp, release the original one and ref the new one.
         */
        if (fdvp->v_tag == VT_UNION) {  /* always true */
                struct union_node *un = VTOUNION(fdvp);

                if (un->un_uppervp == NULLVP) {
                        /*
                         * this should never happen in normal
                         * operation but might if there was
                         * a problem creating the top-level shadow
                         * directory.
                         */
                        error = EXDEV;
                        goto bad;
                }
                fdvp = un->un_uppervp;
                vref(fdvp);
                vrele(ap->a_fdvp);
        }

        /*
         * Figure out what fvp to pass to our upper or lower vnode.  If we
         * replace the fvp, release the original one and ref the new one.
         */
        if (fvp->v_tag == VT_UNION) {   /* always true */
                struct union_node *un = VTOUNION(fvp);
#if 0
                struct union_mount *um = MOUNTTOUNIONMOUNT(fvp->v_mount);
#endif

                if (un->un_uppervp == NULLVP) {
                        switch(fvp->v_type) {
                        case VREG:
                                vn_lock(un->un_vnode,
                                    LK_EXCLUSIVE | LK_RETRY);
                                error = union_copyup(un, 1,
                                    ap->a_fcnp->cn_cred, ap->a_fcnp->cn_td);
                                vn_unlock(un->un_vnode);
                                if (error)
                                        goto bad;
                                break;
                        case VDIR:
                                /*
                                 * XXX not yet.
                                 *
                                 * There is only one way to rename a
                                 * directory based in the lowervp, and that
                                 * is to copy the entire directory
                                 * hierarchy.  Otherwise it would not last
                                 * across a reboot.
                                 */
#if 0
                                vrele(fvp);
                                fvp = NULL;
                                vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);
                                error = union_mkshadow(um, fdvp,
                                    ap->a_fcnp, &un->un_uppervp);
                                vn_unlock(fdvp);
                                if (un->un_uppervp)
                                        vn_unlock(un->un_uppervp);
                                if (error)
                                        goto bad;
                                break;
#endif
                        default:
                                error = EXDEV;
                                goto bad;
                        }
                }

                if (un->un_lowervp != NULLVP)
                        ap->a_fcnp->cn_flags |= CNP_DOWHITEOUT;
                fvp = un->un_uppervp;
                vref(fvp);
                vrele(ap->a_fvp);
        }

        /*
         * Figure out what tdvp (destination directory) to pass to the
         * lower level.  If we replace it with uppervp, we need to vput the
         * old one.  The exclusive lock is transferred to what we will pass
         * down in the VOP_RENAME() and we replace uppervp with a simple
         * reference.
         */
        if (tdvp->v_tag == VT_UNION) {
                struct union_node *un = VTOUNION(tdvp);

                if (un->un_uppervp == NULLVP) {
                        /*
                         * this should never happen in normal
                         * operation but might if there was
                         * a problem creating the top-level shadow
                         * directory.
                         */
                        error = EXDEV;
                        goto bad;
                }

                /*
                 * new tdvp is a lock and reference on uppervp, put away
                 * the old tdvp.
                 */
                tdvp = union_lock_upper(un, ap->a_tcnp->cn_td);
                vput(ap->a_tdvp);
        }

        /*
         * Figure out what tvp (destination file) to pass to the
         * lower level.
         *
         * If the uppervp file does not exist, put away the (wrong)
         * file and change tvp to NULL.
         */
        if (tvp != NULLVP && tvp->v_tag == VT_UNION) {
                struct union_node *un = VTOUNION(tvp);

                tvp = union_lock_upper(un, ap->a_tcnp->cn_td);
                vput(ap->a_tvp);
                /* note: tvp may be NULL */
        }

        /*
         * VOP_RENAME() releases/vputs prior to returning, so we have no
         * cleanup to do.
         */
        return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

        /*
         * Error.  We still have to release / vput the various elements.
         */
bad:
        vrele(fdvp);
        if (fvp)
                vrele(fvp);
        vput(tdvp);
        if (tvp != NULLVP) {
                if (tvp != tdvp)
                        vput(tvp);
                else
                        vrele(tvp);
        }
        return (error);
}
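/*
 * For illustration of the source-side handling above: renaming a
 * regular file that exists only in the lower layer first copies it up
 * with union_copyup(), and when a lower copy exists at the old name,
 * CNP_DOWHITEOUT makes the upper-layer VOP_RENAME() leave a whiteout
 * behind so the lower file does not reappear at the old name.
 * Directories based only in the lower layer cannot be renamed and
 * return EXDEV.
 */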