/*
 * Try to create a journal log inside the filesystem.
 */
int
wapbl_create_infs_log(struct mount *mp, struct fs *fs, struct vnode *devvp,
    daddr_t *startp, size_t *countp, uint64_t *extradatap)
{
	struct vnode *vp, *rvp;
	struct inode *ip;
	int error;

	if ((error = VFS_ROOT(mp, &rvp)) != 0)
		return error;

	error = UFS_INODE_ALLOC(VTOI(rvp), 0 | S_IFREG, NOCRED, &vp);
	if (mp->mnt_flag & MNT_UPDATE) {
		vput(rvp);
	} else {
		VOP_UNLOCK(rvp, 0);
		vgone(rvp);
	}
	if (error != 0)
		return error;

	vp->v_type = VREG;
	ip = VTOI(vp);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	/* ip->i_mode = 0 | IFREG; */
	DIP_ASSIGN(ip, mode, 0 | IFREG);
	/* ip->i_flags = SF_LOG; */
	DIP_ASSIGN(ip, flags, SF_LOG);
	ip->i_effnlink = 1;
	DIP_ASSIGN(ip, nlink, 1);
	ffs_update(ip, MNT_WAIT);

	if ((error = wapbl_allocate_log_file(mp, vp,
	    startp, countp, extradatap)) != 0) {
		/*
		 * If we couldn't allocate the space for the log file,
		 * remove the inode by setting its link count back to
		 * zero and bail.
		 */
		ip->i_effnlink = 0;
		DIP_ASSIGN(ip, nlink, 0);
		VOP_UNLOCK(vp, 0);
		vgone(vp);
		return error;
	}

	/*
	 * Now that we have the place-holder inode for the journal,
	 * we don't need the vnode ever again.
	 */
	VOP_UNLOCK(vp, 0);
	vgone(vp);

	return 0;
}
static int
_xfs_rename(struct vop_rename_args
	/* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap)
{
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tdvp = ap->a_tdvp;
	/* struct componentname *tcnp = ap->a_tcnp; */
	/* struct componentname *fcnp = ap->a_fcnp; */
	int error = EPERM;

	/* Rename is not supported: fail with EPERM, releasing all
	 * the references taken by the lookup below. */
	if (error)
		goto out;

	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	if (tvp && tvp->v_usecount > 1) {
		error = EBUSY;
		goto out;
	}

	if (fvp->v_type == VDIR) {
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tdvp);
		cache_purge(fdvp);
	}
out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	vgone(fvp);
	if (tvp)
		vgone(tvp);
	return (error);
}
/*
 * Free reference to overlay layer
 */
int
ov_unmount(struct mount *mp, int mntflags)
{
	struct vnode *overlay_rootvp = MOUNTTOOVERLAYMOUNT(mp)->ovm_rootvp;
	struct overlay_mount *omp;
	int error;
	int flags = 0;

#ifdef OVERLAYFS_DIAGNOSTIC
	printf("ov_unmount(mp = %p)\n", mp);
#endif

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if (overlay_rootvp->v_usecount > 1 && (mntflags & MNT_FORCE) == 0)
		return (EBUSY);
	if ((error = vflush(mp, overlay_rootvp, flags)) != 0)
		return (error);

#ifdef OVERLAYFS_DIAGNOSTIC
	vprint("alias root of lower", overlay_rootvp);
#endif
	/*
	 * Blow it away for future re-use
	 */
	vgone(overlay_rootvp);
	/*
	 * Finally, throw away the overlay_mount structure
	 */
	omp = mp->mnt_data;
	kmem_free(omp, sizeof(struct overlay_mount));
	mp->mnt_data = NULL;
	return 0;
}
int
fdesc_unmount(struct mount *mp, int mntflags)
{
	int error;
	int flags = 0;
	struct vnode *rtvp = VFSTOFDESC(mp)->f_root;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if (rtvp->v_usecount > 1 && (mntflags & MNT_FORCE) == 0)
		return (EBUSY);
	if ((error = vflush(mp, rtvp, flags)) != 0)
		return (error);

	/*
	 * Blow it away for future re-use
	 */
	vgone(rtvp);
	/*
	 * Finally, throw away the fdescmount structure
	 */
	free(mp->mnt_data, M_UFSMNT);	/* XXX */
	mp->mnt_data = NULL;

	return (0);
}
/*
 * Purge the cache of dead entries
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep.  We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns.  In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */
static void
pfs_purge_locked(struct pfs_node *pn, bool force)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;

	mtx_assert(&pfs_vncache_mutex, MA_OWNED);
	pvd = pfs_vncache;
	while (pvd != NULL) {
		if (force || pvd->pvd_dead ||
		    (pn != NULL && pvd->pvd_pn == pn)) {
			vnp = pvd->pvd_vnode;
			vhold(vnp);
			mtx_unlock(&pfs_vncache_mutex);
			VOP_LOCK(vnp, LK_EXCLUSIVE);
			vgone(vnp);
			VOP_UNLOCK(vnp, 0);
			mtx_lock(&pfs_vncache_mutex);
			vdrop(vnp);
			pvd = pfs_vncache;
		} else {
			pvd = pvd->pvd_next;
		}
	}
}
/*
 * If allocating a vnode fails, call this.
 */
static void
fdesc_insmntque_dtr(struct vnode *vp, void *arg)
{

	vgone(vp);
	vput(vp);
}
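A destructor like the one above is registered when a freshly allocated vnode is attached to its mount, so the vnode can be torn down if the mount is disappearing underneath it. A minimal sketch of that registration, assuming the FreeBSD insmntque1() interface that fdesc_allocvp() uses later in this section; myfs_attach_vnode is a hypothetical helper, not part of fdescfs:

static int
myfs_attach_vnode(struct vnode *vp, struct mount *mp)
{
	int error;

	/*
	 * On failure, insmntque1() invokes the supplied destructor with
	 * vp locked; fdesc_insmntque_dtr() then does vgone() + vput(),
	 * so the caller must not touch vp again.
	 */
	error = insmntque1(vp, mp, fdesc_insmntque_dtr, NULL);
	if (error != 0)
		return (error);	/* vp has already been destroyed */
	return (0);
}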
int
osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
{
	struct vnode *vp;
	int code;

	vp = AFSTOV(avc);

	if (!VI_TRYLOCK(vp))
		return 0;
	code = osi_fbsd_checkinuse(avc);
	if (code != 0) {
		VI_UNLOCK(vp);
		return 0;
	}

	if ((vp->v_iflag & VI_DOOMED) != 0) {
		VI_UNLOCK(vp);
		return 1;
	}

	/* must hold the vnode before calling vgone()
	 * This code largely copied from vfs_subr.c:vlrureclaim() */
	vholdl(vp);

	AFS_GUNLOCK();
	*slept = 1;

	/* use the interlock while locking, so no one else can DOOM this */
	ma_vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, curthread);
	vgone(vp);
	MA_VOP_UNLOCK(vp, 0, curthread);
	vdrop(vp);

	AFS_GLOCK();
	return 1;
}
/*
 * Free reference to umap layer
 */
int
umapfs_unmount(struct mount *mp, int mntflags)
{
	struct umap_mount *amp = MOUNTTOUMAPMOUNT(mp);
	struct vnode *rtvp = amp->umapm_rootvp;
	int error;
	int flags = 0;

#ifdef UMAPFS_DIAGNOSTIC
	printf("umapfs_unmount(mp = %p)\n", mp);
#endif

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if (rtvp->v_usecount > 1 && (mntflags & MNT_FORCE) == 0)
		return (EBUSY);
	if ((error = vflush(mp, rtvp, flags)) != 0)
		return (error);

#ifdef UMAPFS_DIAGNOSTIC
	vprint("alias root of lower", rtvp);
#endif
	/*
	 * Blow it away for future re-use
	 */
	vgone(rtvp);
	/*
	 * Finally, throw away the umap_mount structure
	 */
	kmem_free(amp, sizeof(struct umap_mount));
	mp->mnt_data = NULL;
	return 0;
}
/*
 * Mknod vnode call
 */
int
ufs_mknod(void *v)
{
	struct vop_mknod_args *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct inode *ip;
	int error;

	if ((error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, vpp, ap->a_cnp)) != 0)
		return (error);
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	ip = VTOI(*vpp);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
		DIP_ASSIGN(ip, rdev, vap->va_rdev);
	}
	/*
	 * Remove inode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	vput(*vpp);
	(*vpp)->v_type = VNON;
	vgone(*vpp);
	*vpp = NULL;
	return (0);
}
/*
 * _inactive is called when the pfsnode
 * is vrele'd and the reference count goes
 * to zero.  (vp) will be on the vnode free
 * list, so to get it back vget() must be
 * used.
 *
 * for procfs, check if the process is still
 * alive and if it isn't then just throw away
 * the vnode by calling vgone().  this may
 * be overkill and a waste of time since the
 * chances are that the process will still be
 * there and pfind is not free.
 *
 * (vp) is not locked on entry or exit.
 */
int
procfs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct pfsnode *pfs = VTOPFS(vp);

	if (pfind(pfs->pfs_pid) == NULL && !(vp->v_flag & VXLOCK))
		vgone(vp);

	return (0);
}
static void
pefs_insmntque_dtr(struct vnode *vp, void *_pn)
{
	struct pefs_node *pn = _pn;

	PEFSDEBUG("pefs_insmntque_dtr: free node %p\n", pn);
	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	pefs_key_release(pn->pn_tkey.ptk_key);
	uma_zfree(pefs_node_zone, pn);
	vp->v_op = &dead_vnodeops;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vgone(vp);
	vput(vp);
}
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held.  If it is dropped and re-acquired,
 *   *slept should be set to warn the caller.
 *
 * Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
 * is not dropped and re-acquired for any platform.  It may be that *slept is
 * therefore obsolescent.
 */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
	struct vm_object *obj;
	struct vnode *vp = AFSTOV(avc);

	if (!VI_TRYLOCK(vp)) /* need interlock to check usecount */
		return EBUSY;

	if (vp->v_usecount > 0) {
		VI_UNLOCK(vp);
		return EBUSY;
	}

	/* XXX
	 * The value of avc->opens here came to be, at some point,
	 * typically -1.  This was caused by incorrectly performing afs_close
	 * processing on vnodes being recycled */
	if (avc->opens) {
		VI_UNLOCK(vp);
		return EBUSY;
	}

	/* if a lock is held, give up */
	if (CheckLock(&avc->lock)) {
		VI_UNLOCK(vp);
		return EBUSY;
	}

	if ((vp->v_iflag & VI_DOOMED) != 0) {
		VI_UNLOCK(vp);
		return (0);
	}

	/* must hold the vnode before calling vgone()
	 * This code largely copied from vfs_subr.c:vlrureclaim() */
	vholdl(vp);
	AFS_GUNLOCK();
	*slept = 1;
	/* use the interlock while locking, so no one else can DOOM this */
	ilock_vnode(vp);
	vgone(vp);
	unlock_vnode(vp);
	vdrop(vp);
	AFS_GLOCK();
	return 0;
}
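The two OpenAFS functions above wrap the same reclaim sequence, borrowed from vfs_subr.c:vlrureclaim(), in AFS portability macros (ma_vn_lock, ilock_vnode, unlock_vnode). A distilled sketch of that sequence in plain FreeBSD 8-era primitives, for reference only; reclaim_one is a hypothetical helper, not OpenAFS code:

static void
reclaim_one(struct vnode *vp)
{
	ASSERT_VI_LOCKED(vp, "reclaim_one");

	vholdl(vp);	/* keep vp from being freed out from under us */
	/* vn_lock() consumes the interlock, so nobody else can doom vp */
	vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY);
	vgone(vp);	/* reclaim: vp is VI_DOOMED afterwards */
	VOP_UNLOCK(vp, 0);
	vdrop(vp);	/* release the hold taken above */
}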
/* ARGSUSED */
int
ext2fs_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct inode *ip;
	int error;
	struct mount *mp;
	ino_t ino;

	if ((error = ext2fs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, vpp, ap->a_cnp)) != 0)
		return (error);
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	ip = VTOI(*vpp);
	mp = (*vpp)->v_mount;
	ino = ip->i_number;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
		ip->i_din.e2fs_din->e2di_rdev = h2fs32(vap->va_rdev);
	}
	/*
	 * Remove inode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	(*vpp)->v_type = VNON;
	VOP_UNLOCK(*vpp);
	vgone(*vpp);
	error = VFS_VGET(mp, ino, vpp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	VOP_UNLOCK(*vpp);
	return (0);
}
static int
unionfs_revoke(void *v)
{
	struct vop_revoke_args *ap = v;
	struct unionfs_node *unp;
	struct vnode *tvp;
	int error;

	unp = VTOUNIONFS(ap->a_vp);
	tvp = (unp->un_uppervp != NULLVP ?
	    unp->un_uppervp : unp->un_lowervp);
	error = VOP_REVOKE(tvp, ap->a_flags);
	if (error == 0) {
		vgone(ap->a_vp);	/* ??? */
	}

	return error;
}
int
osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
{
	*slept = 0;
	if (!VREFCOUNT_GT(avc, 0) && avc->opens == 0 &&
	    (avc->f.states & CUnlinkedDel) == 0) {
		/*
		 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
		 * then it puts the vnode on the free list.
		 * If we don't do this we end up with a cleaned vnode that's
		 * not on the free list.
		 */
		AFS_GUNLOCK();
		vgone(AFSTOV(avc));
		AFS_GLOCK();
		return 1;
	}
	return 0;
}
/* Unmount the filesystem described by mp. */
int
smbfs_unmount(struct mount *mp, int mntflags)
{
	struct lwp *l = curlwp;
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smb_cred scred;
	struct vnode *smbfs_rootvp = SMBTOV(smp->sm_root);
	int error, flags;

	SMBVDEBUG("smbfs_unmount: flags=%04x\n", mntflags);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if (smbfs_rootvp->v_usecount > 1 && (mntflags & MNT_FORCE) == 0)
		return EBUSY;

	/* Flush all vnodes.
	 * Keep trying to flush the vnode list for the mount while
	 * some are still busy and we are making progress towards
	 * making them not busy.  This is needed because smbfs vnodes
	 * reference their parent directory but may appear after their
	 * parent in the list; one pass over the vnode list is not
	 * sufficient in this case.
	 */
	do {
		smp->sm_didrele = 0;
		error = vflush(mp, smbfs_rootvp, flags);
	} while (error == EBUSY && smp->sm_didrele != 0);
	if (error)
		return error;

	vgone(smbfs_rootvp);

	smb_makescred(&scred, l, l->l_cred);
	smb_share_lock(smp->sm_share);
	smb_share_put(smp->sm_share, &scred);
	mp->mnt_data = NULL;

	hashdone(smp->sm_hash, HASH_LIST, smp->sm_hashlen);
	mutex_destroy(&smp->sm_hashlock);
	free(smp, M_SMBFSDATA);
	return 0;
}
/* ARGSUSED */
static int
ext2_mknod(struct vop_mknod_args *ap)
{
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct inode *ip;
	ino_t ino;
	int error;

	error = ext2_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, vpp, ap->a_cnp);
	if (error)
		return (error);
	ip = VTOI(*vpp);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
		ip->i_rdev = vap->va_rdev;
	}
	/*
	 * Remove inode, then reload it through VFS_VGET so it is
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.  XXX I don't believe this is necessary now.
	 */
	(*vpp)->v_type = VNON;
	ino = ip->i_number;	/* Save this before vgone() invalidates ip. */
	vgone(*vpp);
	vput(*vpp);
	error = VFS_VGET(ap->a_dvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		*vpp = NULL;
		return (error);
	}
	return (0);
}
/*
 * Disable a pseudofs node, and free all vnodes associated with it
 */
int
pfs_disable(struct pfs_node *pn)
{
	struct pfs_vdata *pvd, *prev;

	if (pn->pn_flags & PFS_DISABLED)
		return (0);
	mtx_lock(&pfs_vncache_mutex);
	pn->pn_flags |= PFS_DISABLED;
	/* see the comment about the double loop in pfs_exit() */
	/* XXX linear search... not very efficient */
	for (pvd = pfs_vncache; pvd != NULL; pvd = pvd->pvd_next) {
		while (pvd != NULL && pvd->pvd_pn == pn) {
			prev = pvd->pvd_prev;
			vgone(pvd->pvd_vnode);
			pvd = prev ? prev->pvd_next : pfs_vncache;
		}
		if (pvd == NULL)
			break;
	}
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
/*
 * Free all vnodes associated with a defunct process
 */
static void
pfs_exit(struct proc *p)
{
	struct pfs_vdata *pvd, *prev;

	mtx_lock(&pfs_vncache_mutex);
	/*
	 * The double loop is necessary because vgone() indirectly
	 * calls pfs_vncache_free() which frees pvd, so we have to
	 * backtrace one step every time we free a vnode.
	 */
	/* XXX linear search... not very efficient */
	for (pvd = pfs_vncache; pvd != NULL; pvd = pvd->pvd_next) {
		while (pvd != NULL && pvd->pvd_pid == p->p_pid) {
			prev = pvd->pvd_prev;
			vgone(pvd->pvd_vnode);
			pvd = prev ? prev->pvd_next : pfs_vncache;
		}
		if (pvd == NULL)
			break;
	}
	mtx_unlock(&pfs_vncache_mutex);
}
int
fdesc_unmount(struct mount *mp, int mntflags)
{
	int error;
	int flags = 0;
	struct vnode *rtvp = mp->mnt_data;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if (rtvp->v_usecount > 1 && (mntflags & MNT_FORCE) == 0)
		return (EBUSY);
	if ((error = vflush(mp, rtvp, flags)) != 0)
		return (error);

	/*
	 * Blow it away for future re-use
	 */
	vgone(rtvp);
	mp->mnt_data = NULL;

	return (0);
}
/*
 * Last reference to a node.  If necessary, write or delete it.
 *
 * hpfs_inactive(struct vnode *a_vp)
 */
int
hpfs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	int error;

	dprintf(("hpfs_inactive(0x%x): \n", hp->h_no));

	if (hp->h_flag & H_CHANGE) {
		dprintf(("hpfs_inactive: node changed, update\n"));
		error = hpfs_update(hp);
		if (error)
			return (error);
	}

	if (hp->h_flag & H_PARCHANGE) {
		dprintf(("hpfs_inactive: parent node changed, update\n"));
		error = hpfs_updateparent(hp);
		if (error)
			return (error);
	}

	if (prtactive && vp->v_sysref.refcnt > 1)
		vprint("hpfs_inactive: pushing active", vp);

	if (hp->h_flag & H_INVAL) {
#if defined(__DragonFly__)
		vrecycle(vp);
#else /* defined(__NetBSD__) */
		vgone(vp);
#endif
		return (0);
	}
	return (0);
}
/*
 * Look up a vnode/nfsnode by file handle and store the pointer in *npp.
 * Callers must check for mount points!!
 * An error number is returned.
 */
int
nfs_nget(struct mount *mnt, nfsfh_t *fh, int fhsize, struct nfsnode **npp)
{
	struct nfsmount *nmp;
	struct nfsnode *np, find, *np2;
	struct vnode *vp, *nvp;
	struct proc *p = curproc;	/* XXX */
	int error;

	nmp = VFSTONFS(mnt);

loop:
	rw_enter_write(&nfs_hashlock);
	find.n_fhp = fh;
	find.n_fhsize = fhsize;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		rw_exit_write(&nfs_hashlock);
		vp = NFSTOV(np);
		error = vget(vp, LK_EXCLUSIVE, p);
		if (error)
			goto loop;
		*npp = np;
		return (0);
	}

	/*
	 * getnewvnode() could recycle a vnode, potentially formerly
	 * owned by NFS.  This will cause a VOP_RECLAIM() to happen,
	 * which will cause recursive locking, so we unlock before
	 * calling getnewvnode() and lock again afterwards, but must
	 * check to see if this nfsnode has been added while we did
	 * not hold the lock.
	 */
	rw_exit_write(&nfs_hashlock);
	error = getnewvnode(VT_NFS, mnt, &nfs_vops, &nvp);
	/* note that we don't have this vnode set up completely yet */
	rw_enter_write(&nfs_hashlock);
	if (error) {
		*npp = NULL;
		rw_exit_write(&nfs_hashlock);
		return (error);
	}
	nvp->v_flag |= VLARVAL;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		vgone(nvp);
		rw_exit_write(&nfs_hashlock);
		goto loop;
	}

	vp = nvp;
	np = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
	vp->v_data = np;
	/* we now have an nfsnode on this vnode */
	vp->v_flag &= ~VLARVAL;
	np->n_vnode = vp;

	rw_init(&np->n_commitlock, "nfs_commitlk");

	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct
	 */
	if ((fhsize == nmp->nm_fhsize) && !bcmp(fh, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_flag |= VROOT;
	}

	np->n_fhp = &np->n_fh;
	bcopy(fh, np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	np2 = RB_INSERT(nfs_nodetree, &nmp->nm_ntree, np);
	KASSERT(np2 == NULL);
	np->n_accstamp = -1;
	rw_exit(&nfs_hashlock);
	*npp = np;
	return (0);
}
int
udf_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct buf *bp;
	struct vnode *devvp;
	struct umount *ump;
	struct proc *p;
	struct vnode *vp, *nvp;
	struct unode *up;
	struct extfile_entry *xfe;
	struct file_entry *fe;
	int error, sector, size;

	p = curproc;
	bp = NULL;
	*vpp = NULL;
	ump = VFSTOUDFFS(mp);

	/* See if we already have this in the cache */
	if ((error = udf_hashlookup(ump, ino, LK_EXCLUSIVE, vpp)) != 0)
		return (error);
	if (*vpp != NULL)
		return (0);

	/*
	 * Allocate memory and check the tag id's before grabbing a new
	 * vnode, since it's hard to roll back if there is a problem.
	 */
	up = pool_get(&unode_pool, PR_WAITOK | PR_ZERO);

	/*
	 * Copy in the file entry.  Per the spec, the size can only be 1 block.
	 */
	sector = ino;
	devvp = ump->um_devvp;
	udf_vat_map(ump, &sector);
	if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		pool_put(&unode_pool, up);
		if (bp != NULL)
			brelse(bp);
		return (error);
	}

	xfe = (struct extfile_entry *)bp->b_data;
	fe = (struct file_entry *)bp->b_data;
	error = udf_checktag(&xfe->tag, TAGID_EXTFENTRY);
	if (error == 0) {
		size = letoh32(xfe->l_ea) + letoh32(xfe->l_ad);
	} else {
		error = udf_checktag(&fe->tag, TAGID_FENTRY);
		if (error) {
			printf("Invalid file entry!\n");
			pool_put(&unode_pool, up);
			if (bp != NULL)
				brelse(bp);
			return (ENOMEM);
		} else
			size = letoh32(fe->l_ea) + letoh32(fe->l_ad);
	}

	/* Allocate max size of FE/XFE. */
	up->u_fentry = malloc(size + UDF_EXTFENTRY_SIZE, M_UDFFENTRY,
	    M_NOWAIT | M_ZERO);
	if (up->u_fentry == NULL) {
		pool_put(&unode_pool, up);
		if (bp != NULL)
			brelse(bp);
		return (ENOMEM); /* Cannot allocate file entry block */
	}

	if (udf_checktag(&xfe->tag, TAGID_EXTFENTRY) == 0)
		bcopy(bp->b_data, up->u_fentry, size + UDF_EXTFENTRY_SIZE);
	else
		bcopy(bp->b_data, up->u_fentry, size + UDF_FENTRY_SIZE);

	brelse(bp);
	bp = NULL;

	if ((error = udf_allocv(mp, &vp, p))) {
		free(up->u_fentry, M_UDFFENTRY);
		pool_put(&unode_pool, up);
		return (error); /* Error from udf_allocv() */
	}

	up->u_vnode = vp;
	up->u_ino = ino;
	up->u_devvp = ump->um_devvp;
	up->u_dev = ump->um_dev;
	up->u_ump = ump;
	vp->v_data = up;
	vref(ump->um_devvp);

	lockinit(&up->u_lock, PINOD, "unode", 0, 0);

	/*
	 * udf_hashins() will lock the vnode for us.
	 */
	udf_hashins(up);

	switch (up->u_fentry->icbtag.file_type) {
	default:
		printf("Unrecognized file type (%d)\n", vp->v_type);
		vp->v_type = VREG;
		break;
	case UDF_ICB_FILETYPE_DIRECTORY:
		vp->v_type = VDIR;
		break;
	case UDF_ICB_FILETYPE_BLOCKDEVICE:
		vp->v_type = VBLK;
		break;
	case UDF_ICB_FILETYPE_CHARDEVICE:
		vp->v_type = VCHR;
		break;
	case UDF_ICB_FILETYPE_FIFO:
		vp->v_type = VFIFO;
		break;
	case UDF_ICB_FILETYPE_SOCKET:
		vp->v_type = VSOCK;
		break;
	case UDF_ICB_FILETYPE_SYMLINK:
		vp->v_type = VLNK;
		break;
	case UDF_ICB_FILETYPE_RANDOMACCESS:
	case UDF_ICB_FILETYPE_REALTIME:
	case UDF_ICB_FILETYPE_UNKNOWN:
		vp->v_type = VREG;
		break;
	}

	/* check if this is a vnode alias */
	if ((nvp = checkalias(vp, up->u_dev, ump->um_mountp)) != NULL) {
		printf("found a vnode alias\n");
		/*
		 * Discard unneeded vnode, but save its udf_node.
		 * Note that the lock is carried over in the udf_node
		 */
		nvp->v_data = vp->v_data;
		vp->v_data = NULL;
		vp->v_op = spec_vnodeop_p;
		vrele(vp);
		vgone(vp);
		/*
		 * Reinitialize aliased inode.
		 */
		vp = nvp;
		ump->um_devvp = vp;
	}

	*vpp = vp;

	return (0);
}
int
udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	struct buf *bp;
	struct vnode *devvp;
	struct udf_mnt *udfmp;
	struct thread *td;
	struct vnode *vp;
	struct udf_node *unode;
	struct file_entry *fe;
	int error, sector, size;

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * We must promote to an exclusive lock for vnode creation.  This
	 * can happen if lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation as it is believed to be too
	 * expensive for such a rare case as simultaneous creation of a
	 * vnode for the same ino by different processes.  We just allow
	 * them to race and check later to decide who wins.  Let the race
	 * begin!
	 */
	td = curthread;
	udfmp = VFSTOUDFFS(mp);

	unode = uma_zalloc(udf_zone_node, M_WAITOK | M_ZERO);

	if ((error = udf_allocv(mp, &vp, td))) {
		printf("Error from udf_allocv\n");
		uma_zfree(udf_zone_node, unode);
		return (error);
	}

	unode->i_vnode = vp;
	unode->hash_id = ino;
	unode->udfmp = udfmp;
	vp->v_data = unode;

	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree(udf_zone_node, unode);
		return (error);
	}
	error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * Copy in the file entry.  Per the spec, the size can only be 1 block.
	 */
	sector = ino + udfmp->part_start;
	devvp = udfmp->im_devvp;
	if ((error = RDSECTOR(devvp, sector, udfmp->bsize, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

	fe = (struct file_entry *)bp->b_data;
	if (udf_checktag(&fe->tag, TAGID_FENTRY)) {
		printf("Invalid file entry!\n");
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (ENOMEM);
	}

	size = UDF_FENTRY_SIZE + le32toh(fe->l_ea) + le32toh(fe->l_ad);
	unode->fentry = malloc(size, M_UDFFENTRY, M_NOWAIT | M_ZERO);
	if (unode->fentry == NULL) {
		printf("Cannot allocate file entry block\n");
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (ENOMEM);
	}

	bcopy(bp->b_data, unode->fentry, size);

	brelse(bp);
	bp = NULL;

	switch (unode->fentry->icbtag.file_type) {
	default:
		vp->v_type = VBAD;
		break;
	case 4:
		vp->v_type = VDIR;
		break;
	case 5:
		vp->v_type = VREG;
		break;
	case 6:
		vp->v_type = VBLK;
		break;
	case 7:
		vp->v_type = VCHR;
		break;
	case 9:
		vp->v_type = VFIFO;
		vp->v_op = &udf_fifoops;
		break;
	case 10:
		vp->v_type = VSOCK;
		break;
	case 12:
		vp->v_type = VLNK;
		break;
	}

	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);

	if (ino == udf_getid(&udfmp->root_icb))
		vp->v_vflag |= VV_ROOT;

	*vpp = vp;

	return (0);
}
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp, const char *dirnm,
    int dirlen, const char *name, int nmlen, char sep,
    struct smbfattr *fap, struct vnode **vpp)
{
	struct vattr vattr;
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode *np, *dnp;
	struct vnode *vp, *vp2;
	struct smbcmp sc;
	char *p, *rpath;
	int error, rplen;

	sc.n_parent = dvp;
	sc.n_nmlen = nmlen;
	sc.n_name = name;
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	error = vfs_hash_get(mp, smbfs_hash(name, nmlen), LK_EXCLUSIVE, td,
	    vpp, smbfs_vnode_cmp, &sc);
	if (error)
		return (error);
	if (*vpp) {
		np = VTOSMB(*vpp);
		/* Force cached attributes to be refreshed if stale. */
		(void)VOP_GETATTR(*vpp, &vattr, td->td_ucred);
		/*
		 * If the file type on the server is inconsistent with
		 * what it was when we created the vnode, kill the
		 * bogus vnode now and fall through to the code below
		 * to create a new one with the right type.
		 */
		if (((*vpp)->v_type == VDIR &&
		    (np->n_dosattr & SMB_FA_DIR) == 0) ||
		    ((*vpp)->v_type == VREG &&
		    (np->n_dosattr & SMB_FA_DIR) != 0)) {
			vgone(*vpp);
			vput(*vpp);
		} else {
			SMBVDEBUG("vnode taken from the hashtable\n");
			return (0);
		}
	}
	/*
	 * If we don't have node attributes, then it is an explicit lookup
	 * for an existing vnode.
	 */
	if (fap == NULL)
		return ENOENT;

	error = getnewvnode("smbfs", mp, &smbfs_vnodeops, vpp);
	if (error)
		return (error);
	vp = *vpp;
	np = malloc(sizeof *np, M_SMBNODE, M_WAITOK | M_ZERO);
	rplen = dirlen;
	if (sep != '\0')
		rplen++;
	rplen += nmlen;
	rpath = malloc(rplen + 1, M_SMBNODENAME, M_WAITOK);
	p = rpath;
	bcopy(dirnm, p, dirlen);
	p += dirlen;
	if (sep != '\0')
		*p++ = sep;
	if (name != NULL) {
		bcopy(name, p, nmlen);
		p += nmlen;
	}
	*p = '\0';
	MPASS(p == rpath + rplen);
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	/* Vnode initialization */
	vp->v_type = fap->fa_attr & SMB_FA_DIR ? VDIR : VREG;
	vp->v_data = np;
	np->n_vnode = vp;
	np->n_mount = VFSTOSMBFS(mp);
	np->n_rpath = rpath;
	np->n_rplen = rplen;
	np->n_nmlen = nmlen;
	np->n_name = smbfs_name_alloc(name, nmlen);
	np->n_ino = fap->fa_ino;
	if (dvp) {
		ASSERT_VOP_LOCKED(dvp, "smbfs_node_alloc");
		np->n_parent = dvp;
		np->n_parentino = VTOSMB(dvp)->n_ino;
		if (/*vp->v_type == VDIR &&*/ (dvp->v_vflag & VV_ROOT) == 0) {
			vref(dvp);
			np->n_flag |= NREFPARENT;
		}
	} else if (vp->v_type == VREG)
		SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
	error = insmntque(vp, mp);
	if (error) {
		free(np, M_SMBNODE);
		return (error);
	}
	error = vfs_hash_insert(vp, smbfs_hash(name, nmlen), LK_EXCLUSIVE,
	    td, &vp2, smbfs_vnode_cmp, &sc);
	if (error)
		return (error);
	if (vp2 != NULL)
		*vpp = vp2;
	return (0);
}
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp, const char *name,
    int nmlen, struct smbfattr *fap, struct vnode **vpp)
{
	struct vattr vattr;
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode_hashhead *nhpp;
	struct smbnode *np, *np2, *dnp;
	struct vnode *vp;
	u_long hashval;
	int error;

	*vpp = NULL;
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	hashval = smbfs_hash(name, nmlen);
retry:
	smbfs_hash_lock(smp);
loop:
	nhpp = SMBFS_NOHASH(smp, hashval);
	LIST_FOREACH(np, nhpp, n_hash) {
		vp = SMBTOV(np);
		if (np->n_parent != dvp || np->n_nmlen != nmlen ||
		    bcmp(name, np->n_name, nmlen) != 0)
			continue;
		VI_LOCK(vp);
		smbfs_hash_unlock(smp);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0)
			goto retry;
		/* Force cached attributes to be refreshed if stale. */
		(void)VOP_GETATTR(vp, &vattr, td->td_ucred);
		/*
		 * If the file type on the server is inconsistent with
		 * what it was when we created the vnode, kill the
		 * bogus vnode now and fall through to the code below
		 * to create a new one with the right type.
		 */
		if ((vp->v_type == VDIR &&
		    (np->n_dosattr & SMB_FA_DIR) == 0) ||
		    (vp->v_type == VREG &&
		    (np->n_dosattr & SMB_FA_DIR) != 0)) {
			vgone(vp);
			vput(vp);
			break;
		}
		*vpp = vp;
		return 0;
	}
	/*
	 * ... the remainder of the function (allocation and hashing of a
	 * new node) is not included in this excerpt ...
	 */
/* ARGSUSED */
int
ufs_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vattr *vap;
	struct vnode **vpp;
	struct inode *ip;
	int error;
	struct mount *mp;
	ino_t ino;
	struct ufs_lookup_results *ulr;

	vap = ap->a_vap;
	vpp = ap->a_vpp;

	/* XXX should handle this material another way */
	ulr = &VTOI(ap->a_dvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(ap->a_dvp));

	/*
	 * UFS_WAPBL_BEGIN1(dvp->v_mount, dvp) performed by successful
	 * ufs_makeinode
	 */
	fstrans_start(ap->a_dvp->v_mount, FSTRANS_SHARED);
	if ((error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, ulr, vpp, ap->a_cnp)) != 0)
		goto out;
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	ip = VTOI(*vpp);
	mp = (*vpp)->v_mount;
	ino = ip->i_number;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		struct ufsmount *ump = ip->i_ump;
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
		if (ump->um_fstype == UFS1)
			ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
			    UFS_MPNEEDSWAP(ump));
		else
			ip->i_ffs2_rdev = ufs_rw64(vap->va_rdev,
			    UFS_MPNEEDSWAP(ump));
	}

	UFS_WAPBL_UPDATE(*vpp, NULL, NULL, 0);
	UFS_WAPBL_END1(ap->a_dvp->v_mount, ap->a_dvp);
	/*
	 * Remove inode so that it will be reloaded by vcache_get and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	(*vpp)->v_type = VNON;
	VOP_UNLOCK(*vpp);
	vgone(*vpp);
	error = vcache_get(mp, &ino, sizeof(ino), vpp);
out:
	fstrans_done(ap->a_dvp->v_mount);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	return (0);
}
int
fdesc_allocvp(fdntype ftype, unsigned fd_fd, int ix, struct mount *mp,
    struct vnode **vpp)
{
	struct fdescmount *fmp;
	struct fdhashhead *fc;
	struct fdescnode *fd, *fd2;
	struct vnode *vp, *vp2;
	struct thread *td;
	int error = 0;

	td = curthread;
	fc = FD_NHASH(ix);
loop:
	mtx_lock(&fdesc_hashmtx);
	/*
	 * If a forced unmount is progressing, we need to drop it.  The
	 * flags are protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		return (-1);
	}

	LIST_FOREACH(fd, fc, fd_hash) {
		if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
			/* Get reference to vnode in case it's being free'd */
			vp = fd->fd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&fdesc_hashmtx);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
				goto loop;
			*vpp = vp;
			return (0);
		}
	}
	mtx_unlock(&fdesc_hashmtx);

	fd = malloc(sizeof(struct fdescnode), M_TEMP, M_WAITOK);

	error = getnewvnode("fdescfs", mp, &fdesc_vnodeops, &vp);
	if (error) {
		free(fd, M_TEMP);
		return (error);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_data = fd;
	fd->fd_vnode = vp;
	fd->fd_type = ftype;
	fd->fd_fd = fd_fd;
	fd->fd_ix = ix;
	error = insmntque1(vp, mp, fdesc_insmntque_dtr, NULL);
	if (error != 0) {
		*vpp = NULLVP;
		return (error);
	}

	/* Make sure that someone didn't beat us when inserting the vnode. */
	mtx_lock(&fdesc_hashmtx);
	/*
	 * If a forced unmount is progressing, we need to drop it.  The
	 * flags are protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		vgone(vp);
		vput(vp);
		*vpp = NULLVP;
		return (-1);
	}

	LIST_FOREACH(fd2, fc, fd_hash) {
		if (fd2->fd_ix == ix && fd2->fd_vnode->v_mount == mp) {
			/* Get reference to vnode in case it's being free'd */
			vp2 = fd2->fd_vnode;
			VI_LOCK(vp2);
			mtx_unlock(&fdesc_hashmtx);
			error = vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK, td);
			/* Someone beat us, dec use count and wait for reclaim */
			vgone(vp);
			vput(vp);
			/* If we didn't get it, return no vnode. */
			if (error)
				vp2 = NULLVP;
			*vpp = vp2;
			return (error);
		}
	}

	/* If we came here, we can insert it safely. */
	LIST_INSERT_HEAD(fc, fd, fd_hash);
	mtx_unlock(&fdesc_hashmtx);
	*vpp = vp;
	return (0);
}
/*
 * Initialize the vnode associated with a new inode, handle aliased
 * vnodes.
 */
int
ufs_vinit(struct mount *mntp, struct vops *specops, struct vops *fifoops,
    struct vnode **vpp)
{
	struct inode *ip;
	struct vnode *vp, *nvp;
	struct timeval mtv;

	vp = *vpp;
	ip = VTOI(vp);
	switch (vp->v_type = IFTOVT(DIP(ip, mode))) {
	case VCHR:
	case VBLK:
		vp->v_op = specops;
		if ((nvp = checkalias(vp, DIP(ip, rdev), mntp)) != NULL) {
			/*
			 * Discard unneeded vnode, but save its inode.
			 * Note that the lock is carried over in the inode
			 * to the replacement vnode.
			 */
			nvp->v_data = vp->v_data;
			vp->v_data = NULL;
			vp->v_op = &spec_vops;
#ifdef VFSLCKDEBUG
			vp->v_flag &= ~VLOCKSWORK;
#endif
			vrele(vp);
			vgone(vp);
			/*
			 * Reinitialize aliased inode.
			 */
			vp = nvp;
			ip->i_vnode = vp;
		}
		break;
	case VFIFO:
#ifdef FIFO
		vp->v_op = fifoops;
		break;
#else
		return (EOPNOTSUPP);
#endif
	case VNON:
	case VBAD:
	case VSOCK:
	case VLNK:
	case VDIR:
	case VREG:
		break;
	}
	if (ip->i_number == ROOTINO)
		vp->v_flag |= VROOT;
	/*
	 * Initialize modrev times
	 */
	getmicrouptime(&mtv);
	SETHIGH(ip->i_modrev, mtv.tv_sec);
	SETLOW(ip->i_modrev, mtv.tv_usec * 4294);
	*vpp = vp;
	return (0);
}
int
fusefs_mknod(void *v)
{
	struct vop_mknod_args *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct proc *p = cnp->cn_proc;
	struct vnode *tdp = NULL;
	struct fusefs_mnt *fmp;
	struct fusefs_node *ip;
	struct fusebuf *fbuf;
	int error = 0;

	ip = VTOI(dvp);
	fmp = (struct fusefs_mnt *)ip->ufs_ino.i_ump;

	if (!fmp->sess_init || (fmp->undef_op & UNDEF_MKNOD)) {
		error = ENOSYS;
		goto out;
	}

	fbuf = fb_setup(cnp->cn_namelen + 1, ip->ufs_ino.i_number,
	    FBT_MKNOD, p);

	fbuf->fb_io_mode = MAKEIMODE(vap->va_type, vap->va_mode);
	if (vap->va_rdev != VNOVAL)
		fbuf->fb_io_rdev = vap->va_rdev;

	memcpy(fbuf->fb_dat, cnp->cn_nameptr, cnp->cn_namelen);
	fbuf->fb_dat[cnp->cn_namelen] = '\0';

	error = fb_queue(fmp->dev, fbuf);
	if (error) {
		if (error == ENOSYS)
			fmp->undef_op |= UNDEF_MKNOD;

		fb_delete(fbuf);
		goto out;
	}

	if ((error = VFS_VGET(fmp->mp, fbuf->fb_ino, &tdp))) {
		fb_delete(fbuf);
		goto out;
	}

	tdp->v_type = IFTOVT(fbuf->fb_io_mode);
	VTOI(tdp)->vtype = tdp->v_type;

	if (dvp != NULL && dvp->v_type == VDIR)
		VTOI(tdp)->parent = ip->ufs_ino.i_number;

	*vpp = tdp;
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	fb_delete(fbuf);
	vput(ap->a_dvp);

	/*
	 * Remove inode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	vput(*vpp);
	(*vpp)->v_type = VNON;
	vgone(*vpp);
	*vpp = NULL;

	return (0);
out:
	vput(ap->a_dvp);
	return (error);
}