/*
 * VFS mount entry point for p9fs.
 *
 * Parses mount options, allocates the per-mount p9fsmount structure,
 * connects to the remote 9P service, negotiates the protocol version,
 * sets up the root vnode, and finally attaches to the remote root.
 *
 * mp - the mount point being mounted (or updated when MNT_UPDATE).
 *
 * Returns 0 on success or an errno.  On any failure after option
 * filtering, the partially constructed state is torn down by forcing
 * an unmount.
 */
static int
p9fs_mount(struct mount *mp)
{
	struct p9fsmount *p9mp;
	struct p9fs_session *p9s;
	int error;

	error = EINVAL;
	/*
	 * Reject unknown mount options up front.
	 * NOTE(review): on this path mp->mnt_data is still NULL when we
	 * reach "out" and call p9fs_unmount() — confirm p9fs_unmount()
	 * tolerates a NULL mnt_data.
	 */
	if (vfs_filteropt(mp->mnt_optnew, p9_opts))
		goto out;

	/* An update of an existing mount only re-parses options. */
	if (mp->mnt_flag & MNT_UPDATE)
		return (p9fs_mount_parse_opts(mp));

	/* Allocate and initialize the private mount structure. */
	p9mp = malloc(sizeof (struct p9fsmount), M_P9MNT, M_WAITOK | M_ZERO);
	mp->mnt_data = p9mp;
	p9mp->p9_mountp = mp;
	p9fs_init_session(&p9mp->p9_session);
	p9s = &p9mp->p9_session;
	p9s->p9s_mount = mp;

	error = p9fs_mount_parse_opts(mp);
	if (error != 0)
		goto out;

	/* Establish the transport connection to the 9P server. */
	error = p9fs_connect(mp);
	if (error != 0) {
		goto out;
	}

	/* Negotiate with the remote service. XXX: Add auth call. */
	error = p9fs_client_version(p9s);
	if (error == 0) {
		/* Initialize the root vnode just before attaching. */
		struct vnode *vp, *ivp;
		struct p9fs_node *np = &p9s->p9s_rootnp;

		np->p9n_fid = ROOTFID;
		np->p9n_session = p9s;
		error = getnewvnode("p9fs", mp, &p9fs_vnops, &vp);
		if (error == 0) {
			/* Vnode must be locked before insmntque(). */
			vn_lock(vp, LK_EXCLUSIVE);
			error = insmntque(vp, mp);
		}
		ivp = NULL;
		if (error == 0)
			error = vfs_hash_insert(vp, ROOTFID, LK_EXCLUSIVE,
			    curthread, &ivp, NULL, NULL);
		/*
		 * A pre-existing vnode for ROOTFID on a fresh mount means
		 * something is badly wrong; treat it as "busy".
		 */
		if (error == 0 && ivp != NULL)
			error = EBUSY;
		if (error == 0) {
			np->p9n_vnode = vp;
			vp->v_data = np;
			vp->v_type = VDIR;
			vp->v_vflag |= VV_ROOT;
			VOP_UNLOCK(vp, 0);
		}
	}
	if (error == 0)
		error = p9fs_client_attach(p9s);
	if (error == 0)
		p9s->p9s_state = P9S_RUNNING;

out:
	/* On any failure, unwind everything through forced unmount. */
	if (error != 0)
		(void) p9fs_unmount(mp, MNT_FORCE);
	return (error);
}
int nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp) { struct nfsnode *np, *np2; struct nfsnodehashhead *nhpp; struct vnode *vp; int error; int lkflags; struct nfsmount *nmp; /* * Calculate nfs mount point and figure out whether the rslock should * be interruptable or not. */ nmp = VFSTONFS(mntp); if (nmp->nm_flag & NFSMNT_INT) lkflags = LK_PCATCH; else lkflags = 0; lwkt_gettoken(&nfsnhash_token); retry: nhpp = NFSNOHASH(fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT)); loop: for (np = nhpp->lh_first; np; np = np->n_hash.le_next) { if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize || bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize)) { continue; } vp = NFSTOV(np); if (vget(vp, LK_EXCLUSIVE)) goto loop; for (np = nhpp->lh_first; np; np = np->n_hash.le_next) { if (mntp == NFSTOV(np)->v_mount && np->n_fhsize == fhsize && bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize) == 0 ) { break; } } if (np == NULL || NFSTOV(np) != vp) { vput(vp); goto loop; } *npp = np; lwkt_reltoken(&nfsnhash_token); return(0); } /* * Obtain a lock to prevent a race condition if the getnewvnode() * or MALLOC() below happens to block. */ if (lockmgr(&nfsnhash_lock, LK_EXCLUSIVE | LK_SLEEPFAIL)) goto loop; /* * Allocate before getnewvnode since doing so afterward * might cause a bogus v_data pointer to get dereferenced * elsewhere if objcache should block. */ np = objcache_get(nfsnode_objcache, M_WAITOK); error = getnewvnode(VT_NFS, mntp, &vp, 0, 0); if (error) { lockmgr(&nfsnhash_lock, LK_RELEASE); *npp = NULL; objcache_put(nfsnode_objcache, np); lwkt_reltoken(&nfsnhash_token); return (error); } /* * Initialize most of (np). */ bzero(np, sizeof (*np)); if (fhsize > NFS_SMALLFH) { MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK); } else {
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * mntp   - NFS mount the handle belongs to.
 * fhp    - file handle bytes used as the hash key.
 * fhsize - length of the file handle in bytes.
 * npp    - out: the found or newly created nfsnode.
 * flags  - lock flags passed through to vfs_hash_get()/vfs_hash_insert().
 */
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfs_vncmp ncmp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	/* Hash on the raw file handle bytes. */
	hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);

	ncmp.fhsize = fhsize;
	ncmp.fh = fhp;

	/* Fast path: the vnode may already be in the VFS hash. */
	error = vfs_hash_get(mntp, hash, flags, td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = uma_zalloc(nfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("nfs", mntp, &nfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	vp->v_bufobj.bo_ops = &buf_ops_nfs;
	vp->v_data = np;
	np->n_vnode = vp;

	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then
	 * unconditionally destroy the mutex (in the case of the loser,
	 * or if hash_insert happened to return an error no special
	 * casing is needed).
	 */
	mtx_init(&np->n_mtx, "NFSnode lock", NULL, MTX_DEF);

	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);

	/* Large handles get heap storage; small ones use the inline buffer. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;

	/* Associate the vnode with the mount; on failure it is released. */
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		if (np->n_fhsize > NFS_SMALLFH) {
			free((caddr_t)np->n_fhp, M_NFSBIGFH);
		}
		mtx_destroy(&np->n_mtx);
		uma_zfree(nfsnode_zone, np);
		return (error);
	}

	/* Publish in the hash; a racing thread may have won. */
	error = vfs_hash_insert(vp, hash, flags, td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
/* ARGSUSED */
/*
 * Mount an MFS (memory file system) instance (NetBSD flavor).
 *
 * Handles three cases:
 *   MNT_GETARGS - report the current mount arguments back to userland.
 *   MNT_UPDATE  - r/w <-> r/o transitions; no device re-setup is done.
 *   fresh mount - fabricate a block-special vnode backed by the caller's
 *                 address space (args->base/size) and run ffs_mountfs()
 *                 on it.
 *
 * mp       - mount point.
 * path     - mount point path (userspace pointer).
 * data     - struct mfs_args from userland.
 * data_len - in/out size of *data.
 */
int
mfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct mfs_args *args = data;
	struct ufsmount *ump;
	struct fs *fs;
	struct mfsnode *mfsp;
	struct proc *p;
	int flags, error = 0;

	if (args == NULL)
		return EINVAL;
	if (*data_len < sizeof *args)
		return EINVAL;

	p = l->l_proc;

	/* MNT_GETARGS: just report the existing arguments. */
	if (mp->mnt_flag & MNT_GETARGS) {
		struct vnode *vp;

		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;
		vp = ump->um_devvp;
		if (vp == NULL)
			return EIO;
		mfsp = VTOMFS(vp);
		if (mfsp == NULL)
			return EIO;

		args->fspec = NULL;
		args->base = mfsp->mfs_baseoff;
		args->size = mfsp->mfs_size;
		*data_len = sizeof *args;
		return 0;
	}

	/*
	 * XXX turn off async to avoid hangs when writing lots of data.
	 * the problem is that MFS needs to allocate pages to clean pages,
	 * so if we wait until the last minute to clean pages then there
	 * may not be any pages available to do the cleaning.
	 * ... and since the default partially-synchronous mode turns out
	 * to not be sufficient under heavy load, make it full synchronous.
	 */
	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SYNCHRONOUS;

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* Going read-only: flush (and maybe force-close) files. */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, l);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR))
			fs->fs_ronly = 0;
		if (args->fspec == NULL)
			return EINVAL;
		return (0);
	}

	/*
	 * Fresh mount: fabricate an anonymous block device vnode whose
	 * "storage" is the mounting process's memory window.
	 */
	error = getnewvnode(VT_MFS, NULL, mfs_vnodeop_p, NULL, &devvp);
	if (error)
		return (error);
	devvp->v_vflag |= VV_MPSAFE;
	devvp->v_type = VBLK;
	/* Fake a device number; major 255 is reserved for MFS here. */
	spec_node_init(devvp, makedev(255, mfs_minor));
	mfs_minor++;
	mfsp = kmem_alloc(sizeof(*mfsp), KM_SLEEP);
	devvp->v_data = mfsp;
	mfsp->mfs_baseoff = args->base;
	mfsp->mfs_size = args->size;
	mfsp->mfs_vnode = devvp;
	mfsp->mfs_proc = p;
	mfsp->mfs_shutdown = 0;
	cv_init(&mfsp->mfs_cv, "mfsidl");
	mfsp->mfs_refcnt = 1;
	bufq_alloc(&mfsp->mfs_buflist, "fcfs", 0);

	if ((error = ffs_mountfs(devvp, mp, l)) != 0) {
		/* Mark the pseudo-device dead before dropping our ref. */
		mfsp->mfs_shutdown = 1;
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error)
		return error;
	(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
	    sizeof(fs->fs_fsmnt));
	fs->fs_fsmnt[sizeof(fs->fs_fsmnt) - 1] = '\0';
	/* XXX: cleanup on error */
	return 0;
}
/*
 * VFS Operations.
 *
 * mount system call
 */
/*
 * Mount an MFS instance (OpenBSD flavor).
 *
 * On MNT_UPDATE only r/w <-> r/o transitions (and optional export
 * handling) are performed.  A fresh mount fabricates a fake VBLK vnode
 * (major 255) whose backing store is the caller's memory window
 * (args.base/args.size) and then mounts it with ffs_mountfs().
 */
int
mfs_mount(struct mount *mp, const char *path, void *data,
    struct nameidata *ndp, struct proc *p)
{
	struct vnode *devvp;
	struct mfs_args args;
	struct ufsmount *ump;
	struct fs *fs;
	struct mfsnode *mfsp;
	size_t size;
	int flags, error;

	/* Copy the mount arguments in from userland. */
	error = copyin(data, (caddr_t)&args, sizeof (struct mfs_args));
	if (error)
		return (error);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* Going read-only: flush (and maybe force-close) files. */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR))
			fs->fs_ronly = 0;
#ifdef EXPORTMFS
		/* A null fspec means an export-list update. */
		if (args.fspec == 0)
			return (vfs_export(mp, &ump->um_export,
			    &args.export_info));
#endif
		return (0);
	}

	/* Fresh mount: create the fake block device vnode. */
	error = getnewvnode(VT_MFS, NULL, &mfs_vops, &devvp);
	if (error)
		return (error);
	devvp->v_type = VBLK;
	/* The fabricated dev_t must not alias a real device. */
	if (checkalias(devvp, makedev(255, mfs_minor), (struct mount *)0))
		panic("mfs_mount: dup dev");
	mfs_minor++;
	mfsp = malloc(sizeof *mfsp, M_MFSNODE, M_WAITOK);
	mfsp->mfs_dying = 0;
	devvp->v_data = mfsp;
	mfsp->mfs_baseoff = args.base;
	mfsp->mfs_size = args.size;
	mfsp->mfs_vnode = devvp;
	/* Remember the server process; it services the buffer queue. */
	mfsp->mfs_pid = p->p_pid;
	bufq_init(&mfsp->mfs_bufq, BUFQ_FIFO);

	if ((error = ffs_mountfs(devvp, mp, p)) != 0) {
		mfsp->mfs_dying = 1;
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;

	/* Record mount-point and "from" names in the statfs info. */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	bcopy(&args, &mp->mnt_stat.mount_info.mfs_args, sizeof(args));
	return (0);
}
/*
 * Find or create the fdescfs vnode for hash index ix on mount mp.
 *
 * ftype - node type (e.g. root dir vs. per-fd node).
 * fd_fd - the file descriptor number this node represents.
 * ix    - hash index identifying the node.
 * mp    - the fdescfs mount.
 * vpp   - out: referenced (and, for a fresh node, locked) vnode.
 *
 * Uses the classic "allocate optimistically, then re-check the hash"
 * pattern: the hash mutex cannot be held across the sleeping
 * allocations, so after creating a vnode we must look again and defer
 * to any winner.  Returns 0 on success, -1 if a forced unmount is in
 * progress, or an errno.
 */
int
fdesc_allocvp(fdntype ftype, unsigned fd_fd, int ix, struct mount *mp,
    struct vnode **vpp)
{
	struct fdescmount *fmp;
	struct fdhashhead *fc;
	struct fdescnode *fd, *fd2;
	struct vnode *vp, *vp2;
	struct thread *td;
	int error = 0;

	td = curthread;
	fc = FD_NHASH(ix);
loop:
	mtx_lock(&fdesc_hashmtx);

	/*
	 * If a forced unmount is progressing, we need to drop it. The
	 * flags are protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		return (-1);
	}

	/* First pass: is the node already in the hash? */
	LIST_FOREACH(fd, fc, fd_hash) {
		if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
			/* Get reference to vnode in case it's being free'd */
			vp = fd->fd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&fdesc_hashmtx);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
				goto loop;
			*vpp = vp;
			return (0);
		}
	}
	mtx_unlock(&fdesc_hashmtx);

	/* Not found: allocate node + vnode (both may sleep). */
	fd = malloc(sizeof(struct fdescnode), M_TEMP, M_WAITOK);

	error = getnewvnode("fdescfs", mp, &fdesc_vnodeops, &vp);
	if (error) {
		free(fd, M_TEMP);
		return (error);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_data = fd;
	fd->fd_vnode = vp;
	fd->fd_type = ftype;
	fd->fd_fd = fd_fd;
	fd->fd_ix = ix;
	error = insmntque1(vp, mp, fdesc_insmntque_dtr, NULL);
	if (error != 0) {
		/* insmntque1() ran the dtr and released the vnode. */
		*vpp = NULLVP;
		return (error);
	}

	/* Make sure that someone didn't beat us when inserting the vnode. */
	mtx_lock(&fdesc_hashmtx);

	/*
	 * If a forced unmount is progressing, we need to drop it. The
	 * flags are protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		vgone(vp);
		vput(vp);
		*vpp = NULLVP;
		return (-1);
	}

	/* Second pass: defer to a concurrent winner if there is one. */
	LIST_FOREACH(fd2, fc, fd_hash) {
		if (fd2->fd_ix == ix && fd2->fd_vnode->v_mount == mp) {
			/* Get reference to vnode in case it's being free'd */
			vp2 = fd2->fd_vnode;
			VI_LOCK(vp2);
			mtx_unlock(&fdesc_hashmtx);
			error = vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK, td);
			/* Someone beat us, dec use count and wait for reclaim */
			vgone(vp);
			vput(vp);
			/* If we didn't get it, return no vnode. */
			if (error)
				vp2 = NULLVP;
			*vpp = vp2;
			return (error);
		}
	}

	/* If we came here, we can insert it safely. */
	LIST_INSERT_HEAD(fc, fd, fd_hash);
	mtx_unlock(&fdesc_hashmtx);
	*vpp = vp;
	return (0);
}
/*
 * If xtaf_deget() succeeds it returns with the gotten denode locked().
 *
 * pmp	     - address of xtafmount structure of the filesystem containing
 *	       the denode of interest.  The address of the xtafmount
 *	       structure is used.
 * dirclust  - which cluster bp contains, if dirclust is 0 (root directory)
 *	       diroffset is relative to the beginning of the root directory,
 *	       otherwise it is cluster relative.  This is the cluster this
 *	       directory entry came from.
 * diroffset - offset past begin of cluster of denode we want
 * depp	     - returns the address of the gotten denode.
 */
int
xtaf_deget(struct xtafmount *pmp, u_long dirclust, u_long diroffset,
    struct denode **depp)
{
	int error;
	uint32_t inode;
	struct mount *mntp = pmp->pm_mountp;
	struct direntry *ep;
	struct denode *ldep;
	struct vnode *nvp, *xvp;
	struct buf *bp;

#ifdef XTAF_DEBUG
	printf("xtaf_deget(pmp %p, dirclust %lx, diroffset %lx, depp %p)\n",
	    pmp, dirclust, diroffset, depp);
#endif

	/*
	 * See if the denode is in the denode cache. Use the location of
	 * the directory entry to compute the hash value. For subdir use
	 * address of the first dir entry. For root dir use cluster
	 * XTAFROOT, offset XTAFROOT_OFS
	 */
	inode = (uint32_t)pmp->pm_bpcluster * dirclust + diroffset;

	error = vfs_hash_get(mntp, inode, LK_EXCLUSIVE, curthread, &nvp,
	    de_vncmpf, &inode);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Cache hit: sanity-check the identity and return it. */
		*depp = VTODE(nvp);
		KASSERT((*depp)->de_dirclust == dirclust, ("wrong dirclust"));
		KASSERT((*depp)->de_diroffset == diroffset, ("wrong diroffset"));
		return (0);
	}

	/*
	 * Do the malloc before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if malloc should block.
	 */
	ldep = malloc(sizeof(struct denode), M_XTAFNODE, M_WAITOK | M_ZERO);

	/*
	 * Directory entry was not in cache, have to create a vnode and
	 * copy it from the passed disk buffer.
	 */
	/* getnewvnode() does a VREF() on the vnode */
	error = getnewvnode("xtaf", mntp, &xtaf_vnodeops, &nvp);
	if (error) {
		*depp = NULL;
		free(ldep, M_XTAFNODE);
		return error;
	}
	nvp->v_data = ldep;
	ldep->de_vnode = nvp;
	ldep->de_flag = 0;
	ldep->de_dirclust = dirclust;
	ldep->de_diroffset = diroffset;
	ldep->de_inode = inode;
	lockmgr(nvp->v_vnlock, LK_EXCLUSIVE, NULL);
	xtaf_fc_purge(ldep, 0);	/* init the fat cache for this denode */
	error = insmntque(nvp, mntp);
	if (error != 0) {
		/*
		 * NOTE(review): insmntque() released the vnode; freeing
		 * ldep here assumes reclaim did not already free v_data —
		 * confirm against xtaf's reclaim routine.
		 */
		free(ldep, M_XTAFNODE);
		*depp = NULL;
		return (error);
	}
	error = vfs_hash_insert(nvp, inode, LK_EXCLUSIVE, curthread, &xvp,
	    de_vncmpf, &inode);
	if (error) {
		*depp = NULL;
		return (error);
	}
	if (xvp != NULL) {
		/* A racing thread inserted first; use its denode. */
		*depp = xvp->v_data;
		return (0);
	}

	ldep->de_pmp = pmp;
	ldep->de_refcnt = 1;

#ifdef XTAF_DEBUG
	printf("xtaf_deget(): dirclust=%lx diroffset=%lx\n", dirclust,
	    diroffset);
#endif
	/*
	 * Copy the directory entry into the denode area of the vnode.
	 */
	if (dirclust == XTAFROOT && diroffset == XTAFROOT_OFS) {
		/*
		 * Directory entry for the root directory. There isn't one,
		 * so we manufacture one.
		 */
		nvp->v_vflag |= VV_ROOT;
		FAKEDIR(ldep, pmp->pm_rootdirsize, XTAFROOT);
#ifdef XTAF_DEBUG
		printf("xtaf_deget(): FAKEDIR root\n");
#endif
	} else {
		error = xtaf_readep(pmp, dirclust, diroffset, &bp, &ep);
		if (error) {
			/*
			 * The denode does not contain anything useful, so
			 * it would be wrong to leave it on its hash chain.
			 * Arrange for vput() to just forget about it.
			 */
			ldep->de_Length = LEN_DELETED;

			vput(nvp);
			*depp = NULL;
			return (error);
		}
		DE_INTERNALIZE(ldep, ep);
		brelse(bp);
	}

	/*
	 * Fill in a few fields of the vnode and finish filling in the
	 * denode.  Then return the address of the found denode.
	 */
	if (ldep->de_Attributes & ATTR_DIRECTORY) {
		/*
		 * Since XTAF directory entries that describe directories
		 * have 0 in the filesize field, we take this opportunity
		 * to find out the length of the directory and plug it into
		 * the denode structure.
		 */
		u_long size;

#ifdef XTAF_DEBUG
		/* FIXME something goes wrong here, StartCluster shouldn't
		 * be 0xffffffff ? Filename also gets blanked here and
		 * SF_IMMUTABLE set */
		printf("ldep->de_StartCluster = %lx dirclust = %lx\n",
		    ldep->de_StartCluster, dirclust);
#endif
		nvp->v_type = VDIR;
		if (ldep->de_StartCluster != XTAFROOT) {
			/*
			 * Walk the cluster chain; E2BIG from pcbmap means
			 * we ran off the end, i.e. "size" clusters total.
			 */
			error = xtaf_pcbmap(ldep, 0xffff, 0, &size, 0);
			if (error == E2BIG) {
				ldep->de_FileSize = de_cn2off(pmp, size);
				error = 0;
			} else
				printf("xtaf_deget(): xtaf_pcbmap returned "
				    "%d\n", error);
		}
	} else
		nvp->v_type = VREG;
	ldep->de_modrev = init_va_filerev();
	*depp = ldep;
	return (0);
}
/*
 * Look up a EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 *
 * mp  - mount the inode belongs to.
 * ino - inode number to fetch.
 * vpp - out: locked, referenced vnode for the inode.
 */
int
ext2fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	void *cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	/* Fast path: already in the inode hash. */
	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_EXT2FS, mp, ext2fs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&ext2fs_inode_pool, PR_WAITOK);

	/*
	 * Re-check under the hash lock: another thread may have created
	 * the inode while we slept in the allocations above.
	 */
	mutex_enter(&ufs_hashlock);
	if ((*vpp = ufs_ihashget(dev, ino, 0)) != NULL) {
		mutex_exit(&ufs_hashlock);
		ungetnewvnode(vp);
		pool_put(&ext2fs_inode_pool, ip);
		goto retry;
	}
	vp->v_vflag |= VV_LOCKSWORK;

	memset(ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_e2fs_last_lblk = 0;
	ip->i_e2fs_last_blk = 0;
	genfs_node_init(vp, &ext2fs_genfsops);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	mutex_exit(&ufs_hashlock);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, EXT2_FSBTODB(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, NOCRED, 0, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain.  With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	cp = (char *)bp->b_data +
	    (ino_to_fsbo(fs, ino) * EXT2_DINODE_SIZE(fs));
	ip->i_din.e2fs_din = pool_get(&ext2fs_dinode_pool, PR_WAITOK);
	e2fs_iload((struct ext2fs_dinode *)cp, ip->i_din.e2fs_din);
	ext2fs_set_inode_guid(ip);
	brelse(bp, 0);

	/* If the inode was deleted, reset all fields */
	if (ip->i_e2fs_dtime != 0) {
		ip->i_e2fs_mode = 0;
		(void)ext2fs_setsize(ip, 0);
		(void)ext2fs_setnblock(ip, 0);
		memset(ip->i_e2fs_blocks, 0, sizeof(ip->i_e2fs_blocks));
	}

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 */
	error = ext2fs_vinit(mp, ext2fs_specop_p, ext2fs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	vref(ip->i_devvp);

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one.  This should only happen on old filesystems.
	 */
	if (ip->i_e2fs_gen == 0) {
		if (++ext2gennumber < (u_long)time_second)
			ext2gennumber = time_second;
		ip->i_e2fs_gen = ext2gennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	uvm_vnp_setsize(vp, ext2fs_size(ip));
	*vpp = vp;
	return (0);
}
/*
 * Find or create the vnode for a FUSE node id.
 *
 * mp     - the FUSE mount.
 * td     - calling thread (for vfs_hash operations).
 * nodeid - server-assigned node identifier.
 * vtyp   - expected vnode type; VNON is rejected.
 * vpp    - out: locked, referenced vnode.
 *
 * Returns 0 on success or an errno; *vpp is NULL on failure.
 */
static int
fuse_vnode_alloc(struct mount *mp, struct thread *td, uint64_t nodeid,
    enum vtype vtyp, struct vnode **vpp)
{
	const int lkflags = LK_EXCLUSIVE | LK_RETRY;
	struct fuse_vnode_data *fvdat;
	struct vnode *vp2;
	int err = 0;

	DEBUG("been asked for vno #%ju\n", (uintmax_t)nodeid);

	if (vtyp == VNON) {
		return EINVAL;
	}

	*vpp = NULL;

	/* Fast path: the vnode may already be hashed. */
	err = vfs_hash_get(mp, fuse_vnode_hash(nodeid), lkflags, td, vpp,
	    fuse_vnode_cmp, &nodeid);
	if (err)
		return (err);
	if (*vpp) {
		MPASS((*vpp)->v_type == vtyp && (*vpp)->v_data != NULL);
		DEBUG("vnode taken from hash\n");
		return (0);
	}

	/* Allocate private data before the vnode so v_data is never bogus. */
	fvdat = malloc(sizeof(*fvdat), M_FUSEVN, M_WAITOK | M_ZERO);
	err = getnewvnode("fuse", mp, &fuse_vnops, vpp);
	if (err) {
		free(fvdat, M_FUSEVN);
		return (err);
	}
	vn_lock(*vpp, lkflags);
	err = insmntque(*vpp, mp);
	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");
	if (err) {
		/* insmntque() released the vnode on failure. */
		VOP_UNLOCK(*vpp, 0);
		free(fvdat, M_FUSEVN);
		*vpp = NULL;
		return (err);
	}
	fuse_vnode_init(*vpp, fvdat, nodeid, vtyp);
	err = vfs_hash_insert(*vpp, fuse_vnode_hash(nodeid), lkflags, td,
	    &vp2, fuse_vnode_cmp, &nodeid);
	if (err) {
		VOP_UNLOCK(*vpp, 0);
		fuse_vnode_destroy(*vpp);
		*vpp = NULL;
		return (err);
	}

	/*
	 * XXXIP: Prevent silent vnode reuse. It may happen because several
	 * fuse filesystems ignore inode numbers
	 */
	KASSERT(vp2 == NULL,
	    ("vfs hash collision for node #%ju\n", (uintmax_t)nodeid));

	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");
	return (0);
}
/*
 * Make a new or get existing pefs node.
 * vp is the alias vnode
 * lvp is the lower vnode
 * ldvp is the lower directory vnode, used if no key specified
 *
 * The lvp assumed to be locked and having "spare" reference. This routine
 * vrele lvp if pefs node was taken from hash. Otherwise it "transfers" the
 * caller's "spare" reference to created pefs vnode.
 */
static int
pefs_node_get(struct mount *mp, struct vnode *lvp, struct vnode **vpp,
    pefs_node_init_fn *init_fn, void *context)
{
	struct pefs_node *pn;
	struct vnode *vp;
	int error;

	ASSERT_VOP_LOCKED(lvp, "pefs_node_get");

	/* Lookup the hash firstly */
	*vpp = pefs_nodehash_get(mp, lvp);
	if (*vpp != NULL) {
		/* Found: drop the caller's spare reference on lvp. */
		vrele(lvp);
		return (0);
	}

	/*
	 * We do not serialize vnode creation, instead we will check for
	 * duplicates later, when adding new vnode to hash.
	 *
	 * Note that duplicate can only appear in hash if the lvp is
	 * locked LK_SHARED.
	 */

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if MALLOC should block.
	 */
	pn = uma_zalloc(pefs_node_zone, M_WAITOK | M_ZERO);
	pn->pn_lowervp = lvp;

	/* pn->pn_lowervp should be initialized before calling init_fn. */
	error = init_fn(mp, pn, context);
	/* A node has a key iff pn_tkey.ptk_key is set; the two must agree. */
	MPASS(!(((pn->pn_flags & PN_HASKEY) == 0) ^
	    (pn->pn_tkey.ptk_key == NULL)));
	if (error != 0) {
		uma_zfree(pefs_node_zone, pn);
		return (error);
	}

	error = getnewvnode("pefs", mp, &pefs_vnodeops, &vp);
	if (error != 0) {
		pefs_key_release(pn->pn_tkey.ptk_key);
		uma_zfree(pefs_node_zone, pn);
		return (error);
	}
	if (pn->pn_tkey.ptk_key == NULL)
		PEFSDEBUG("pefs_node_get: creating node without key: %p\n",
		    pn);

	/* Share the lower vnode's lock: pefs is a stacked filesystem. */
	pn->pn_vnode = vp;
	vp->v_type = lvp->v_type;
	vp->v_data = pn;
	vp->v_vnlock = lvp->v_vnlock;
	if (vp->v_vnlock == NULL)
		panic("pefs_node_get: Passed a NULL vnlock.\n");
	error = insmntque1(vp, mp, pefs_insmntque_dtr, pn);
	if (error != 0) {
		/* insmntque1() ran pefs_insmntque_dtr() for cleanup. */
		return (error);
	}

	/*
	 * Atomically insert our new node into the hash or vget existing
	 * if someone else has beaten us to it.
	 */
	*vpp = pefs_nodehash_insert(mp, pn);
	if (*vpp != NULL) {
		/*
		 * Lost the race: undo the lock sharing and discard our
		 * freshly created vnode; the winner carries lvp's ref.
		 */
		vrele(lvp);
		vp->v_vnlock = &vp->v_lock;
		pn->pn_lowervp = NULL;
		vrele(vp);
		MPASS(PEFS_LOWERVP(*vpp) == lvp);
		ASSERT_VOP_LOCKED(*vpp, "pefs_node_get: duplicate");
		return (0);
	}
	if (vp->v_type == VDIR)
		pn->pn_dircache = pefs_dircache_get();
	*vpp = vp;
	MPASS(PEFS_LOWERVP(*vpp) == lvp);
	ASSERT_VOP_LOCKED(*vpp, "pefs_node_get");

	return (0);
}
/*
 * Allocate a vnode
 */
/*
 * Find or create the vnode for a pseudofs node (pn, pid) on mount mp.
 *
 * The cache is a doubly linked list protected by pfs_vncache_mutex;
 * lookup is linear (see XXX below).  Because allocation can sleep, the
 * cache is re-checked after allocating, and a racing winner's vnode is
 * preferred over our freshly created one.
 *
 * Returns 0 with *vpp locked and referenced, or an errno.
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
    struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
retry:
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			/* vget() failed (vnode being recycled); start over. */
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);

	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	pvd->pvd_next = pvd->pvd_prev = NULL;
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	/* Map the pseudofs node type onto a vnode type. */
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		/* insmntque() released the vnode on failure. */
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Other thread may race with us, creating the entry we are
	 * going to insert into the cache. Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) == 0) {
				++pfs_vncache_hits;
				/* Discard our losing vnode. */
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	/* We won: link our entry at the head of the cache list. */
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
/*
 * Get a p9node.  Nodes are represented by (fid, qid) tuples in 9P2000.
 * Fids are assigned by the client, while qids are assigned by the server.
 *
 * The caller is expected to have generated the FID via p9fs_getfid() and
 * obtained the QID from the server via p9fs_client_walk() and friends.
 *
 * p9s     - session the node belongs to.
 * fid     - client-assigned fid, also used as the vfs_hash key.
 * qid     - server-assigned qid, copied into the new node.
 * lkflags - lock flags for vfs_hash_get()/vfs_hash_insert().
 * npp     - out: the found or newly created p9fs node.
 */
int
p9fs_nget(struct p9fs_session *p9s, uint32_t fid, struct p9fs_qid *qid,
    int lkflags, struct p9fs_node **npp)
{
	int error = 0;
	struct p9fs_node *np;
	struct vnode *vp, *nvp;
	struct vattr vattr = {};
	struct thread *td = curthread;

	*npp = NULL;

	/* Fast path: the vnode may already be hashed under this fid. */
	error = vfs_hash_get(p9s->p9s_mount, fid, lkflags, td, &vp,
	    NULL, NULL);
	if (error != 0)
		return (error);

	if (vp != NULL) {
		*npp = vp->v_data;
		return (0);
	}

	/* Allocate the node before the vnode so v_data is never bogus. */
	np = malloc(sizeof (struct p9fs_node), M_P9NODE, M_WAITOK | M_ZERO);
	getnewvnode_reserve(1);

	error = getnewvnode("p9fs", p9s->p9s_mount, &p9fs_vnops, &nvp);
	if (error != 0) {
		getnewvnode_drop_reserve();
		free(np, M_P9NODE);
		return (error);
	}
	vp = nvp;
	vn_lock(vp, LK_EXCLUSIVE);

	error = insmntque(nvp, p9s->p9s_mount);
	if (error != 0) {
		/* vp was vput()'d by insmntque() */
		free(np, M_P9NODE);
		return (error);
	}
	error = vfs_hash_insert(nvp, fid, lkflags, td, &nvp, NULL, NULL);
	if (error != 0) {
		free(np, M_P9NODE);
		return (error);
	}
	if (nvp != NULL) {
		free(np, M_P9NODE);
		*npp = nvp->v_data;
		/* vp was vput()'d by vfs_hash_insert() */
		return (0);
	}

	error = p9fs_client_stat(p9s, fid, &vattr);
	if (error != 0) {
		/*
		 * Bug fix: at this point vp is locked, referenced, and
		 * already inserted into the VFS hash with a NULL v_data.
		 * The old code only freed np here, leaking the locked
		 * vnode (and leaving a stale hash entry).  Disassociate
		 * and release the vnode before returning.
		 */
		vgone(vp);
		vput(vp);
		free(np, M_P9NODE);
		return (error);
	}

	/* Our vnode is the winner. Set up the new p9node for it. */
	vp->v_type = vattr.va_type;
	vp->v_data = np;
	np->p9n_fid = fid;
	np->p9n_session = p9s;
	np->p9n_vnode = vp;
	bcopy(qid, &np->p9n_qid, sizeof (*qid));
	*npp = np;

	return (error);
}
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * mntp    - NFS mount the handle belongs to.
 * fhp     - file handle used as the red-black tree key.
 * fhsize  - handle length in bytes.
 * npp     - out: found or newly created nfsnode (vnode locked).
 * lkflags - extra flags for vget() on a cache hit.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	/* Fast path: look the handle up in the per-mount rb-tree. */
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */
	/* Large handles get heap storage; small ones use the inline buffer. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	/*
	 * Re-check under the writer lock: a racing thread may have
	 * inserted the same handle while we slept in the allocations.
	 */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		/* Lost the race: undo all allocations and retry. */
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);

	/*
	 * Initalize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
/*
 * Look up (or create) the vnode for attribute (attrtype, attrname) of
 * inode `ino`.  Returns the vnode referenced (and locked if lkflags asks
 * for a lock) in *vpp.  NetBSD variant.
 */
int
ntfs_vgetex(
	struct mount *mp,
	ino_t ino,
	u_int32_t attrtype,
	char *attrname,
	u_long lkflags,
	u_long flags,
	struct vnode **vpp)
{
	int error;
	struct ntfsmount *ntmp;
	struct ntnode *ip;
	struct fnode *fp;
	struct vnode *vp;
	enum vtype f_type = VBAD;	/* VBAD == "leave v_type alone" below */

	dprintf(("ntfs_vgetex: ino: %llu, attr: 0x%x:%s, lkf: 0x%lx, f:"
	    " 0x%lx\n", (unsigned long long)ino, attrtype,
	    attrname ? attrname : "", (u_long)lkflags, (u_long)flags));

	ntmp = VFSTONTFS(mp);
	*vpp = NULL;

loop:
	/* Get ntnode */
	error = ntfs_ntlookup(ntmp, ino, &ip);
	if (error) {
		printf("ntfs_vget: ntfs_ntget failed\n");
		return (error);
	}
	/* It may be not initialized fully, so force load it */
	if (!(flags & VG_DONTLOADIN) && !(ip->i_flag & IN_LOADED)) {
		error = ntfs_loadntnode(ntmp, ip);
		if(error) {
			printf("ntfs_vget: CAN'T LOAD ATTRIBUTES FOR INO:"
			    " %llu\n", (unsigned long long)ip->i_number);
			ntfs_ntput(ip);
			return (error);
		}
	}

	/* Find or create the per-attribute fnode; bumps the ntnode usecount. */
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}

	/* Decide the vnode type and size on first validation of the fnode. */
	if (!(flags & VG_DONTVALIDFN) && !(fp->f_flag & FN_VALID)) {
		if ((ip->i_frflag & NTFS_FRFLAG_DIR) &&
		    (fp->f_attrtype == NTFS_A_DATA && fp->f_attrname == NULL)) {
			f_type = VDIR;
		} else if (flags & VG_EXT) {
			f_type = VNON;
			fp->f_size = fp->f_allocated = 0;
		} else {
			f_type = VREG;
			error = ntfs_filesize(ntmp, fp, &fp->f_size,
			    &fp->f_allocated);
			if (error) {
				ntfs_ntput(ip);
				return (error);
			}
		}
		fp->f_flag |= FN_VALID;
	}

	/*
	 * We may be calling vget() now. To avoid potential deadlock, we need
	 * to release ntnode lock, since due to locking order vnode
	 * lock has to be acquired first.
	 * ntfs_fget() bumped ntnode usecount, so ntnode won't be recycled
	 * prematurely.
	 * Take v_interlock before releasing ntnode lock to avoid races.
	 */
	vp = FTOV(fp);
	if (vp) {
		mutex_enter(vp->v_interlock);
		ntfs_ntput(ip);
		/* vget() failure means a reclaim race; start over. */
		if (vget(vp, lkflags) != 0)
			goto loop;
		*vpp = vp;
		return 0;
	}
	ntfs_ntput(ip);

	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, ntfs_vnodeop_p,
	    NULL, &vp);
	if(error) {
		ntfs_frele(fp);
		return (error);
	}
	/* Re-lock the ntnode and re-fetch the fnode; it may have changed. */
	ntfs_ntget(ip);
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		/*
		 * NOTE(review): the freshly allocated vnode `vp` does not
		 * appear to be released on this path — possible leak; confirm
		 * against the upstream tree.
		 */
		return (error);
	}
	if (FTOV(fp)) {
		/*
		 * Another thread beat us, put back freshly allocated
		 * vnode and retry.
		 */
		ntfs_ntput(ip);
		ungetnewvnode(vp);
		goto loop;
	}
	dprintf(("ntfs_vget: vnode: %p for ntnode: %llu\n", vp,
	    (unsigned long long)ino));

	/* Associate the fnode with the new vnode. */
	fp->f_vp = vp;
	vp->v_data = fp;
	if (f_type != VBAD)
		vp->v_type = f_type;
	genfs_node_init(vp, &ntfs_genfsops);

	if (ino == NTFS_ROOTINO)
		vp->v_vflag |= VV_ROOT;

	ntfs_ntput(ip);

	if (lkflags & (LK_EXCLUSIVE | LK_SHARED)) {
		error = vn_lock(vp, lkflags);
		if (error) {
			vput(vp);
			return (error);
		}
	}

	uvm_vnp_setsize(vp, fp->f_size); /* XXX: mess, cf. ntfs_lookupfile() */
	/* The fnode still references ip, so using ip here is safe. */
	vref(ip->i_devvp);
	*vpp = vp;
	return (0);
}
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * *errorp is set to a UNIX error, not a HAMMER2 error.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		int wasexclusive;

		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, but leave the ip ref intact.
			 *
			 * vnode is held to prevent destruction during the
			 * vget().  The vget() can still fail if we lost
			 * a reclaim race on the vnode.
			 */
			hammer2_mtx_state_t ostate;

			vhold(vp);
			ostate = hammer2_inode_lock_temp_release(ip);
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				hammer2_inode_lock_temp_restore(ip, ostate);
				continue;
			}
			hammer2_inode_lock_temp_restore(ip, ostate);
			vdrop(vp);
			/* vp still locked and ref from vget */
			if (ip->vp != vp) {
				/* vnode was reassigned while unlocked; retry */
				kprintf("hammer2: igetv race %p/%p\n",
					ip->vp, vp);
				vput(vp);
				continue;
			}
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			kprintf("hammer2: igetv getnewvnode failed %d\n",
				*errorp);
			vp = NULL;
			break;
		}

		/*
		 * Lock the inode and check for an allocation race.
		 */
		wasexclusive = hammer2_inode_lock_upgrade(ip);
		if (ip->vp != NULL) {
			/* Lost the race: discard our vnode and reuse ip->vp. */
			vp->v_type = VBAD;
			vx_put(vp);
			hammer2_inode_lock_downgrade(ip, wasexclusive);
			continue;
		}

		/* Configure the vnode according to the on-media object type. */
		switch (ip->meta.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			/*
			 * Regular file must use buffer cache I/O
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VREG;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 *
			 * (VKVABIO cpu sync semantics supported)
			 */
			vp->v_type = VLNK;
			vsetflags(vp, VKVABIO);
			vinitvmio(vp, ip->meta.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->meta.size & HAMMER2_LBUFMASK);
			break;
		case HAMMER2_OBJTYPE_CDEV:
			vp->v_type = VCHR;
			/* fall through */
		case HAMMER2_OBJTYPE_BDEV:
			vp->v_ops = &pmp->mp->mnt_vn_spec_ops;
			if (ip->meta.type != HAMMER2_OBJTYPE_CDEV)
				vp->v_type = VBLK;
			addaliasu(vp, ip->meta.rmajor, ip->meta.rminor);
			break;
		case HAMMER2_OBJTYPE_FIFO:
			vp->v_type = VFIFO;
			vp->v_ops = &pmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER2_OBJTYPE_SOCKET:
			vp->v_type = VSOCK;
			break;
		default:
			panic("hammer2: unhandled objtype %d", ip->meta.type);
			break;
		}

		if (ip == pmp->iroot)
			vsetflags(vp, VROOT);

		/* Publish the association; the inode gains a ref for it. */
		vp->v_data = ip;
		ip->vp = vp;
		hammer2_inode_ref(ip);		/* vp association */
		hammer2_inode_lock_downgrade(ip, wasexclusive);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs 0x%08x aux 0x%08x\n",
			vp, vp->v_refcnt, vp->v_auxrefs);
	}
	return (vp);
}
/*
 * Mount the per-process file descriptors (/dev/fd)
 *
 * Expects two mount options: "socket" (an fd number for an AF_UNIX socket
 * connected to the portal daemon) and "config" (the mounted-from string).
 * Builds the root vnode and the per-mount structure referencing the
 * daemon's socket file.
 */
static int
portal_mount(struct mount *mp)
{
	struct file *fp;
	struct portalmount *fmp;
	struct socket *so;
	struct vnode *rvp;
	struct thread *td;
	struct portalnode *pn;
	int error, v;
	char *p;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, portal_opts))
		return (EINVAL);

	error = vfs_scanopt(mp->mnt_optnew, "socket", "%d", &v);
	if (error != 1)
		return (EINVAL);
	error = vfs_getopt(mp->mnt_optnew, "config", (void **)&p, NULL);
	if (error)
		return (error);

	/*
	 * Capsicum is not incompatible with portalfs, but we don't really
	 * know what rights are required. In the spirit of "better safe than
	 * sorry", pretend that all rights are required for now.
	 */
	if ((error = fget(td, v, CAP_MASK_VALID, &fp)) != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		fdrop(fp, td);
		return(ENOTSOCK);
	}
	so = fp->f_data;	/* XXX race against userland */
	if (so->so_proto->pr_domain->dom_family != AF_UNIX) {
		fdrop(fp, td);
		return (ESOCKTNOSUPPORT);
	}

	/* Allocate the root portalnode and the per-mount structure. */
	pn = malloc(sizeof(struct portalnode), M_TEMP, M_WAITOK);

	fmp = malloc(sizeof(struct portalmount), M_PORTALFSMNT, M_WAITOK); /* XXX */

	error = getnewvnode("portal", mp, &portal_vnodeops, &rvp); /* XXX */
	if (error) {
		free(fmp, M_PORTALFSMNT);
		free(pn, M_TEMP);
		fdrop(fp, td);
		return (error);
	}

	error = insmntque(rvp, mp);	/* XXX: Too early for mpsafe fs */
	if (error != 0) {
		/* insmntque() disposed of rvp on failure. */
		free(fmp, M_PORTALFSMNT);
		free(pn, M_TEMP);
		fdrop(fp, td);
		return (error);
	}

	/* Initialize the root vnode. */
	rvp->v_data = pn;
	rvp->v_type = VDIR;
	rvp->v_vflag |= VV_ROOT;
	VTOPORTAL(rvp)->pt_arg = 0;
	VTOPORTAL(rvp)->pt_size = 0;
	VTOPORTAL(rvp)->pt_fileid = PORTAL_ROOTFILEID;
	fmp->pm_root = rvp;
	/* Keep our own reference on the daemon socket's file. */
	fhold(fp);
	fmp->pm_server = fp;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	mp->mnt_data = fmp;
	vfs_getnewfsid(mp);

	vfs_mountedfrom(mp, p);
	/* Drop the fget() reference; fmp holds its own via fhold(). */
	fdrop(fp, td);
	return (0);
}
/*
 * Make a new or get existing unionfs node.
 *
 * uppervp and lowervp should be unlocked. Because if new unionfs vnode is
 * locked, uppervp or lowervp is locked too. In order to prevent dead lock,
 * you should not lock plurality simultaneously.
 *
 * At least one of uppervp/lowervp must be non-NULL.  On success *vpp holds
 * a referenced unionfs vnode (locked if cnp carried lock flags).
 */
int
unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
		struct vnode *lowervp, struct vnode *dvp,
		struct vnode **vpp, struct componentname *cnp,
		struct thread *td)
{
	struct unionfs_mount *ump;
	struct unionfs_node *unp;
	struct vnode *vp;
	int error;
	int lkflags;
	enum vtype vt;
	char *path;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	lkflags = (cnp ? cnp->cn_lkflags : 0);
	path = (cnp ? cnp->cn_nameptr : NULL);
	*vpp = NULLVP;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("unionfs_nodeget: upper and lower is null");

	/* The union node's type follows the upper layer when present. */
	vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);

	/* If it has no ISLASTCN flag, path check is skipped. */
	if (cnp && !(cnp->cn_flags & ISLASTCN))
		path = NULL;

	/* check the cache */
	if (path != NULL && dvp != NULLVP && vt == VDIR) {
		vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp, path);
		if (vp != NULLVP) {
			vref(vp);
			*vpp = vp;
			goto unionfs_nodeget_out;
		}
	}

	if ((uppervp == NULLVP || ump->um_uppervp != uppervp) ||
	    (lowervp == NULLVP || ump->um_lowervp != lowervp)) {
		/* dvp will be NULLVP only in case of root vnode. */
		if (dvp == NULLVP)
			return (EINVAL);
	}

	/*
	 * Do the MALLOC before the getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced elsewhere
	 * if MALLOC should block.
	 */
	unp = malloc(sizeof(struct unionfs_node),
	    M_UNIONFSNODE, M_WAITOK | M_ZERO);

	error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
	if (error != 0) {
		free(unp, M_UNIONFSNODE);
		return (error);
	}
	error = insmntque(vp, mp);	/* XXX: Too early for mpsafe fs */
	if (error != 0) {
		/* insmntque() disposed of vp on failure. */
		free(unp, M_UNIONFSNODE);
		return (error);
	}
	/* Take our references on the layer vnodes before publishing them. */
	if (dvp != NULLVP)
		vref(dvp);
	if (uppervp != NULLVP)
		vref(uppervp);
	if (lowervp != NULLVP)
		vref(lowervp);

	/* Directories carry a name cache of their union children. */
	if (vt == VDIR)
		unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
		    &(unp->un_hashmask));

	unp->un_vnode = vp;
	unp->un_uppervp = uppervp;
	unp->un_lowervp = lowervp;
	unp->un_dvp = dvp;
	/* Share the lock of the preferred (upper, else lower) layer vnode. */
	if (uppervp != NULLVP)
		vp->v_vnlock = uppervp->v_vnlock;
	else
		vp->v_vnlock = lowervp->v_vnlock;

	if (path != NULL) {
		unp->un_path = (char *)
		    malloc(cnp->cn_namelen +1, M_UNIONFSPATH, M_WAITOK|M_ZERO);
		bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
		unp->un_path[cnp->cn_namelen] = '\0';
	}
	vp->v_type = vt;
	vp->v_data = unp;

	if ((uppervp != NULLVP && ump->um_uppervp == uppervp) &&
	    (lowervp != NULLVP && ump->um_lowervp == lowervp))
		vp->v_vflag |= VV_ROOT;

	/*
	 * Insert into the directory cache; a non-NULL return means another
	 * thread raced us in and we must discard our node in favor of theirs.
	 */
	if (path != NULL && dvp != NULLVP && vt == VDIR)
		*vpp = unionfs_ins_cached_vnode(unp, dvp, path);
	if ((*vpp) != NULLVP) {
		/* Lost the race: drop our layer references and our vnode. */
		if (dvp != NULLVP)
			vrele(dvp);
		if (uppervp != NULLVP)
			vrele(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);

		unp->un_uppervp = NULLVP;
		unp->un_lowervp = NULLVP;
		unp->un_dvp = NULLVP;
		vrele(vp);
		vp = *vpp;
		vref(vp);
	} else
		*vpp = vp;

unionfs_nodeget_out:
	if (lkflags & LK_TYPE_MASK)
		vn_lock(vp, lkflags | LK_RETRY, td);

	return (0);
}
/* * layer_node_alloc: make a new layerfs vnode. * * => vp is the alias vnode, lowervp is the lower vnode. * => We will hold a reference to lowervp. */ int layer_node_alloc(struct mount *mp, struct vnode *lowervp, struct vnode **vpp) { struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp); struct layer_node_hashhead *hd; struct layer_node *xp; struct vnode *vp, *nvp; int error; /* Get a new vnode and share its interlock with underlying vnode. */ error = getnewvnode(lmp->layerm_tag, mp, lmp->layerm_vnodeop_p, lowervp->v_interlock, &vp); if (error) { return error; } vp->v_type = lowervp->v_type; mutex_enter(vp->v_interlock); vp->v_iflag |= VI_LAYER; mutex_exit(vp->v_interlock); xp = kmem_alloc(lmp->layerm_size, KM_SLEEP); if (xp == NULL) { ungetnewvnode(vp); return ENOMEM; } if (vp->v_type == VBLK || vp->v_type == VCHR) { spec_node_init(vp, lowervp->v_rdev); } /* * Before inserting the node into the hash, check if other thread * did not race with us. If so - return that node, destroy ours. */ mutex_enter(&lmp->layerm_hashlock); if ((nvp = layer_node_find(mp, lowervp)) != NULL) { ungetnewvnode(vp); kmem_free(xp, lmp->layerm_size); *vpp = nvp; return 0; } vp->v_data = xp; vp->v_vflag = (vp->v_vflag & ~VV_MPSAFE) | (lowervp->v_vflag & VV_MPSAFE); xp->layer_vnode = vp; xp->layer_lowervp = lowervp; xp->layer_flags = 0; /* * Insert the new node into the hash. * Add a reference to the lower node. */ vref(lowervp); hd = LAYER_NHASH(lmp, lowervp); LIST_INSERT_HEAD(hd, xp, layer_hash); uvm_vnp_setsize(vp, 0); mutex_exit(&lmp->layerm_hashlock); *vpp = vp; return 0; }
/*
 * Find or create the smbnode/vnode for `name` under directory vnode `dvp`.
 * The node is hashed by name; the full remote path is rebuilt from
 * (dirnm, sep, name).  Returns the vnode exclusively locked in *vpp.
 */
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp, const char *dirnm,
	int dirlen, const char *name, int nmlen, char sep,
	struct smbfattr *fap, struct vnode **vpp)
{
	struct vattr vattr;
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode *np, *dnp;
	struct vnode *vp, *vp2;
	struct smbcmp sc;
	char *p, *rpath;
	int error, rplen;

	/* Hash comparison key: (parent vnode, name). */
	sc.n_parent = dvp;
	sc.n_nmlen = nmlen;
	sc.n_name = name;
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	/* ".." resolves through the parent's cached n_parent pointer. */
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	error = vfs_hash_get(mp, smbfs_hash(name, nmlen), LK_EXCLUSIVE, td,
	    vpp, smbfs_vnode_cmp, &sc);
	if (error)
		return (error);
	if (*vpp) {
		np = VTOSMB(*vpp);
		/* Force cached attributes to be refreshed if stale. */
		(void)VOP_GETATTR(*vpp, &vattr, td->td_ucred);
		/*
		 * If the file type on the server is inconsistent with
		 * what it was when we created the vnode, kill the
		 * bogus vnode now and fall through to the code below
		 * to create a new one with the right type.
		 */
		if (((*vpp)->v_type == VDIR &&
		    (np->n_dosattr & SMB_FA_DIR) == 0) ||
		    ((*vpp)->v_type == VREG &&
		    (np->n_dosattr & SMB_FA_DIR) != 0)) {
			vgone(*vpp);
			vput(*vpp);
		} else {
			SMBVDEBUG("vnode taken from the hashtable\n");
			return (0);
		}
	}
	/*
	 * If we don't have node attributes, then it is an explicit lookup
	 * for an existing vnode.
	 */
	if (fap == NULL)
		return ENOENT;

	error = getnewvnode("smbfs", mp, &smbfs_vnodeops, vpp);
	if (error)
		return (error);
	vp = *vpp;
	np = malloc(sizeof *np, M_SMBNODE, M_WAITOK | M_ZERO);
	/* Build the remote path: dirnm [+ sep] + name, NUL-terminated. */
	rplen = dirlen;
	if (sep != '\0')
		rplen++;
	rplen += nmlen;
	rpath = malloc(rplen + 1, M_SMBNODENAME, M_WAITOK);
	p = rpath;
	bcopy(dirnm, p, dirlen);
	p += dirlen;
	if (sep != '\0')
		*p++ = sep;
	if (name != NULL) {
		bcopy(name, p, nmlen);
		p += nmlen;
	}
	*p = '\0';
	MPASS(p == rpath + rplen);
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	/* Vnode initialization */
	vp->v_type = fap->fa_attr & SMB_FA_DIR ? VDIR : VREG;
	vp->v_data = np;
	np->n_vnode = vp;
	np->n_mount = VFSTOSMBFS(mp);
	np->n_rpath = rpath;
	np->n_rplen = rplen;
	np->n_nmlen = nmlen;
	np->n_name = smbfs_name_alloc(name, nmlen);
	np->n_ino = fap->fa_ino;
	if (dvp) {
		ASSERT_VOP_LOCKED(dvp, "smbfs_node_alloc");
		np->n_parent = dvp;
		np->n_parentino = VTOSMB(dvp)->n_ino;
		/* Hold the parent (except the root) so n_parent stays valid. */
		if (/*vp->v_type == VDIR &&*/ (dvp->v_vflag & VV_ROOT) == 0) {
			vref(dvp);
			np->n_flag |= NREFPARENT;
		}
	} else if (vp->v_type == VREG)
		SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
	error = insmntque(vp, mp);
	if (error) {
		/*
		 * insmntque() disposed of vp on failure.
		 * NOTE(review): np->n_rpath and np->n_name appear to leak on
		 * this path (only np itself is freed) — confirm against the
		 * upstream tree.
		 */
		free(np, M_SMBNODE);
		return (error);
	}
	error = vfs_hash_insert(vp, smbfs_hash(name, nmlen), LK_EXCLUSIVE,
	    td, &vp2, smbfs_vnode_cmp, &sc);
	if (error)
		return (error);
	if (vp2 != NULL)
		/* Lost the insertion race; vfs_hash_insert() released vp. */
		*vpp = vp2;
	return (0);
}
/*
 * If deget() succeeds it returns with the gotten denode locked().
 *
 * pmp	     - address of msdosfsmount structure of the filesystem containing
 *	       the denode of interest.  The pm_dev field and the address of
 *	       the msdosfsmount structure are used.
 * dirclust  - which cluster bp contains, if dirclust is 0 (root directory)
 *	       diroffset is relative to the beginning of the root directory,
 *	       otherwise it is cluster relative.
 * diroffset - offset past begin of cluster of denode we want
 * depp	     - returns the address of the gotten denode.
 */
int
deget(struct msdosfsmount *pmp, uint32_t dirclust, uint32_t diroffset,
    struct denode **depp)
{
	int error;
	extern struct vops msdosfs_vops;
	struct direntry *direntptr;
	struct denode *ldep;
	struct vnode *nvp;
	struct buf *bp;
	struct proc *p = curproc;	/* XXX */

#ifdef MSDOSFS_DEBUG
	printf("deget(pmp %08x, dirclust %d, diroffset %x, depp %08x)\n",
	    pmp, dirclust, diroffset, depp);
#endif

	/*
	 * On FAT32 filesystems, root is a (more or less) normal
	 * directory
	 */
	if (FAT32(pmp) && dirclust == MSDOSFSROOT)
		dirclust = pmp->pm_rootdirblk;

	/*
	 * See if the denode is in the denode cache. Use the location of
	 * the directory entry to compute the hash value. For subdir use
	 * address of "." entry. For root dir (if not FAT32) use cluster
	 * MSDOSFSROOT, offset MSDOSFSROOT_OFS
	 *
	 * NOTE: The check for de_refcnt > 0 below insures the denode being
	 * examined does not represent an unlinked but still open file.
	 * These files are not to be accessible even when the directory
	 * entry that represented the file happens to be reused while the
	 * deleted file is still open.
	 */
retry:
	ldep = msdosfs_hashget(pmp->pm_dev, dirclust, diroffset);
	if (ldep) {
		*depp = ldep;
		return (0);
	}

	/*
	 * Directory entry was not in cache, have to create a vnode and
	 * copy it from the passed disk buffer.
	 */
	/* getnewvnode() does a vref() on the vnode */
	error = getnewvnode(VT_MSDOSFS, pmp->pm_mountp, &msdosfs_vops, &nvp);
	if (error) {
		*depp = 0;
		return (error);
	}
	ldep = malloc(sizeof(*ldep), M_MSDOSFSNODE, M_WAITOK | M_ZERO);
	lockinit(&ldep->de_lock, PINOD, "denode", 0, 0);
	nvp->v_data = ldep;
	ldep->de_vnode = nvp;
	ldep->de_flag = 0;
	ldep->de_devvp = 0;
	ldep->de_lockf = 0;
	ldep->de_dev = pmp->pm_dev;
	ldep->de_dirclust = dirclust;
	ldep->de_diroffset = diroffset;
	fc_purge(ldep, 0);	/* init the fat cache for this denode */

	/*
	 * Insert the denode into the hash queue and lock the denode so it
	 * can't be accessed until we've read it in and have done what we
	 * need to it.
	 */
	vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = msdosfs_hashins(ldep);
	if (error) {
		vput(nvp);
		if (error == EEXIST)
			goto retry;
		return (error);
	}

	ldep->de_pmp = pmp;
	ldep->de_devvp = pmp->pm_devvp;
	ldep->de_refcnt = 1;

	/*
	 * Copy the directory entry into the denode area of the vnode.
	 */
	if ((dirclust == MSDOSFSROOT ||
	    (FAT32(pmp) && dirclust == pmp->pm_rootdirblk)) &&
	    diroffset == MSDOSFSROOT_OFS) {
		/*
		 * Directory entry for the root directory. There isn't one,
		 * so we manufacture one. We should probably rummage
		 * through the root directory and find a label entry (if it
		 * exists), and then use the time and date from that entry
		 * as the time and date for the root denode.
		 */
		nvp->v_flag |= VROOT; /* should be further down XXX */

		ldep->de_Attributes = ATTR_DIRECTORY;
		if (FAT32(pmp))
			ldep->de_StartCluster = pmp->pm_rootdirblk;
			/* de_FileSize will be filled in further down */
		else {
			ldep->de_StartCluster = MSDOSFSROOT;
			ldep->de_FileSize = pmp->pm_rootdirsize *
			    pmp->pm_BytesPerSec;
		}
		/*
		 * fill in time and date so that dos2unixtime() doesn't
		 * spit up when called from msdosfs_getattr() with root
		 * denode
		 */
		ldep->de_CTime = 0x0000;	/* 00:00:00	 */
		ldep->de_CTimeHundredth = 0;
		ldep->de_CDate = (0 << DD_YEAR_SHIFT) | (1 << DD_MONTH_SHIFT)
		    | (1 << DD_DAY_SHIFT);
		/* Jan 1, 1980	 */
		ldep->de_ADate = ldep->de_CDate;
		ldep->de_MTime = ldep->de_CTime;
		ldep->de_MDate = ldep->de_CDate;
		/* leave the other fields as garbage */
	} else {
		error = readep(pmp, dirclust, diroffset, &bp, &direntptr);
		if (error) {
			/*
			 * BUGFIX: the old code returned here directly,
			 * leaking the locked, hashed vnode and the denode.
			 * Mirror the FreeBSD variant: mark the denode
			 * deleted so inactive/reclaim forget about it, clear
			 * de_devvp (no reference has been taken on it yet,
			 * so reclaim must not vrele() it), and vput() the
			 * vnode so it is reclaimed and unhashed.
			 */
			ldep->de_devvp = NULL;
			ldep->de_Name[0] = SLOT_DELETED;
			vput(nvp);
			*depp = NULL;
			return (error);
		}
		DE_INTERNALIZE(ldep, direntptr);
		brelse(bp);
	}

	/*
	 * Fill in a few fields of the vnode and finish filling in the
	 * denode.  Then return the address of the found denode.
	 */
	if (ldep->de_Attributes & ATTR_DIRECTORY) {
		/*
		 * Since DOS directory entries that describe directories
		 * have 0 in the filesize field, we take this opportunity
		 * to find out the length of the directory and plug it into
		 * the denode structure.
		 */
		uint32_t size;

		nvp->v_type = VDIR;
		if (ldep->de_StartCluster != MSDOSFSROOT) {
			error = pcbmap(ldep, 0xffff, 0, &size, 0);
			if (error == E2BIG) {
				ldep->de_FileSize = de_cn2off(pmp, size);
				error = 0;
			} else if (error) {
				printf("deget(): pcbmap returned %d\n", error);
				/*
				 * BUGFIX: same leak as the readep() failure
				 * above — release the half-constructed node
				 * instead of abandoning it locked and hashed.
				 */
				ldep->de_devvp = NULL;
				ldep->de_Name[0] = SLOT_DELETED;
				vput(nvp);
				*depp = NULL;
				return (error);
			}
		}
	} else
		nvp->v_type = VREG;
	vref(ldep->de_devvp);
	*depp = ldep;
	return (0);
}
/*
 * internal version with extra arguments to allow accessing resource fork
 *
 * Looks up (or reads in) the hfsnode for (ino, fork); `fork` is coerced to
 * HFS_DATAFORK unless it is exactly HFS_RSRCFORK.  Returns the vnode in
 * *vpp, or sets *vpp = NULL on error.
 */
int
hfs_vget_internal(struct mount *mp, ino_t ino, uint8_t fork,
    struct vnode **vpp)
{
	struct hfsmount *hmp;
	struct hfsnode *hnode;
	struct vnode *vp;
	hfs_callback_args cbargs;
	hfs_cnid_t cnid;
	hfs_catalog_keyed_record_t rec;
	hfs_catalog_key_t key;	/* the search key used to find this file on disk */
	dev_t dev;
	int error;

#ifdef HFS_DEBUG
	printf("vfsop = hfs_vget()\n");
#endif /* HFS_DEBUG */

	hnode = NULL;
	vp = NULL;
	hmp = VFSTOHFS(mp);
	dev = hmp->hm_dev;
	cnid = (hfs_cnid_t)ino;

	if (fork != HFS_RSRCFORK)
		fork = HFS_DATAFORK;

retry:
	/* Check if this vnode has already been allocated. If so, just return it. */
	if ((*vpp = hfs_nhashget(dev, cnid, fork, LK_EXCLUSIVE)) != NULL)
		return 0;

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_HFS, mp, hfs_vnodeop_p, NULL, &vp);
	if (error) {
		goto error;
	}
	hnode = malloc(sizeof(struct hfsnode), M_TEMP,
	    M_WAITOK | M_ZERO);

	/*
	 * If someone beat us to it while sleeping in getnewvnode(),
	 * push back the freshly allocated vnode we don't need, and return.
	 */
	mutex_enter(&hfs_hashlock);
	if (hfs_nhashget(dev, cnid, fork, 0) != NULL) {
		mutex_exit(&hfs_hashlock);
		ungetnewvnode(vp);
		free(hnode, M_TEMP);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;
	vp->v_data = hnode;
	genfs_node_init(vp, &hfs_genfsops);

	hnode->h_vnode = vp;
	hnode->h_hmp = hmp;
	hnode->dummy = 0x1337BABE;	/* debug sentinel value */

	/*
	 * We need to put this vnode into the hash chain and lock it so that other
	 * requests for this inode will block if they arrive while we are sleeping
	 * waiting for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read. The hash chain requires the node's
	 * device and cnid to be known. Since this information was passed in the
	 * arguments, fill in the appropriate hfsnode fields without reading having
	 * to read the disk.
	 */
	hnode->h_dev = dev;
	hnode->h_rec.u.cnid = cnid;
	hnode->h_fork = fork;

	hfs_nhashinsert(hnode);
	mutex_exit(&hfs_hashlock);

	/*
	 * Read catalog record from disk.
	 */
	hfslib_init_cbargs(&cbargs);

	if (hfslib_find_catalog_record_with_cnid(&hmp->hm_vol, cnid,
		&rec, &key, &cbargs) != 0) {
		/* vput() reclaims the vnode and drops the hash entry. */
		vput(vp);
		error = EBADF;
		goto error;
	}

	memcpy(&hnode->h_rec, &rec, sizeof(hnode->h_rec));
	hnode->h_parent = key.parent_cnid;

	/* XXX Eventually need to add an "ignore permissions" mount option */

	/*
	 * Now convert some of the catalog record's fields into values that make
	 * sense on this system.
	 */
	/* DATE AND TIME */

	/*
	 * Initialize the vnode from the hfsnode, check for aliases.
	 * Note that the underlying vnode may change.
	 */
	hfs_vinit(mp, hfs_specop_p, hfs_fifoop_p, &vp);

	hnode->h_devvp = hmp->hm_devvp;
	vref(hnode->h_devvp);	/* Increment the ref count to the volume's device. */

	/* Make sure UVM has allocated enough memory. (?) */
	if (hnode->h_rec.u.rec_type == HFS_REC_FILE) {
		if (hnode->h_fork == HFS_DATAFORK)
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.data_fork.logical_size);
		else
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.rsrc_fork.logical_size);
	} else
		uvm_vnp_setsize(vp, 0); /* no directly reading directories */

	*vpp = vp;

	return 0;

error:
	*vpp = NULL;
	return error;
}
/*
 * Look up a vnode/nfsnode by file handle and store the pointer in *npp.
 * Callers must check for mount points!!
 * An error number is returned.
 */
int
nfs_nget(struct mount *mnt, nfsfh_t *fh, int fhsize, struct nfsnode **npp)
{
	struct nfsmount *nmp;
	struct nfsnode *np, find, *np2;
	struct vnode *vp, *nvp;
	struct proc *p = curproc;	/* XXX */
	int error;

	nmp = VFSTONFS(mnt);

loop:
	rw_enter_write(&nfs_hashlock);
	find.n_fhp = fh;
	find.n_fhsize = fhsize;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		rw_exit_write(&nfs_hashlock);
		vp = NFSTOV(np);
		error = vget(vp, LK_EXCLUSIVE, p);
		/* vget() failure means we lost a reclaim race; retry. */
		if (error)
			goto loop;
		*npp = np;
		return (0);
	}

	/*
	 * getnewvnode() could recycle a vnode, potentially formerly
	 * owned by NFS. This will cause a VOP_RECLAIM() to happen,
	 * which will cause recursive locking, so we unlock before
	 * calling getnewvnode() lock again afterwards, but must check
	 * to see if this nfsnode has been added while we did not hold
	 * the lock.
	 */
	rw_exit_write(&nfs_hashlock);
	error = getnewvnode(VT_NFS, mnt, &nfs_vops, &nvp);
	/* note that we don't have this vnode set up completely yet */
	rw_enter_write(&nfs_hashlock);
	if (error) {
		*npp = NULL;
		rw_exit_write(&nfs_hashlock);
		return (error);
	}
	/* Mark the vnode larval until v_data is attached below. */
	nvp->v_flag |= VLARVAL;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		/* Another thread inserted the node meanwhile; discard ours. */
		vgone(nvp);
		rw_exit_write(&nfs_hashlock);
		goto loop;
	}
	vp = nvp;
	np = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
	vp->v_data = np;
	/* we now have an nfsnode on this vnode */
	vp->v_flag &= ~VLARVAL;
	np->n_vnode = vp;

	rw_init(&np->n_commitlock, "nfs_commitlk");

	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct
	 */
	if ((fhsize == nmp->nm_fhsize) && !bcmp(fh, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_flag |= VROOT;
	}

	np->n_fhp = &np->n_fh;
	bcopy(fh, np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	/* Insert must succeed: we re-checked under the lock above. */
	np2 = RB_INSERT(nfs_nodetree, &nmp->nm_ntree, np);
	KASSERT(np2 == NULL);
	np->n_accstamp = -1;
	rw_exit(&nfs_hashlock);
	*npp = np;
	return (0);
}
/*
 * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
static int
ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ext2mount *ump;
	struct buf *bp;
	struct vnode *vp;
	struct cdev *dev;
	struct thread *td;
	int i, error;
	int used_blocks;

	td = curthread;
	/* Fast path: the inode may already be in the vfs hash. */
	error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	ump = VFSTOEXT2(mp);
	dev = ump->um_dev;

	/*
	 * Allocate the in-core inode before getnewvnode() so a bogus
	 * v_data pointer can never be observed.
	 */
	ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode("ext2fs", mp, &ext2_vnodeops, &vp)) != 0) {
		*vpp = NULL;
		free(ip, M_EXT2NODE);
		return (error);
	}
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_ump = ump;
	ip->i_number = ino;

	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		/* insmntque() disposed of vp on failure. */
		free(ip, M_EXT2NODE);
		*vpp = NULL;
		return (error);
	}
	/* A non-NULL *vpp here means another thread won the insertion race. */
	error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/* Read in the disk contents for the inode, copy into the inode. */
	if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/* convert ext2 inode to dinode */
	ext2_ei2i((struct ext2fs_dinode *) ((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)), ip);
	ip->i_block_group = ino_to_cg(fs, ino);
	ip->i_next_alloc_block = 0;
	ip->i_next_alloc_goal = 0;

	/*
	 * Now we want to make sure that block pointers for unused
	 * blocks are zeroed out - ext2_balloc depends on this
	 * although for regular files and directories only
	 *
	 * If IN_E4EXTENTS is enabled, unused blocks are not zeroed
	 * out because we could corrupt the extent tree.
	 */
	if (!(ip->i_flag & IN_E4EXTENTS) &&
	    (S_ISDIR(ip->i_mode) || S_ISREG(ip->i_mode))) {
		used_blocks = (ip->i_size+fs->e2fs_bsize-1) / fs->e2fs_bsize;
		for (i = used_blocks; i < EXT2_NDIR_BLOCKS; i++)
			ip->i_db[i] = 0;
	}
#ifdef EXT2FS_DEBUG
	ext2_print_inode(ip);
#endif
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if ((error = ext2_vinit(mp, &ext2_fifoops, &vp)) != 0) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		ip->i_gen = random() + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	*vpp = vp;
	return (0);
}
/*
 * If deget() succeeds it returns with the gotten denode locked().
 *
 * pmp	     - address of msdosfsmount structure of the filesystem containing
 *	       the denode of interest.  The address of
 *	       the msdosfsmount structure are used.
 * dirclust  - which cluster bp contains, if dirclust is 0 (root directory)
 *	       diroffset is relative to the beginning of the root directory,
 *	       otherwise it is cluster relative.
 * diroffset - offset past begin of cluster of denode we want
 * depp	     - returns the address of the gotten denode.
 */
int
deget(struct msdosfsmount *pmp, u_long dirclust, u_long diroffset,
    struct denode **depp)
{
	int error;
	uint64_t inode;
	struct mount *mntp = pmp->pm_mountp;
	struct direntry *direntptr;
	struct denode *ldep;
	struct vnode *nvp, *xvp;
	struct buf *bp;

#ifdef MSDOSFS_DEBUG
	printf("deget(pmp %p, dirclust %lu, diroffset %lx, depp %p)\n",
	    pmp, dirclust, diroffset, depp);
#endif

	/*
	 * On FAT32 filesystems, root is a (more or less) normal
	 * directory
	 */
	if (FAT32(pmp) && dirclust == MSDOSFSROOT)
		dirclust = pmp->pm_rootdirblk;

	/*
	 * See if the denode is in the denode cache. Use the location of
	 * the directory entry to compute the hash value. For subdir use
	 * address of "." entry. For root dir (if not FAT32) use cluster
	 * MSDOSFSROOT, offset MSDOSFSROOT_OFS
	 *
	 * NOTE: The check for de_refcnt > 0 below insures the denode being
	 * examined does not represent an unlinked but still open file.
	 * These files are not to be accessible even when the directory
	 * entry that represented the file happens to be reused while the
	 * deleted file is still open.
	 */
	/* Synthetic inode number: byte address of the directory entry. */
	inode = (uint64_t)pmp->pm_bpcluster * dirclust + diroffset;

	error = vfs_hash_get(mntp, inode, LK_EXCLUSIVE, curthread, &nvp,
	    de_vncmpf, &inode);
	if (error)
		return (error);
	if (nvp != NULL) {
		*depp = VTODE(nvp);
		KASSERT((*depp)->de_dirclust == dirclust, ("wrong dirclust"));
		KASSERT((*depp)->de_diroffset == diroffset, ("wrong diroffset"));
		return (0);
	}
	ldep = malloc(sizeof(struct denode), M_MSDOSFSNODE, M_WAITOK | M_ZERO);

	/*
	 * Directory entry was not in cache, have to create a vnode and
	 * copy it from the passed disk buffer.
	 */
	/* getnewvnode() does a VREF() on the vnode */
	error = getnewvnode("msdosfs", mntp, &msdosfs_vnodeops, &nvp);
	if (error) {
		*depp = NULL;
		free(ldep, M_MSDOSFSNODE);
		return error;
	}
	nvp->v_data = ldep;
	ldep->de_vnode = nvp;
	ldep->de_flag = 0;
	ldep->de_dirclust = dirclust;
	ldep->de_diroffset = diroffset;
	ldep->de_inode = inode;
	lockmgr(nvp->v_vnlock, LK_EXCLUSIVE, NULL);
	fc_purge(ldep, 0);	/* init the FAT cache for this denode */
	error = insmntque(nvp, mntp);
	if (error != 0) {
		/* insmntque() disposed of nvp on failure. */
		free(ldep, M_MSDOSFSNODE);
		*depp = NULL;
		return (error);
	}
	error = vfs_hash_insert(nvp, inode, LK_EXCLUSIVE, curthread, &xvp,
	    de_vncmpf, &inode);
	if (error) {
		*depp = NULL;
		return (error);
	}
	if (xvp != NULL) {
		/* Lost the race; vfs_hash_insert() released our vnode. */
		*depp = xvp->v_data;
		return (0);
	}

	ldep->de_pmp = pmp;
	ldep->de_refcnt = 1;

	/*
	 * Copy the directory entry into the denode area of the vnode.
	 */
	if ((dirclust == MSDOSFSROOT ||
	    (FAT32(pmp) && dirclust == pmp->pm_rootdirblk)) &&
	    diroffset == MSDOSFSROOT_OFS) {
		/*
		 * Directory entry for the root directory. There isn't one,
		 * so we manufacture one. We should probably rummage
		 * through the root directory and find a label entry (if it
		 * exists), and then use the time and date from that entry
		 * as the time and date for the root denode.
		 */
		nvp->v_vflag |= VV_ROOT; /* should be further down XXX */

		ldep->de_Attributes = ATTR_DIRECTORY;
		ldep->de_LowerCase = 0;
		if (FAT32(pmp))
			ldep->de_StartCluster = pmp->pm_rootdirblk;
			/* de_FileSize will be filled in further down */
		else {
			ldep->de_StartCluster = MSDOSFSROOT;
			ldep->de_FileSize = pmp->pm_rootdirsize * DEV_BSIZE;
		}
		/*
		 * fill in time and date so that fattime2timespec() doesn't
		 * spit up when called from msdosfs_getattr() with root
		 * denode
		 */
		ldep->de_CHun = 0;
		ldep->de_CTime = 0x0000;	/* 00:00:00	 */
		ldep->de_CDate = (0 << DD_YEAR_SHIFT) | (1 << DD_MONTH_SHIFT)
		    | (1 << DD_DAY_SHIFT);
		/* Jan 1, 1980	 */
		ldep->de_ADate = ldep->de_CDate;
		ldep->de_MTime = ldep->de_CTime;
		ldep->de_MDate = ldep->de_CDate;
		/* leave the other fields as garbage */
	} else {
		error = readep(pmp, dirclust, diroffset, &bp, &direntptr);
		if (error) {
			/*
			 * The denode does not contain anything useful, so
			 * it would be wrong to leave it on its hash chain.
			 * Arrange for vput() to just forget about it.
			 */
			ldep->de_Name[0] = SLOT_DELETED;

			vput(nvp);
			*depp = NULL;
			return (error);
		}
		(void)DE_INTERNALIZE(ldep, direntptr);
		brelse(bp);
	}

	/*
	 * Fill in a few fields of the vnode and finish filling in the
	 * denode.  Then return the address of the found denode.
	 */
	if (ldep->de_Attributes & ATTR_DIRECTORY) {
		/*
		 * Since DOS directory entries that describe directories
		 * have 0 in the filesize field, we take this opportunity
		 * to find out the length of the directory and plug it into
		 * the denode structure.
		 */
		u_long size;

		/*
		 * XXX it sometimes happens that the "." entry has cluster
		 * number 0 when it shouldn't.  Use the actual cluster number
		 * instead of what is written in directory entry.
		 */
		if (diroffset == 0 && ldep->de_StartCluster != dirclust) {
#ifdef MSDOSFS_DEBUG
			printf("deget(): \".\" entry at clust %lu != %lu\n",
			    dirclust, ldep->de_StartCluster);
#endif
			ldep->de_StartCluster = dirclust;
		}

		nvp->v_type = VDIR;
		if (ldep->de_StartCluster != MSDOSFSROOT) {
			error = pcbmap(ldep, 0xffff, 0, &size, 0);
			if (error == E2BIG) {
				ldep->de_FileSize = de_cn2off(pmp, size);
				error = 0;
			} else {
				/*
				 * NOTE(review): a non-E2BIG pcbmap() error is
				 * logged but otherwise swallowed here — the
				 * function still returns 0 below.  Confirm
				 * against the upstream tree whether this is
				 * intentional.
				 */
#ifdef MSDOSFS_DEBUG
				printf("deget(): pcbmap returned %d\n", error);
#endif
			}
		}
	} else
		nvp->v_type = VREG;
	ldep->de_modrev = init_va_filerev();
	*depp = ldep;
	return (0);
}
/*
 * Look up / create the vnode for a given NTFS ntnode attribute.
 *
 * Resolves (ino, attrtype, attrname) to a locked, referenced vnode in *vpp.
 * The ntnode is looked up (or created) first, its attributes are force-loaded
 * unless VG_DONTLOADIN is set, and the matching fnode is obtained.  If the
 * fnode already has a vnode attached, that vnode is reused; otherwise a new
 * one is allocated and initialized.
 *
 * Returns 0 on success with *vpp set; on error *vpp is NULL and the
 * ntnode/fnode references taken along the way have been dropped.
 */
int
ntfs_vgetex(struct mount *mp, ino_t ino, u_int32_t attrtype, char *attrname,
    u_long lkflags, u_long flags, struct thread *td, struct vnode **vpp)
{
	int error;
	struct ntfsmount *ntmp;
	struct ntnode *ip;
	struct fnode *fp;
	struct vnode *vp;
	enum vtype f_type;

	dprintf(("ntfs_vgetex: ino: %ju, attr: 0x%x:%s, lkf: 0x%lx, f: 0x%lx\n",
	    (uintmax_t) ino, attrtype, attrname?attrname:"", lkflags, flags));

	ntmp = VFSTONTFS(mp);
	*vpp = NULL;

	/* Get ntnode (referenced); every error path below must ntfs_ntput() it. */
	error = ntfs_ntlookup(ntmp, ino, &ip);
	if (error) {
		kprintf("ntfs_vget: ntfs_ntget failed\n");
		return (error);
	}
	/* It may be not initialized fully, so force load it */
	if (!(flags & VG_DONTLOADIN) && !(ip->i_flag & IN_LOADED)) {
		error = ntfs_loadntnode(ntmp, ip);
		if(error) {
			kprintf("ntfs_vget: CAN'T LOAD ATTRIBUTES FOR INO:"
			    " %"PRId64"\n", ip->i_number);
			ntfs_ntput(ip);
			return (error);
		}
	}

	/* Get the fnode describing the requested attribute stream. */
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		kprintf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}

	/*
	 * Determine the vnode type and validate the fnode size fields the
	 * first time through (FN_VALID is sticky).  An unnamed $DATA
	 * attribute on a directory record maps to VDIR; VG_EXT callers get
	 * an internal (VINT) node with zeroed sizes; everything else is a
	 * regular file whose sizes are read from the attribute.
	 */
	f_type = VINT;
	if (!(flags & VG_DONTVALIDFN) && !(fp->f_flag & FN_VALID)) {
		if ((ip->i_frflag & NTFS_FRFLAG_DIR) &&
		    (fp->f_attrtype == NTFS_A_DATA && fp->f_attrname == NULL)) {
			f_type = VDIR;
		} else if (flags & VG_EXT) {
			f_type = VINT;
			fp->f_size = fp->f_allocated = 0;
		} else {
			f_type = VREG;
			error = ntfs_filesize(ntmp, fp,
			    &fp->f_size, &fp->f_allocated);
			if (error) {
				ntfs_ntput(ip);
				return (error);
			}
		}
		fp->f_flag |= FN_VALID;
	}

	/* Reuse an existing vnode if the fnode already has one attached. */
	if (FTOV(fp)) {
		VGET(FTOV(fp), lkflags);
		*vpp = FTOV(fp);
		ntfs_ntput(ip);
		return (0);
	}

	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, &vp, VLKTIMEOUT, 0);
	if(error) {
		/* Drop the fnode reference too; no vnode ever owned it. */
		ntfs_frele(fp);
		ntfs_ntput(ip);
		return (error);
	}
	dprintf(("ntfs_vget: vnode: %p for ntnode: %ju\n", vp, (uintmax_t)ino));

	fp->f_vp = vp;
	vp->v_data = fp;
	vp->v_type = f_type;

	if (ino == NTFS_ROOTINO)
		vsetflags(vp, VROOT);

	/*
	 * Normal files use the buffer cache
	 */
	if (f_type == VREG)
		vinitvmio(vp, fp->f_size, PAGE_SIZE, -1);

	ntfs_ntput(ip);

	KKASSERT(lkflags & LK_TYPE_MASK);
	/* XXX leave vnode locked exclusively from getnewvnode */
	*vpp = vp;
	return (0);
}
/*
 * Allocate a vnode for a pseudofs node.
 *
 * Looks up (pn, pid) in the global vnode cache first; on a hit the cached
 * vnode is vget'd, purged from the name cache, locked and returned.  On a
 * miss a new vnode plus pfs_vdata are allocated, initialized according to
 * the node type, linked at the head of the cache list, and returned locked
 * exclusive.
 *
 * Returns 0 with *vpp set, or the getnewvnode() error.
 *
 * NOTE(review): the cache mutex is dropped between the failed lookup and
 * the later list insertion, so two concurrent callers for the same (pn, pid)
 * could presumably both miss and insert duplicates — confirm whether callers
 * serialize this externally.
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid) {
			if (vget(pvd->pvd_vnode, 0, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = pvd->pvd_vnode;
				mtx_unlock(&pfs_vncache_mutex);
				/* XXX see comment at top of pfs_lookup() */
				cache_purge(*vpp);
				vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE,
				    curthread);
				return (0);
			}
			/* XXX if this can happen, we're in trouble */
			break;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
	++pfs_vncache_misses;

	/* nope, get a new one */
	MALLOC(pvd, struct pfs_vdata *, sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	/* Track the high-water mark of cache entries. */
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	error = getnewvnode("pseudofs", mp, pfs_vnodeop_p, vpp);
	if (error) {
		FREE(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	/* Map the pseudofs node type onto a vnode type. */
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	/* Link the new entry at the head of the doubly-linked cache list. */
	mtx_lock(&pfs_vncache_mutex);
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	(*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
	vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
	return (0);
}
/*
 * lookup an anode, check mount's hash table if not found, create
 * return locked and referenced al la vget(vp, 1);
 *
 * Reads the anode's disk block, decodes its type (root, dir, file, hard/soft
 * link) and fills in the in-core anode: name, protection bits, uid/gid,
 * timestamps, directory hash table or symlink target as appropriate.
 * The anode is inserted in the hash BEFORE the bread() so concurrent
 * lookups for the same block find it and block on the vnode lock.
 */
int
adosfs_vget(struct mount *mp, ino_t an, struct vnode **vpp)
{
	struct adosfsmount *amp;
	struct vnode *vp;
	struct anode *ap;
	struct buf *bp;
	char *nam, *tmp;
	int namlen, error;

	error = 0;
	amp = VFSTOADOSFS(mp);
	bp = NULL;

	/*
	 * check hash table. we are done if found
	 */
	if ((*vpp = adosfs_ahashget(mp, an)) != NULL)
		return (0);

	error = getnewvnode(VT_ADOSFS, mp, adosfs_vnodeop_p, NULL, &vp);
	if (error)
		return (error);

	/*
	 * setup, insert in hash, and lock before io.
	 */
	vp->v_data = ap = pool_get(&adosfs_node_pool, PR_WAITOK);
	memset(ap, 0, sizeof(struct anode));
	ap->vp = vp;
	ap->amp = amp;
	ap->block = an;
	ap->nwords = amp->nwords;
	genfs_node_init(vp, &adosfs_genfsops);
	adosfs_ainshash(amp, ap);

	if ((error = bread(amp->devvp, an * amp->bsize / DEV_BSIZE,
	    amp->bsize, NOCRED, 0, &bp)) != 0) {
		vput(vp);
		return (error);
	}

	/*
	 * get type and fill rest in based on that.
	 */
	switch (ap->type = adosfs_getblktype(amp, bp)) {
	case AROOT:
		vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
		/* Root block keeps its times at fixed word offsets. */
		ap->mtimev.days = adoswordn(bp, ap->nwords - 10);
		ap->mtimev.mins = adoswordn(bp, ap->nwords - 9);
		ap->mtimev.ticks = adoswordn(bp, ap->nwords - 8);
		ap->created.days = adoswordn(bp, ap->nwords - 7);
		ap->created.mins = adoswordn(bp, ap->nwords - 6);
		ap->created.ticks = adoswordn(bp, ap->nwords - 5);
		break;
	case ALDIR:
	case ADIR:
		vp->v_type = VDIR;
		break;
	case ALFILE:
	case AFILE:
		vp->v_type = VREG;
		ap->fsize = adoswordn(bp, ap->nwords - 47);
		break;
	case ASLINK:		/* XXX soft link */
		vp->v_type = VLNK;
		/*
		 * convert from BCPL string and
		 * from: "part:dir/file" to: "/part/dir/file"
		 */
		nam = (char *)bp->b_data + (6 * sizeof(long));
		namlen = strlen(nam);
		tmp = nam;
		while (*tmp && *tmp != ':')
			tmp++;
		if (*tmp == 0) {
			/* No volume part: copy as-is. */
			ap->slinkto = malloc(namlen + 1, M_ANODE, M_WAITOK);
			memcpy(ap->slinkto, nam, namlen);
		} else if (*nam == ':') {
			/* Leading ':' (current volume): replace it with '/'. */
			ap->slinkto = malloc(namlen + 1, M_ANODE, M_WAITOK);
			memcpy(ap->slinkto, nam, namlen);
			ap->slinkto[0] = '/';
		} else {
			/* "part:rest" -> "/part/rest" (one byte longer). */
			ap->slinkto = malloc(namlen + 2, M_ANODE, M_WAITOK);
			ap->slinkto[0] = '/';
			memcpy(&ap->slinkto[1], nam, namlen);
			ap->slinkto[tmp - nam + 1] = '/';
			namlen++;
		}
		ap->slinkto[namlen] = 0;
		ap->fsize = namlen;
		break;
	default:
		brelse(bp, 0);
		vput(vp);
		return (EINVAL);
	}

	/*
	 * Get appropriate data from this block; hard link needs
	 * to get other data from the "real" block.
	 */

	/*
	 * copy in name (from original block)
	 */
	nam = (char *)bp->b_data + (ap->nwords - 20) * sizeof(u_int32_t);
	/* BCPL string: first byte is the length. */
	namlen = *(u_char *)nam++;
	if (namlen > 30) {
#ifdef DIAGNOSTIC
		printf("adosfs: aget: name length too long blk %llu\n",
		    (unsigned long long)an);
#endif
		brelse(bp, 0);
		vput(vp);
		return (EINVAL);
	}
	memcpy(ap->name, nam, namlen);
	ap->name[namlen] = 0;

	/*
	 * if dir alloc hash table and copy it in
	 */
	if (vp->v_type == VDIR) {
		int i;

		/* Two tables in one allocation: tab[] plus the tabi[] ints. */
		ap->tab = malloc(ANODETABSZ(ap) * 2, M_ANODE, M_WAITOK);
		ap->ntabent = ANODETABENT(ap);
		ap->tabi = (int *)&ap->tab[ap->ntabent];
		memset(ap->tabi, 0, ANODETABSZ(ap));
		for (i = 0; i < ap->ntabent; i++)
			ap->tab[i] = adoswordn(bp, i + 6);
	}

	/*
	 * misc.
	 */
	ap->pblock = adoswordn(bp, ap->nwords - 3);
	ap->hashf = adoswordn(bp, ap->nwords - 4);
	ap->linknext = adoswordn(bp, ap->nwords - 10);
	ap->linkto = adoswordn(bp, ap->nwords - 11);

	/*
	 * setup last indirect block cache.
	 */
	ap->lastlindblk = 0;
	if (ap->type == AFILE) {
		ap->lastindblk = ap->block;
		if (adoswordn(bp, ap->nwords - 10))
			ap->linkto = ap->block;
	} else if (ap->type == ALFILE) {
		/* Hard-linked file: re-read size from the real header block. */
		ap->lastindblk = ap->linkto;
		brelse(bp, 0);
		bp = NULL;
		error = bread(amp->devvp, ap->linkto * amp->bsize / DEV_BSIZE,
		    amp->bsize, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			return (error);
		}
		ap->fsize = adoswordn(bp, ap->nwords - 47);
		/*
		 * Should ap->block be set to the real file header block?
		 */
		ap->block = ap->linkto;
	}

	if (ap->type == AROOT) {
		ap->adprot = 15;
		ap->uid = amp->uid;
		ap->gid = amp->gid;
	} else {
		/* Low nibble of ADOS protection bits is active-low: invert. */
		ap->adprot = adoswordn(bp, ap->nwords - 48) ^ 15;
		/*
		 * ADOS directories do not have a `x' protection bit as
		 * it is known in VFS; this functionality is fulfilled
		 * by the ADOS `r' bit.
		 *
		 * To retain the ADOS behaviour, fake execute permissions
		 * in that case.
		 */
		if ((ap->type == ADIR || ap->type == ALDIR) &&
		    (ap->adprot & 0x00000008) == 0)
			ap->adprot &= ~0x00000002;

		/*
		 * Get uid/gid from extensions in file header
		 * (really need to know if this is a muFS partition)
		 */
		ap->uid = (adoswordn(bp, ap->nwords - 49) >> 16) & 0xffff;
		ap->gid = adoswordn(bp, ap->nwords - 49) & 0xffff;
		if (ap->uid || ap->gid) {
			/* 0xffff in either field encodes root. */
			if (ap->uid == 0xffff)
				ap->uid = 0;
			if (ap->gid == 0xffff)
				ap->gid = 0;
			ap->adprot |= 0x40000000;	/* Kludge */
		} else {
			/*
			 * uid & gid extension don't exist,
			 * so use the mount-point uid/gid
			 */
			ap->uid = amp->uid;
			ap->gid = amp->gid;
		}
	}
	ap->mtime.days = adoswordn(bp, ap->nwords - 23);
	ap->mtime.mins = adoswordn(bp, ap->nwords - 22);
	ap->mtime.ticks = adoswordn(bp, ap->nwords - 21);
	*vpp = vp;
	brelse(bp, 0);
	uvm_vnp_setsize(vp, ap->fsize);
	return (0);
}
/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk. If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount points must be
 * done by the calling routine.
 *
 * On success *vpp holds the locked vnode; on failure *vpp is NULL and the
 * error is returned.  If the hash insert loses a race (EEXIST) the whole
 * lookup is retried from the top.
 */
int
ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct fs *fs;
	struct inode *ip;
	struct ufs1_dinode *dp1;
#ifdef FFS2
	struct ufs2_dinode *dp2;
#endif
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	/* On-disk inode numbers are ufsino_t; anything wider is corruption. */
	if (ino > (ufsino_t)-1)
		panic("ffs_vget: alien ino_t %llu", (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;

retry:
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, &ffs_vops, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

#ifdef VFSLCKDEBUG
	vp->v_flag |= VLOCKSWORK;
#endif
	ip = pool_get(&ffs_ino_pool, PR_WAITOK|PR_ZERO);
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	ip->i_ump = ump;
	vref(ip->i_devvp);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_vtbl = &ffs_vtbl;

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	error = ufs_ihashins(ip);
	if (error) {
		/*
		 * VOP_INACTIVE will treat this as a stale file
		 * and recycle it quickly
		 */
		vrele(vp);
		/* EEXIST means another thread inserted first: look it up. */
		if (error == EEXIST)
			goto retry;
		return (error);
	}

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

#ifdef FFS2
	if (ip->i_ump->um_fstype == UM_UFS2) {
		ip->i_din2 = pool_get(&ffs_dinode2_pool, PR_WAITOK);
		dp2 = (struct ufs2_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din2 = *dp2;
	} else
#endif
	{
		ip->i_din1 = pool_get(&ffs_dinode1_pool, PR_WAITOK);
		dp1 = (struct ufs1_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din1 = *dp1;
	}

	brelse(bp);

	/* Soft dependencies track effective link count separately. */
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = DIP(ip, nlink);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, &ffs_specvops, FFS_FIFOOPS, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (DIP(ip, gen) == 0) {
		DIP_ASSIGN(ip, gen, arc4random() & INT_MAX);
		if (DIP(ip, gen) == 0 || DIP(ip, gen) == -1)
			DIP_ASSIGN(ip, gen, 1);	/* Shouldn't happen */
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_inodefmt < FS_44INODEFMT) {
		ip->i_ffs1_uid = ip->i_din1->di_ouid;
		ip->i_ffs1_gid = ip->i_din1->di_ogid;
	}

	*vpp = vp;
	return (0);
}
/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * On success *npp points at the nfsnode whose vnode is locked per lkflags.
 * The file handle is hashed with FNV-1 and looked up via vfs_hash; a losing
 * race in vfs_hash_insert() returns the winner's node (the loser vnode is
 * vput by vfs_hash_insert itself).
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	/* Build a temporary nfsfh just for the hash comparison callback. */
	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Cache hit: vnode already locked by vfs_hash_get(). */
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);
	error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct
	 */
	if ((fhsize == nmp->nm_fhsize) && !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}

	/* Permanent copy of the file handle, owned by the nfsnode. */
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		/* insmntque() already released the vnode on failure. */
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
/*
 * Get (or create) the vnode for a CHFS inode number.
 *
 * Checks the inode hash first; on a miss allocates a vnode + chfs_inode,
 * rechecks the hash under chfs_hashlock to close the race, then initializes
 * the node: root inode gets fixed metadata, otherwise the vnode cache entry
 * is created or, if one already exists on flash, the node is read back and
 * type-specific data (dirents, file data, symlink target, device number) is
 * loaded.
 *
 * Returns 0 with *vpp set, or an error with *vpp NULL.
 */
static int
chfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct chfs_mount *chmp;
	struct chfs_inode *ip;
	struct ufsmount *ump;
	struct vnode *vp;
	dev_t dev;
	int error;
	struct chfs_vnode_cache* chvc = NULL;
	struct chfs_node_ref* nref = NULL;
	struct buf *bp;

	dbg("vget() | ino: %llu\n", (unsigned long long)ino);
	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	/*
	 * NOTE(review): if the caller passes vpp == NULL a scratch pointer
	 * is allocated here and apparently never freed — looks like a leak;
	 * confirm whether any caller actually passes NULL.
	 */
	if (!vpp) {
		vpp = kmem_alloc(sizeof(struct vnode*), KM_SLEEP);
	}

	/* Get node from inode hash. */
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		return 0;
	}

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_CHFS,
		    mp, chfs_vnodeop_p, NULL, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&chfs_inode_pool, PR_WAITOK);

	/* Recheck the hash under the lock; a racing thread may have won. */
	mutex_enter(&chfs_hashlock);
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		mutex_exit(&chfs_hashlock);
		ungetnewvnode(vp);
		pool_put(&chfs_inode_pool, ip);
		goto retry;
	}
	vp->v_vflag |= VV_LOCKSWORK;

	/* Initialize vnode/inode. */
	memset(ip, 0, sizeof(*ip));
	vp->v_data = ip;
	ip->vp = vp;
	/*
	 * NOTE(review): vp->v_type has not been assigned yet at this point,
	 * so this presumably maps the fresh vnode's default type — verify
	 * that later code (root setup / chfs_readvnode) always overwrites
	 * ch_type as intended.
	 */
	ip->ch_type = VTTOCHT(vp->v_type);
	ip->ump = ump;
	ip->chmp = chmp = ump->um_chfs;
	ip->dev = dev;
	ip->ino = ino;
	vp->v_mount = mp;
	genfs_node_init(vp, &chfs_genfsops);

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	chfs_ihashins(ip);
	mutex_exit(&chfs_hashlock);

	/* Set root inode. */
	if (ino == CHFS_ROOTINO) {
		dbg("SETROOT\n");
		vp->v_vflag |= VV_ROOT;
		vp->v_type = VDIR;
		ip->ch_type = CHT_DIR;
		ip->mode = IFMT | IEXEC | IWRITE | IREAD;
		ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
		chfs_update(vp, NULL, NULL, UPDATE_WAIT);
		TAILQ_INIT(&ip->dents);
		chfs_set_vnode_size(vp, 512);
	}

	mutex_enter(&chmp->chm_lock_vnocache);
	chvc = chfs_vnode_cache_get(chmp, ino);
	mutex_exit(&chmp->chm_lock_vnocache);
	if (!chvc) {
		dbg("!chvc\n");
		/* Initialize the corresponding vnode cache. */
		/* XXX, we cant alloc under a lock, refactor this! */
		chvc = chfs_vnode_cache_alloc(ino);
		mutex_enter(&chmp->chm_lock_vnocache);
		if (ino == CHFS_ROOTINO) {
			chvc->nlink = 2;
			chvc->pvno = CHFS_ROOTINO;
			chvc->state = VNO_STATE_CHECKEDABSENT;
		}
		chfs_vnode_cache_add(chmp, chvc);
		mutex_exit(&chmp->chm_lock_vnocache);

		ip->chvc = chvc;
		TAILQ_INIT(&ip->dents);
	} else {
		dbg("chvc\n");
		ip->chvc = chvc;
		/* We had a vnode cache, the node is already on flash, so read it */
		if (ino == CHFS_ROOTINO) {
			chvc->pvno = CHFS_ROOTINO;
			TAILQ_INIT(&chvc->scan_dirents);
		} else {
			chfs_readvnode(mp, ino, &vp);
		}

		mutex_enter(&chmp->chm_lock_mountfields);
		/* Initialize type specific things. */
		switch (ip->ch_type) {
		case CHT_DIR:
			/*
			 * Read every dirent; the list is terminated by a
			 * back-pointer to the vnode cache entry itself.
			 */
			nref = chvc->dirents;
			while (nref &&
			    (struct chfs_vnode_cache *)nref != chvc) {
				chfs_readdirent(mp, nref, ip);
				nref = nref->nref_next;
			}
			chfs_set_vnode_size(vp, 512);
			break;
		case CHT_REG:
			/* FALLTHROUGH */
		case CHT_SOCK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}
			break;
		case CHT_LNK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set link: read target into ip->target via an iobuf. */
			dbg("size: %llu\n", (unsigned long long)ip->size);
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = ip->size;
			bp->b_data = kmem_alloc(ip->size, KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			if (!ip->target)
				ip->target = kmem_alloc(ip->size,
				    KM_SLEEP);
			memcpy(ip->target, bp->b_data, ip->size);
			kmem_free(bp->b_data, ip->size);
			putiobuf(bp);
			break;
		case CHT_CHR:
			/* FALLTHROUGH */
		case CHT_BLK:
			/* FALLTHROUGH */
		case CHT_FIFO:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set device: the node's data is the dev_t itself. */
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = sizeof(dev_t);
			bp->b_data = kmem_alloc(sizeof(dev_t), KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			memcpy(&ip->rdev,
			    bp->b_data, sizeof(dev_t));
			kmem_free(bp->b_data, sizeof(dev_t));
			putiobuf(bp);
			/* Set specific operations. */
			if (ip->ch_type == CHT_FIFO) {
				vp->v_op = chfs_fifoop_p;
			} else {
				vp->v_op = chfs_specop_p;
				spec_node_init(vp, ip->rdev);
			}
			break;
		case CHT_BLANK:
			/* FALLTHROUGH */
		case CHT_BAD:
			break;
		}
		mutex_exit(&chmp->chm_lock_mountfields);
	}

	/* Finish inode initalization. */
	ip->devvp = ump->um_devvp;
	vref(ip->devvp);

	uvm_vnp_setsize(vp, ip->size);
	*vpp = vp;
	return 0;
}