Example #1
/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * XXX linear search is not very efficient.
	 */
retry:
	mtx_lock(&pfs_vncache_mutex);
	for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);

	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	pvd->pvd_next = pvd->pvd_prev = NULL;
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (e.g., on execve).
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Another thread may race with us and create the entry we are
	 * about to insert into the cache. Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	for (pvd2 = pfs_vncache; pvd2; pvd2 = pvd2->pvd_next) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	pvd->pvd_prev = NULL;
	pvd->pvd_next = pfs_vncache;
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd;
	pfs_vncache = pvd;
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
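
The XXX comment above concedes that the linear cache walk is inefficient; the remaining examples avoid it by keying vnodes through the VFS-wide hash. Below is a minimal sketch of that lookup-or-create pattern, assuming a hypothetical "myfs" with its own myfs_vnodeops; it is not pseudofs's actual replacement, just the shape the later examples share.

/*
 * Hedged sketch: hash-based lookup-or-create for a hypothetical myfs.
 */
static int
myfs_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	/* Fast path: the vnode may already be hashed under its ino. */
	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0 || *vpp != NULL)
		return (error);

	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
	if (error != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = insmntque(vp, mp);	/* destroys vp on failure */
	if (error != 0)
		return (error);
	/*
	 * If we lose a creation race, vfs_hash_insert() vput()s our vp
	 * and hands back the winner in *vpp.
	 */
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0 || *vpp != NULL)
		return (error);
	*vpp = vp;
	return (0);
}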
Example #2
int
udf_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	struct buf *bp;
	struct vnode *devvp;
	struct udf_mnt *udfmp;
	struct thread *td;
	struct vnode *vp;
	struct udf_node *unode;
	struct file_entry *fe;
	int error, sector, size;

	error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * We must promote to an exclusive lock for vnode creation.  A
	 * shared lock request can happen when lookup is passed LOCKSHARED.
	 */
	if ((flags & LK_TYPE_MASK) == LK_SHARED) {
		flags &= ~LK_TYPE_MASK;
		flags |= LK_EXCLUSIVE;
	}

	/*
	 * We do not lock vnode creation, as it is believed to be too
	 * expensive for such a rare case as the simultaneous creation of a
	 * vnode for the same ino by different processes. We just allow them
	 * to race and check later to decide who wins. Let the race begin!
	 */

	td = curthread;
	udfmp = VFSTOUDFFS(mp);

	unode = uma_zalloc(udf_zone_node, M_WAITOK | M_ZERO);

	if ((error = udf_allocv(mp, &vp, td))) {
		printf("Error from udf_allocv\n");
		uma_zfree(udf_zone_node, unode);
		return (error);
	}

	unode->i_vnode = vp;
	unode->hash_id = ino;
	unode->udfmp = udfmp;
	vp->v_data = unode;

	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree(udf_zone_node, unode);
		return (error);
	}
	error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/*
	 * Copy in the file entry.  Per the spec, the size can only be 1 block.
	 */
	sector = ino + udfmp->part_start;
	devvp = udfmp->im_devvp;
	if ((error = RDSECTOR(devvp, sector, udfmp->bsize, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

	fe = (struct file_entry *)bp->b_data;
	if (udf_checktag(&fe->tag, TAGID_FENTRY)) {
		printf("Invalid file entry!\n");
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (ENOMEM);
	}
	size = UDF_FENTRY_SIZE + le32toh(fe->l_ea) + le32toh(fe->l_ad);
	unode->fentry = malloc(size, M_UDFFENTRY, M_NOWAIT | M_ZERO);
	if (unode->fentry == NULL) {
		printf("Cannot allocate file entry block\n");
		vgone(vp);
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (ENOMEM);
	}

	bcopy(bp->b_data, unode->fentry, size);
	
	brelse(bp);
	bp = NULL;

	switch (unode->fentry->icbtag.file_type) {
	default:
		vp->v_type = VBAD;
		break;
	case 4:		/* ICB file type: directory */
		vp->v_type = VDIR;
		break;
	case 5:		/* random-access byte stream (regular file) */
		vp->v_type = VREG;
		break;
	case 6:		/* block special device */
		vp->v_type = VBLK;
		break;
	case 7:		/* character special device */
		vp->v_type = VCHR;
		break;
	case 9:		/* FIFO */
		vp->v_type = VFIFO;
		vp->v_op = &udf_fifoops;
		break;
	case 10:	/* socket */
		vp->v_type = VSOCK;
		break;
	case 12:	/* symbolic link */
		vp->v_type = VLNK;
		break;
	}

	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);

	if (ino == udf_getid(&udfmp->root_icb))
		vp->v_vflag |= VV_ROOT;

	*vpp = vp;

	return (0);
}
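
udf_vget() has the VFS_VGET() entry-point signature, so a caller would typically reach it through that interface with just the mount point and inode number. A hedged usage sketch (VTON() is udf's v_data accessor):

	struct vnode *vp;
	int error;

	error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp);	/* dispatches to udf_vget() */
	if (error == 0) {
		/* ... inspect VTON(vp)->fentry as needed ... */
		vput(vp);	/* unlock and drop the reference */
	}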
Example #3
/*
 * Get a p9node.  Nodes are represented by (fid, qid) tuples in 9P2000.
 * Fids are assigned by the client, while qids are assigned by the server.
 *
 * The caller is expected to have generated the FID via p9fs_getfid() and
 * obtained the QID from the server via p9fs_client_walk() and friends.
 */
int
p9fs_nget(struct p9fs_session *p9s, uint32_t fid, struct p9fs_qid *qid,
    int lkflags, struct p9fs_node **npp)
{
	int error = 0;
	struct p9fs_node *np;
	struct vnode *vp, *nvp;
	struct vattr vattr = {};
	struct thread *td = curthread;

	*npp = NULL;
	error = vfs_hash_get(p9s->p9s_mount, fid, lkflags, td, &vp, NULL, NULL);
	if (error != 0)
		return (error);
	if (vp != NULL) {
		*npp = vp->v_data;
		return (0);
	}

	np = malloc(sizeof (struct p9fs_node), M_P9NODE, M_WAITOK | M_ZERO);
	getnewvnode_reserve(1);

	error = getnewvnode("p9fs", p9s->p9s_mount, &p9fs_vnops, &nvp);
	if (error != 0) {
		getnewvnode_drop_reserve();
		free(np, M_P9NODE);
		return (error);
	}
	vp = nvp;
	vn_lock(vp, LK_EXCLUSIVE);

	error = insmntque(nvp, p9s->p9s_mount);
	if (error != 0) {
		/* vp was vput()'d by insmntque() */
		free(np, M_P9NODE);
		return (error);
	}
	error = vfs_hash_insert(nvp, fid, lkflags, td, &nvp, NULL, NULL);
	if (error != 0) {
		free(np, M_P9NODE);
		return (error);
	}
	if (nvp != NULL) {
		free(np, M_P9NODE);
		*npp = nvp->v_data;
		/* vp was vput()'d by vfs_hash_insert() */
		return (0);
	}

	error = p9fs_client_stat(p9s, fid, &vattr);
	if (error != 0) {
		/*
		 * Unwind: at this point the new vnode is still locked and
		 * sits in the hash (this assumes the reclaim path tolerates
		 * a NULL v_data).
		 */
		vgone(vp);
		vput(vp);
		free(np, M_P9NODE);
		return (error);
	}

	/* Our vnode is the winner.  Set up the new p9node for it. */
	vp->v_type = vattr.va_type;
	vp->v_data = np;
	np->p9n_fid = fid;
	np->p9n_session = p9s;
	np->p9n_vnode = vp;
	bcopy(qid, &np->p9n_qid, sizeof (*qid));
	*npp = np;

	return (error);
}
Example #4
/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is ever used to get regular files, code must be added to fill
 * in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an nfsnode structure is returned.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex: no special casing is needed for the loser, or
	 * when vfs_hash_insert() happens to return an error.
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct.
	 */
	if ((fhsize == nmp->nm_fhsize) &&
	    !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}
	
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags, 
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
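
Given the "ONLY USED FOR THE ROOT DIRECTORY" restriction above, the expected caller is the filesystem's root handler. A hedged sketch in that style (mynfs_root is a made-up name; nm_fh/nm_fhsize are the mount's root file handle):

static int
mynfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct nfsmount *nmp = VFSTONFS(mp);
	struct nfsnode *np;
	int error;

	error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np, flags);
	if (error != 0)
		return (error);
	*vpp = NFSTOV(np);	/* ncl_nget() returns the vnode locked */
	return (0);
}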
Example #5
/*
 * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
static int
ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ext2mount *ump;
	struct buf *bp;
	struct vnode *vp;
	struct cdev *dev;
	struct thread *td;
	int i, error;
	int used_blocks;

	td = curthread;
	error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	ump = VFSTOEXT2(mp);
	dev = ump->um_dev;
	ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode("ext2fs", mp, &ext2_vnodeops, &vp)) != 0) {
		*vpp = NULL;
		free(ip, M_EXT2NODE);
		return (error);
	}
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_ump  = ump;
	ip->i_number = ino;

	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		free(ip, M_EXT2NODE);
		*vpp = NULL;
		return (error);
	}
	error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/* Read in the disk contents for the inode, copy into the inode. */
	if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/* Convert the on-disk ext2 inode into the in-core dinode. */
	ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)), ip);
	ip->i_block_group = ino_to_cg(fs, ino);
	ip->i_next_alloc_block = 0;
	ip->i_next_alloc_goal = 0;

	/*
	 * Make sure that block pointers for unused blocks are zeroed
	 * out; ext2_balloc depends on this, although only for regular
	 * files and directories.
	 */
	if (S_ISDIR(ip->i_mode) || S_ISREG(ip->i_mode)) {
		used_blocks = (ip->i_size + fs->e2fs_bsize - 1) / fs->e2fs_bsize;
		for (i = used_blocks; i < EXT2_NDIR_BLOCKS; i++)
			ip->i_db[i] = 0;
	}
#if 0
	ext2_print_inode(ip);
#endif
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if ((error = ext2_vinit(mp, &ext2_fifoops, &vp)) != 0) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		ip->i_gen = random() + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	*vpp = vp;
	return (0);
}
Example #6
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp, const char *dirnm, 
	int dirlen, const char *name, int nmlen, char sep, 
	struct smbfattr *fap, struct vnode **vpp)
{
	struct vattr vattr;
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode *np, *dnp;
	struct vnode *vp, *vp2;
	struct smbcmp sc;
	char *p, *rpath;
	int error, rplen;

	sc.n_parent = dvp;
	sc.n_nmlen = nmlen;
	sc.n_name = name;	
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	error = vfs_hash_get(mp, smbfs_hash(name, nmlen), LK_EXCLUSIVE, td,
	    vpp, smbfs_vnode_cmp, &sc);
	if (error)
		return (error);
	if (*vpp) {
		np = VTOSMB(*vpp);
		/* Force cached attributes to be refreshed if stale. */
		(void)VOP_GETATTR(*vpp, &vattr, td->td_ucred);
		/*
		 * If the file type on the server is inconsistent with
		 * what it was when we created the vnode, kill the
		 * bogus vnode now and fall through to the code below
		 * to create a new one with the right type.
		 */
		if (((*vpp)->v_type == VDIR &&
		    (np->n_dosattr & SMB_FA_DIR) == 0) ||
		    ((*vpp)->v_type == VREG &&
		    (np->n_dosattr & SMB_FA_DIR) != 0)) {
			vgone(*vpp);
			vput(*vpp);
		} else {
			SMBVDEBUG("vnode taken from the hashtable\n");
			return (0);
		}
	}
	/*
	 * If we don't have node attributes, then it is an explicit lookup
	 * for an existing vnode.
	 */
	if (fap == NULL)
		return ENOENT;

	error = getnewvnode("smbfs", mp, &smbfs_vnodeops, vpp);
	if (error)
		return (error);
	vp = *vpp;
	np = malloc(sizeof *np, M_SMBNODE, M_WAITOK | M_ZERO);
	rplen = dirlen;
	if (sep != '\0')
		rplen++;
	rplen += nmlen;
	rpath = malloc(rplen + 1, M_SMBNODENAME, M_WAITOK);
	p = rpath;
	bcopy(dirnm, p, dirlen);
	p += dirlen;
	if (sep != '\0')
		*p++ = sep;
	if (name != NULL) {
		bcopy(name, p, nmlen);
		p += nmlen;
	}
	*p = '\0';
	MPASS(p == rpath + rplen);
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	/* Vnode initialization */
	vp->v_type = fap->fa_attr & SMB_FA_DIR ? VDIR : VREG;
	vp->v_data = np;
	np->n_vnode = vp;
	np->n_mount = VFSTOSMBFS(mp);
	np->n_rpath = rpath;
	np->n_rplen = rplen;
	np->n_nmlen = nmlen;
	np->n_name = smbfs_name_alloc(name, nmlen);
	np->n_ino = fap->fa_ino;
	if (dvp) {
		ASSERT_VOP_LOCKED(dvp, "smbfs_node_alloc");
		np->n_parent = dvp;
		np->n_parentino = VTOSMB(dvp)->n_ino;
		if (/*vp->v_type == VDIR &&*/ (dvp->v_vflag & VV_ROOT) == 0) {
			vref(dvp);
			np->n_flag |= NREFPARENT;
		}
	} else if (vp->v_type == VREG)
		SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
	error = insmntque(vp, mp);
	if (error) {
		/* insmntque() destroyed vp; release what np still owns. */
		if (np->n_name != NULL)
			smbfs_name_free(np->n_name);
		free(rpath, M_SMBNODENAME);
		free(np, M_SMBNODE);
		return (error);
	}
	error = vfs_hash_insert(vp, smbfs_hash(name, nmlen), LK_EXCLUSIVE,
	    td, &vp2, smbfs_vnode_cmp, &sc);
	if (error) 
		return (error);
	if (vp2 != NULL)
		*vpp = vp2;
	return (0);
}
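
For reference, smbfs reaches this allocator from its lookup path. A hedged sketch of a direct call, with dvp locked as the ASSERT_VOP_LOCKED() above requires (dnp->n_rpath/n_rplen supply the parent's share-relative path, fap the server attributes):

	struct vnode *vp;
	int error;

	error = smbfs_node_alloc(mp, dvp, dnp->n_rpath, dnp->n_rplen,
	    "file.txt", 8, '\\', fap, &vp);
	if (error == 0)
		vput(vp);	/* came back locked with one reference */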
Example #7
File: p9fs_vfsops.c  Project: wca/p9fs
static int
p9fs_mount(struct mount *mp)
{
	struct p9fsmount *p9mp;
	struct p9fs_session *p9s;
	int error;

	error = EINVAL;
	if (vfs_filteropt(mp->mnt_optnew, p9_opts))
		goto out;

	if (mp->mnt_flag & MNT_UPDATE)
		return (p9fs_mount_parse_opts(mp));

	/* Allocate and initialize the private mount structure. */
	p9mp = malloc(sizeof (struct p9fsmount), M_P9MNT, M_WAITOK | M_ZERO);
	mp->mnt_data = p9mp;
	p9mp->p9_mountp = mp;
	p9fs_init_session(&p9mp->p9_session);
	p9s = &p9mp->p9_session;
	p9s->p9s_mount = mp;

	error = p9fs_mount_parse_opts(mp);
	if (error != 0)
		goto out;

	error = p9fs_connect(mp);
	if (error != 0) {
		goto out;
	}

	/* Negotiate with the remote service.  XXX: Add auth call. */
	error = p9fs_client_version(p9s);
	if (error == 0) {
		/* Initialize the root vnode just before attaching. */
		struct vnode *vp, *ivp;
		struct p9fs_node *np = &p9s->p9s_rootnp;

		np->p9n_fid = ROOTFID;
		np->p9n_session = p9s;
		error = getnewvnode("p9fs", mp, &p9fs_vnops, &vp);
		if (error == 0) {
			vn_lock(vp, LK_EXCLUSIVE);
			error = insmntque(vp, mp);
		}
		ivp = NULL;
		if (error == 0)
			error = vfs_hash_insert(vp, ROOTFID, LK_EXCLUSIVE,
			    curthread, &ivp, NULL, NULL);
		if (error == 0 && ivp != NULL)
			error = EBUSY;
		if (error == 0) {
			np->p9n_vnode = vp;
			vp->v_data = np;
			vp->v_type = VDIR;
			vp->v_vflag |= VV_ROOT;
			VOP_UNLOCK(vp, 0);
		}
	}
	if (error == 0)
		error = p9fs_client_attach(p9s);
	if (error == 0)
		p9s->p9s_state = P9S_RUNNING;

out:
	if (error != 0)
		(void) p9fs_unmount(mp, MNT_FORCE);
	return (error);
}
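
Since the root vnode is published in the hash under ROOTFID, the matching VFS_ROOT() handler can be a plain hash lookup. A hedged sketch (the name and the error policy are guesses, not the project's actual handler):

static int
p9fs_root_sketch(struct mount *mp, int lkflags, struct vnode **vpp)
{
	int error;

	error = vfs_hash_get(mp, ROOTFID, lkflags, curthread, vpp, NULL, NULL);
	if (error == 0 && *vpp == NULL)
		error = ENOENT;		/* root vnode not attached yet */
	return (error);
}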
Example #8
File: nfs_node.c  Project: coyizumi/cs111
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an nfsnode structure is returned.
 */
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfs_vncmp ncmp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);
	ncmp.fhsize = fhsize;
	ncmp.fh = fhp;

	error = vfs_hash_get(mntp, hash, flags,
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(nfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("nfs", mntp, &nfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	vp->v_bufobj.bo_ops = &buf_ops_nfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex: no special casing is needed for the loser, or
	 * when vfs_hash_insert() happens to return an error.
	 */
	mtx_init(&np->n_mtx, "NFSnode lock", NULL, MTX_DEF);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		if (np->n_fhsize > NFS_SMALLFH) {
			free((caddr_t)np->n_fhp, M_NFSBIGFH);
		}
		mtx_destroy(&np->n_mtx);
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, flags, 
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
Example #9
/*
 * Make a new unionfs node or get an existing one.
 *
 * uppervp and lowervp should be unlocked, because locking the new unionfs
 * vnode also locks uppervp or lowervp. To prevent deadlock, do not hold
 * more than one of these locks at a time.
 */
int
unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
		struct vnode *lowervp, struct vnode *dvp,
		struct vnode **vpp, struct componentname *cnp,
		struct thread *td)
{
	struct unionfs_mount *ump;
	struct unionfs_node *unp;
	struct vnode   *vp;
	int		error;
	int		lkflags;
	enum vtype	vt;
	char	       *path;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	lkflags = (cnp ? cnp->cn_lkflags : 0);
	path = (cnp ? cnp->cn_nameptr : NULL);
	*vpp = NULLVP;

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("unionfs_nodeget: upper and lower is null");

	vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);

	/* Skip the path check when the ISLASTCN flag is not set. */
	if (cnp && !(cnp->cn_flags & ISLASTCN))
		path = NULL;

	/* check the cache */
	if (path != NULL && dvp != NULLVP && vt == VDIR) {
		vp = unionfs_get_cached_vnode(uppervp, lowervp, dvp, path);
		if (vp != NULLVP) {
			vref(vp);
			*vpp = vp;
			goto unionfs_nodeget_out;
		}
	}

	if ((uppervp == NULLVP || ump->um_uppervp != uppervp) ||
	    (lowervp == NULLVP || ump->um_lowervp != lowervp)) {
		/* dvp will be NULLVP only in case of root vnode. */
		if (dvp == NULLVP)
			return (EINVAL);
	}
	unp = malloc(sizeof(struct unionfs_node),
	    M_UNIONFSNODE, M_WAITOK | M_ZERO);

	error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
	if (error != 0) {
		free(unp, M_UNIONFSNODE);
		return (error);
	}
	error = insmntque(vp, mp);	/* XXX: Too early for mpsafe fs */
	if (error != 0) {
		free(unp, M_UNIONFSNODE);
		return (error);
	}
	if (dvp != NULLVP)
		vref(dvp);
	if (uppervp != NULLVP)
		vref(uppervp);
	if (lowervp != NULLVP)
		vref(lowervp);

	if (vt == VDIR)
		unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
		    &(unp->un_hashmask));

	unp->un_vnode = vp;
	unp->un_uppervp = uppervp;
	unp->un_lowervp = lowervp;
	unp->un_dvp = dvp;
	if (uppervp != NULLVP)
		vp->v_vnlock = uppervp->v_vnlock;
	else
		vp->v_vnlock = lowervp->v_vnlock;

	if (path != NULL) {
		unp->un_path = (char *)malloc(cnp->cn_namelen + 1,
		    M_UNIONFSPATH, M_WAITOK | M_ZERO);
		bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
		unp->un_path[cnp->cn_namelen] = '\0';
	}
	vp->v_type = vt;
	vp->v_data = unp;

	if ((uppervp != NULLVP && ump->um_uppervp == uppervp) &&
	    (lowervp != NULLVP && ump->um_lowervp == lowervp))
		vp->v_vflag |= VV_ROOT;

	if (path != NULL && dvp != NULLVP && vt == VDIR)
		*vpp = unionfs_ins_cached_vnode(unp, dvp, path);
	if ((*vpp) != NULLVP) {
		if (dvp != NULLVP)
			vrele(dvp);
		if (uppervp != NULLVP)
			vrele(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);

		unp->un_uppervp = NULLVP;
		unp->un_lowervp = NULLVP;
		unp->un_dvp = NULLVP;
		vrele(vp);
		vp = *vpp;
		vref(vp);
	} else
		*vpp = vp;

unionfs_nodeget_out:
	if (lkflags & LK_TYPE_MASK)
		vn_lock(vp, lkflags | LK_RETRY);

	return (0);
}
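
The header comment's locking rule matters to callers: hand the upper and lower vnodes over unlocked, keeping only references. A hedged caller sketch:

	struct vnode *vp;
	int error;

	VOP_UNLOCK(uppervp, 0);		/* drop the lock, keep our reference */
	error = unionfs_nodeget(mp, uppervp, lowervp, dvp, &vp, cnp, td);
	/*
	 * unionfs_nodeget() took its own references; on success vp is
	 * locked only if cnp->cn_lkflags asked for a lock type.
	 */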