/*
 * allocate a ptyfsnode/vnode pair.  the vnode is
 * referenced and locked.
 *
 * the pty, ptyfs_type, and mount point uniquely
 * identify a ptyfsnode.  the mount point is needed
 * because someone might mount this filesystem
 * twice.
 *
 * all ptyfsnodes are maintained on a singly-linked
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
ptyfs_allocvp(struct mount *mp, struct vnode **vpp, ptyfstype type, int pty,
    struct lwp *l)
{
	struct ptyfsnode *ptyfs;
	struct vnode *vp;
	int error;

 retry:
	if ((*vpp = ptyfs_used_get(type, pty, mp, LK_EXCLUSIVE)) != NULL)
		return 0;

	error = getnewvnode(VT_PTYFS, mp, ptyfs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return error;
	}

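	/*
	 * Re-check the list under the hash lock: another thread may have
	 * allocated the same node while getnewvnode() slept.
	 */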
	mutex_enter(&ptyfs_hashlock);
	if (ptyfs_used_get(type, pty, mp, 0) != NULL) {
		mutex_exit(&ptyfs_hashlock);
		ungetnewvnode(vp);
		goto retry;
	}

	vp->v_data = ptyfs = ptyfs_free_get(type, pty, l);
	ptyfs->ptyfs_vnode = vp;

	switch (type) {
	case PTYFSroot:	/* /pts = dr-xr-xr-x */
		vp->v_type = VDIR;
		vp->v_vflag = VV_ROOT;
		break;

	case PTYFSpts:	/* /pts/N = cxxxxxxxxx */
	case PTYFSptc:	/* controlling side = cxxxxxxxxx */
		vp->v_type = VCHR;
		spec_node_init(vp, PTYFS_MAKEDEV(ptyfs));
		break;
	default:
		panic("ptyfs_allocvp");
	}

	ptyfs_hashins(ptyfs);
	uvm_vnp_setsize(vp, 0);
	mutex_exit(&ptyfs_hashlock);

	*vpp = vp;
	return 0;
}
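All of the functions in this collection follow the same lookup/allocate/retry
protocol. Below is a minimal sketch of that protocol for a hypothetical file
system; foo_hashget(), foo_hashins(), foo_hashlock, and foo_vnodeop_p are
placeholder names, not part of any file system shown here.

int
foo_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

 retry:
	/* Fast path: the node already exists; return it locked. */
	if ((*vpp = foo_hashget(ino, LK_EXCLUSIVE)) != NULL)
		return 0;

	/* Slow path: getnewvnode() may sleep, so no lock is held here. */
	error = getnewvnode(VT_NON, mp, foo_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return error;
	}

	/* Re-check under the hash lock; undo and retry if we lost the race. */
	mutex_enter(&foo_hashlock);
	if (foo_hashget(ino, 0) != NULL) {
		mutex_exit(&foo_hashlock);
		ungetnewvnode(vp);
		goto retry;
	}

	/* Publish the new node while still holding the hash lock. */
	foo_hashins(ino, vp);
	mutex_exit(&foo_hashlock);

	*vpp = vp;
	return 0;
}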
Example #2
/*
 * internal version with extra arguments to allow accessing resource fork
 */
int
hfs_vget_internal(struct mount *mp, ino_t ino, uint8_t fork,
    struct vnode **vpp)
{
	struct hfsmount *hmp;
	struct hfsnode *hnode;
	struct vnode *vp;
	hfs_callback_args cbargs;
	hfs_cnid_t cnid;
	hfs_catalog_keyed_record_t rec;
	hfs_catalog_key_t key; /* the search key used to find this file on disk */
	dev_t dev;
	int error;

#ifdef HFS_DEBUG	
	printf("vfsop = hfs_vget()\n");
#endif /* HFS_DEBUG */

	hnode = NULL;
	vp = NULL;
	hmp = VFSTOHFS(mp);
	dev = hmp->hm_dev;
	cnid = (hfs_cnid_t)ino;

	if (fork != HFS_RSRCFORK)
	    fork = HFS_DATAFORK;

 retry:
	/* Check if this vnode has already been allocated. If so, just return it. */
	if ((*vpp = hfs_nhashget(dev, cnid, fork, LK_EXCLUSIVE)) != NULL)
		return 0;

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_HFS, mp, hfs_vnodeop_p, NULL, &vp)) != 0)
		goto error;
	MALLOC(hnode, struct hfsnode *, sizeof(struct hfsnode), M_TEMP,
		M_WAITOK | M_ZERO);

	/*
	 * If someone beat us to it while sleeping in getnewvnode(),
	 * push back the freshly allocated vnode we don't need, and return.
	 */
	mutex_enter(&hfs_hashlock);
	if (hfs_nhashget(dev, cnid, fork, 0) != NULL) {
		mutex_exit(&hfs_hashlock);
		ungetnewvnode(vp);
		FREE(hnode, M_TEMP);
		goto retry;
	}

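	/* VV_LOCKSWORK: this file system implements working vnode locks. */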
	vp->v_vflag |= VV_LOCKSWORK;	
	vp->v_data = hnode;
	genfs_node_init(vp, &hfs_genfsops);
	
	hnode->h_vnode = vp;
	hnode->h_hmp = hmp;
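	/* Magic value; presumably a debugging sentinel for stale hfsnodes. */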
	hnode->dummy = 0x1337BABE;
	
	/*
	 * We need to put this vnode into the hash chain and lock it so that other
	 * requests for this inode will block if they arrive while we are sleeping
	 * waiting for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read. The hash chain requires the node's
	 * device and cnid to be known. Since this information was passed in the
	 * arguments, fill in the appropriate hfsnode fields without having
	 * to read the disk.
	 */
	hnode->h_dev = dev;
	hnode->h_rec.u.cnid = cnid;
	hnode->h_fork = fork;

	hfs_nhashinsert(hnode);
	mutex_exit(&hfs_hashlock);


	/*
	 * Read catalog record from disk.
	 */
	hfslib_init_cbargs(&cbargs);
	
	if (hfslib_find_catalog_record_with_cnid(&hmp->hm_vol, cnid,
		&rec, &key, &cbargs) != 0) {
		vput(vp);
		error = EBADF;
		goto error;
	}
		
	memcpy(&hnode->h_rec, &rec, sizeof(hnode->h_rec));
	hnode->h_parent = key.parent_cnid;

	/* XXX Eventually need to add an "ignore permissions" mount option */

	/*
	 * Now convert some of the catalog record's fields into values that make
	 * sense on this system.
	 */
	/* DATE AND TIME */

	/*
	 * Initialize the vnode from the hfsnode, check for aliases.
	 * Note that the underlying vnode may change.
	 */
	hfs_vinit(mp, hfs_specop_p, hfs_fifoop_p, &vp);

	hnode->h_devvp = hmp->hm_devvp;	
	VREF(hnode->h_devvp);  /* Increment the ref count to the volume's device. */

	/* Make sure UVM has allocated enough memory. (?) */
	if (hnode->h_rec.u.rec_type == HFS_REC_FILE) {
		if (hnode->h_fork == HFS_DATAFORK)
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.data_fork.logical_size);
		else
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.rsrc_fork.logical_size);
	}
	else
		uvm_vnp_setsize(vp, 0); /* directories are not read directly */
		
	*vpp = vp;
	
	return 0;

error:
	*vpp = NULL;
	return error;
}
Example #3
int
lfs_fastvget(struct mount *mp, ino_t ino, daddr_t daddr, struct vnode **vpp,
	     struct ulfs1_dinode *dinp)
{
	struct inode *ip;
	struct ulfs1_dinode *dip;
	struct vnode *vp;
	struct ulfsmount *ump;
	dev_t dev;
	int error, retries;
	struct buf *bp;
	struct lfs *fs;

	ump = VFSTOULFS(mp);
	dev = ump->um_dev;
	fs = ump->um_lfs;

	/*
	 * Wait until the filesystem is fully mounted before allowing vget
	 * to complete.	 This prevents possible problems with roll-forward.
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_flags & LFS_NOTYET) {
		mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0,
			&lfs_lock);
	}
	mutex_exit(&lfs_lock);

	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */

	error = lfs_fasthashget(dev, ino, vpp);
	if (error != 0 || *vpp != NULL)
		return (error);

	/*
	 * getnewvnode(9) will call vfs_busy, which will block if the
	 * filesystem is being unmounted; but umount(9) is waiting for
	 * us because we're already holding the fs busy.
	 * XXXMP
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		*vpp = NULL;
		return EDEADLK;
	}
	error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}

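	/*
	 * Re-check the hash now that we hold the lock: another thread may
	 * have created the inode while getnewvnode() slept.
	 */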
	mutex_enter(&ulfs_hashlock);
	error = lfs_fasthashget(dev, ino, vpp);
	if (error != 0 || *vpp != NULL) {
		mutex_exit(&ulfs_hashlock);
		ungetnewvnode(vp);
		return (error);
	}

	/* Allocate new vnode/inode. */
	lfs_vcreate(mp, ino, vp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ulfs_ihashins(ip);
	mutex_exit(&ulfs_hashlock);

#ifdef notyet
	/* Not found in the cache => this vnode was loaded only for cleaning. */
	ip->i_lfs_iflags |= LFSI_BMAP;
#endif

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, ip->i_din.ffs1_din, sizeof (struct ulfs1_dinode));
		if (error) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode copyin failed"
			      " for ino %d\n", ino));
			ulfs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			VOP_UNLOCK(vp);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if (ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		retries = 0;
	    again:
		error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr), fs->lfs_ibsize,
			      NOCRED, 0, &bp);
		if (error) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: bread failed (%d)\n",
			      error));
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ulfs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			VOP_UNLOCK(vp);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		dip = lfs_ifind(ump->um_lfs, ino, bp);
		if (dip == NULL) {
			/* Assume write has not completed yet; try again */
			brelse(bp, BC_INVAL);
			++retries;
			if (retries > LFS_IFIND_RETRIES)
				panic("lfs_fastvget: dinode not found");
			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode not found,"
			      " retrying...\n"));
			goto again;
		}
		*ip->i_din.ffs1_din = *dip;
		brelse(bp, 0);
	}
	lfs_vinit(mp, &vp);

	*vpp = vp;

	KASSERT(VOP_ISLOCKED(vp));
	VOP_UNLOCK(vp);

	return (0);
}
Example #4
/*
 * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ext2fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	void *cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_EXT2FS, mp, ext2fs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&ext2fs_inode_pool, PR_WAITOK);

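	/*
	 * Re-check the inode hash under the lock; if another thread won
	 * the race while getnewvnode() slept, discard ours and retry.
	 */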
	mutex_enter(&ufs_hashlock);
	if ((*vpp = ufs_ihashget(dev, ino, 0)) != NULL) {
		mutex_exit(&ufs_hashlock);
		ungetnewvnode(vp);
		pool_put(&ext2fs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

	memset(ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_e2fs_last_lblk = 0;
	ip->i_e2fs_last_blk = 0;
	genfs_node_init(vp, &ext2fs_genfsops);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */

	ufs_ihashins(ip);
	mutex_exit(&ufs_hashlock);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, EXT2_FSBTODB(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, NOCRED, 0, &bp);
	if (error) {

		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */

		vput(vp);
		*vpp = NULL;
		return (error);
	}
	cp = (char *)bp->b_data + (ino_to_fsbo(fs, ino) * EXT2_DINODE_SIZE(fs));
	ip->i_din.e2fs_din = pool_get(&ext2fs_dinode_pool, PR_WAITOK);
	e2fs_iload((struct ext2fs_dinode *)cp, ip->i_din.e2fs_din);
	ext2fs_set_inode_guid(ip);
	brelse(bp, 0);

	/* If the inode was deleted, reset all fields */
	if (ip->i_e2fs_dtime != 0) {
		ip->i_e2fs_mode = 0;
		(void)ext2fs_setsize(ip, 0);
		(void)ext2fs_setnblock(ip, 0);
		memset(ip->i_e2fs_blocks, 0, sizeof(ip->i_e2fs_blocks));
	}

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 */

	error = ext2fs_vinit(mp, ext2fs_specop_p, ext2fs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */

	ip->i_devvp = ump->um_devvp;
	vref(ip->i_devvp);

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */

	if (ip->i_e2fs_gen == 0) {
		if (++ext2gennumber < (u_long)time_second)
			ext2gennumber = time_second;
		ip->i_e2fs_gen = ext2gennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	uvm_vnp_setsize(vp, ext2fs_size(ip));
	*vpp = vp;
	return (0);
}
Example #5
/*
 * layer_node_alloc: make a new layerfs vnode.
 *
 * => vp is the alias vnode, lowervp is the lower vnode.
 * => We will hold a reference to lowervp.
 */
int
layer_node_alloc(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
{
    struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
    struct layer_node_hashhead *hd;
    struct layer_node *xp;
    struct vnode *vp, *nvp;
    int error;

    /* Get a new vnode and share its interlock with underlying vnode. */
    error = getnewvnode(lmp->layerm_tag, mp, lmp->layerm_vnodeop_p,
                        lowervp->v_interlock, &vp);
    if (error) {
        return error;
    }
    vp->v_type = lowervp->v_type;
    mutex_enter(vp->v_interlock);
    vp->v_iflag |= VI_LAYER;
    mutex_exit(vp->v_interlock);

    xp = kmem_alloc(lmp->layerm_size, KM_SLEEP);
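    /* With KM_SLEEP, kmem_alloc() does not fail; this check is defensive. */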
    if (xp == NULL) {
        ungetnewvnode(vp);
        return ENOMEM;
    }
    if (vp->v_type == VBLK || vp->v_type == VCHR) {
        spec_node_init(vp, lowervp->v_rdev);
    }

    /*
     * Before inserting the node into the hash, check whether another
     * thread raced with us.  If so, return that node and destroy ours.
     */
    mutex_enter(&lmp->layerm_hashlock);
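    /*
     * layer_node_find() returns the alias referenced and drops
     * layerm_hashlock on success, so no mutex_exit() is needed on the
     * early-return path below.
     */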
    if ((nvp = layer_node_find(mp, lowervp)) != NULL) {
        ungetnewvnode(vp);
        kmem_free(xp, lmp->layerm_size);
        *vpp = nvp;
        return 0;
    }

    vp->v_data = xp;
    vp->v_vflag = (vp->v_vflag & ~VV_MPSAFE) |
                  (lowervp->v_vflag & VV_MPSAFE);
    xp->layer_vnode = vp;
    xp->layer_lowervp = lowervp;
    xp->layer_flags = 0;

    /*
     * Insert the new node into the hash.
     * Add a reference to the lower node.
     */
    vref(lowervp);
    hd = LAYER_NHASH(lmp, lowervp);
    LIST_INSERT_HEAD(hd, xp, layer_hash);
    uvm_vnp_setsize(vp, 0);
    mutex_exit(&lmp->layerm_hashlock);

    *vpp = vp;
    return 0;
}
Example #6
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an
 * nfsnode structure is returned.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

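	/*
	 * Re-check under the writer lock: another thread may have inserted
	 * the same file handle while we were initializing.
	 */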
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
Example #7
int
ntfs_vgetex(
	struct mount *mp,
	ino_t ino,
	u_int32_t attrtype,
	char *attrname,
	u_long lkflags,
	u_long flags,
	struct vnode **vpp)
{
	int error;
	struct ntfsmount *ntmp;
	struct ntnode *ip;
	struct fnode *fp;
	struct vnode *vp;
	enum vtype f_type = VBAD;

	dprintf(("ntfs_vgetex: ino: %llu, attr: 0x%x:%s, lkf: 0x%lx, f:"
	    " 0x%lx\n", (unsigned long long)ino, attrtype,
	    attrname ? attrname : "", (u_long)lkflags, (u_long)flags));

	ntmp = VFSTONTFS(mp);
	*vpp = NULL;

loop:
	/* Get ntnode */
	error = ntfs_ntlookup(ntmp, ino, &ip);
	if (error) {
		printf("ntfs_vget: ntfs_ntget failed\n");
		return (error);
	}

	/* It may not be fully initialized, so force-load it */
	if (!(flags & VG_DONTLOADIN) && !(ip->i_flag & IN_LOADED)) {
		error = ntfs_loadntnode(ntmp, ip);
		if (error) {
			printf("ntfs_vget: CAN'T LOAD ATTRIBUTES FOR INO:"
			    " %llu\n", (unsigned long long)ip->i_number);
			ntfs_ntput(ip);
			return (error);
		}
	}

	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}

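	/* First use of this fnode: determine its vnode type and sizes. */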
	if (!(flags & VG_DONTVALIDFN) && !(fp->f_flag & FN_VALID)) {
		if ((ip->i_frflag & NTFS_FRFLAG_DIR) &&
		    (fp->f_attrtype == NTFS_A_DATA && fp->f_attrname == NULL)) {
			f_type = VDIR;
		} else if (flags & VG_EXT) {
			f_type = VNON;
			fp->f_size = fp->f_allocated = 0;
		} else {
			f_type = VREG;

			error = ntfs_filesize(ntmp, fp,
					      &fp->f_size, &fp->f_allocated);
			if (error) {
				ntfs_ntput(ip);
				return (error);
			}
		}

		fp->f_flag |= FN_VALID;
	}

	/*
	 * We may be about to call vget().  To avoid a potential deadlock,
	 * release the ntnode lock first: the locking order requires the
	 * vnode lock to be acquired before the ntnode lock.
	 * ntfs_fget() bumped the ntnode usecount, so the ntnode won't be
	 * recycled prematurely.
	 * Take v_interlock before releasing the ntnode lock to avoid races.
	 */
	vp = FTOV(fp);
	if (vp) {
		mutex_enter(vp->v_interlock);
		ntfs_ntput(ip);
		if (vget(vp, lkflags) != 0)
			goto loop;
		*vpp = vp;
		return 0;
	}
	ntfs_ntput(ip);

	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, ntfs_vnodeop_p,
	    NULL, &vp);
	if (error) {
		ntfs_frele(fp);
		return (error);
	}
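	/*
	 * Re-acquire the ntnode and redo the fnode lookup: both may have
	 * changed while getnewvnode() slept.
	 */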
	ntfs_ntget(ip);
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}
	if (FTOV(fp)) {
		/*
		 * Another thread beat us; put back the freshly allocated
		 * vnode and retry.
		 */
		ntfs_ntput(ip);
		ungetnewvnode(vp);
		goto loop;
	}
	dprintf(("ntfs_vget: vnode: %p for ntnode: %llu\n", vp,
	    (unsigned long long)ino));

	fp->f_vp = vp;
	vp->v_data = fp;
	if (f_type != VBAD)
		vp->v_type = f_type;
	genfs_node_init(vp, &ntfs_genfsops);

	if (ino == NTFS_ROOTINO)
		vp->v_vflag |= VV_ROOT;

	ntfs_ntput(ip);

	if (lkflags & (LK_EXCLUSIVE | LK_SHARED)) {
		error = vn_lock(vp, lkflags);
		if (error) {
			vput(vp);
			return (error);
		}
	}

	uvm_vnp_setsize(vp, fp->f_size); /* XXX: mess, cf. ntfs_lookupfile() */
	vref(ip->i_devvp);
	*vpp = vp;
	return (0);
}
Example #8
static int
chfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct chfs_mount *chmp;
	struct chfs_inode *ip;
	struct ufsmount *ump;
	struct vnode *vp;
	dev_t dev;
	int error;
	struct chfs_vnode_cache* chvc = NULL;
	struct chfs_node_ref* nref = NULL;
	struct buf *bp;

	dbg("vget() | ino: %llu\n", (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	/*
	 * XXX: if the caller passed a NULL vpp, the vnode allocated below is
	 * unreachable; this guard only prevents a NULL pointer dereference.
	 */
	if (!vpp) {
		vpp = kmem_alloc(sizeof(struct vnode *), KM_SLEEP);
	}
retry:

	/* Get node from inode hash. */
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		return 0;
	}

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_CHFS,
		    mp, chfs_vnodeop_p, NULL, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&chfs_inode_pool, PR_WAITOK);

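	/* Re-check the inode hash under the lock before publishing ours. */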
	mutex_enter(&chfs_hashlock);
	if ((*vpp = chfs_ihashget(dev, ino, 0)) != NULL) {
		mutex_exit(&chfs_hashlock);
		ungetnewvnode(vp);
		pool_put(&chfs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

	/* Initialize vnode/inode. */
	memset(ip, 0, sizeof(*ip));
	vp->v_data = ip;
	ip->vp = vp;
	ip->ch_type = VTTOCHT(vp->v_type);
	ip->ump = ump;
	ip->chmp = chmp = ump->um_chfs;
	ip->dev = dev;
	ip->ino = ino;
	vp->v_mount = mp;
	genfs_node_init(vp, &chfs_genfsops);

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	chfs_ihashins(ip);
	mutex_exit(&chfs_hashlock);

	/* Set root inode. */
	if (ino == CHFS_ROOTINO) {
		dbg("SETROOT\n");
		vp->v_vflag |= VV_ROOT;
		vp->v_type = VDIR;
		ip->ch_type = CHT_DIR;
		ip->mode = IFMT | IEXEC | IWRITE | IREAD;
		ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
		chfs_update(vp, NULL, NULL, UPDATE_WAIT);
		TAILQ_INIT(&ip->dents);
		chfs_set_vnode_size(vp, 512);
	}

	mutex_enter(&chmp->chm_lock_vnocache);
	chvc = chfs_vnode_cache_get(chmp, ino);
	mutex_exit(&chmp->chm_lock_vnocache);
	if (!chvc) {
		dbg("!chvc\n");
		/* Initialize the corresponding vnode cache. */
		/* XXX: we can't allocate under a lock; refactor this! */
		chvc = chfs_vnode_cache_alloc(ino);
		mutex_enter(&chmp->chm_lock_vnocache);
		if (ino == CHFS_ROOTINO) {
			chvc->nlink = 2;
			chvc->pvno = CHFS_ROOTINO;
			chvc->state = VNO_STATE_CHECKEDABSENT;
		}
		chfs_vnode_cache_add(chmp, chvc);
		mutex_exit(&chmp->chm_lock_vnocache);

		ip->chvc = chvc;
		TAILQ_INIT(&ip->dents);
	} else {
		dbg("chvc\n");
		ip->chvc = chvc;
		/* We had a vnode cache, the node is already on flash, so read it */
		if (ino == CHFS_ROOTINO) {
			chvc->pvno = CHFS_ROOTINO;
			TAILQ_INIT(&chvc->scan_dirents);
		} else {
			chfs_readvnode(mp, ino, &vp);
		}

		mutex_enter(&chmp->chm_lock_mountfields);
		/* Initialize type specific things. */
		switch (ip->ch_type) {
		case CHT_DIR:
			/* Read every dirent. */
			nref = chvc->dirents;
			while (nref &&
			    (struct chfs_vnode_cache *)nref != chvc) {
				chfs_readdirent(mp, nref, ip);
				nref = nref->nref_next;
			}
			chfs_set_vnode_size(vp, 512);
			break;
		case CHT_REG:
			/* FALLTHROUGH */
		case CHT_SOCK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}
			break;
		case CHT_LNK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set link. */
			dbg("size: %llu\n", (unsigned long long)ip->size);
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = ip->size;
			bp->b_data = kmem_alloc(ip->size, KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			if (!ip->target)
				ip->target = kmem_alloc(ip->size,
				    KM_SLEEP);
			memcpy(ip->target, bp->b_data, ip->size);
			kmem_free(bp->b_data, ip->size);
			putiobuf(bp);

			break;
		case CHT_CHR:
			/* FALLTHROUGH */
		case CHT_BLK:
			/* FALLTHROUGH */
		case CHT_FIFO:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set device. */
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = sizeof(dev_t);
			bp->b_data = kmem_alloc(sizeof(dev_t), KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			memcpy(&ip->rdev,
			    bp->b_data, sizeof(dev_t));
			kmem_free(bp->b_data, sizeof(dev_t));
			putiobuf(bp);
			/* Set specific operations. */
			if (ip->ch_type == CHT_FIFO) {
				vp->v_op = chfs_fifoop_p;
			} else {
				vp->v_op = chfs_specop_p;
				spec_node_init(vp, ip->rdev);
			}

		    break;
		case CHT_BLANK:
			/* FALLTHROUGH */
		case CHT_BAD:
			break;
		}
		mutex_exit(&chmp->chm_lock_mountfields);

	}

	/* Finish inode initialization. */
	ip->devvp = ump->um_devvp;
	vref(ip->devvp);

	uvm_vnp_setsize(vp, ip->size);
	*vpp = vp;

	return 0;
}
Example #9
int
cd9660_vget_internal(struct mount *mp, ino_t ino, struct vnode **vpp,
	int relocated, struct iso_directory_record *isodir)
{
	struct iso_mnt *imp;
	struct iso_node *ip;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	imp = VFSTOISOFS(mp);
	dev = imp->im_dev;

 retry:
	if ((*vpp = cd9660_ihashget(dev, ino, LK_EXCLUSIVE)) != NULLVP)
		return (0);

	/* Allocate a new vnode/iso_node. */
	error = getnewvnode(VT_ISOFS, mp, cd9660_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	ip = pool_get(&cd9660_node_pool, PR_WAITOK);

	/*
	 * If someone beat us to it, put back the freshly allocated
	 * vnode/inode pair and retry.
	 */
	mutex_enter(&cd9660_hashlock);
	if (cd9660_ihashget(dev, ino, 0) != NULL) {
		mutex_exit(&cd9660_hashlock);
		ungetnewvnode(vp);
		pool_put(&cd9660_node_pool, ip);
		goto retry;
	}

	memset(ip, 0, sizeof(struct iso_node));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_mnt = imp;
	ip->i_devvp = imp->im_devvp;
	genfs_node_init(vp, &cd9660_genfsops);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	cd9660_ihashins(ip);
	mutex_exit(&cd9660_hashlock);

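	/*
	 * No directory record was passed in; read it from disk.  The inode
	 * number encodes the byte offset of the record on the volume.
	 */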
	if (isodir == 0) {
		int lbn, off;

		lbn = cd9660_lblkno(imp, ino);
		if (lbn >= imp->volume_space_size) {
			vput(vp);
			printf("fhtovp: lbn exceed volume space %d\n", lbn);
			return (ESTALE);
		}

		off = cd9660_blkoff(imp, ino);
		if (off + ISO_DIRECTORY_RECORD_SIZE > imp->logical_block_size) {
			vput(vp);
			printf("fhtovp: crosses block boundary %d\n",
			    off + ISO_DIRECTORY_RECORD_SIZE);
			return (ESTALE);
		}

		error = bread(imp->im_devvp,
			      lbn << (imp->im_bshift - DEV_BSHIFT),
			      imp->logical_block_size, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			printf("fhtovp: bread error %d\n",error);
			return (error);
		}
		isodir = (struct iso_directory_record *)((char *)bp->b_data + off);

		if (off + isonum_711(isodir->length) >
		    imp->logical_block_size) {
			vput(vp);
			if (bp != 0)
				brelse(bp, 0);
			printf("fhtovp: directory crosses block boundary %d[off=%d/len=%d]\n",
			    off +isonum_711(isodir->length), off,
			    isonum_711(isodir->length));
			return (ESTALE);
		}

#if 0
		if (isonum_733(isodir->extent) +
		    isonum_711(isodir->ext_attr_length) != ifhp->ifid_start) {
			if (bp != 0)
				brelse(bp, 0);
			printf("fhtovp: file start miss %d vs %d\n",
			    isonum_733(isodir->extent) + isonum_711(isodir->ext_attr_length),
			    ifhp->ifid_start);
			return (ESTALE);
		}
#endif
	} else
		bp = 0;

	vref(ip->i_devvp);

	if (relocated) {
		/*
		 * On relocated directories we must
		 * read the `.' entry out of a dir.
		 */
		ip->iso_start = ino >> imp->im_bshift;
		if (bp != 0)
			brelse(bp, 0);
		if ((error = cd9660_blkatoff(vp, (off_t)0, NULL, &bp)) != 0) {
			vput(vp);
			return (error);
		}
		isodir = (struct iso_directory_record *)bp->b_data;
	}

	ip->iso_extent = isonum_733(isodir->extent);
	ip->i_size = isonum_733(isodir->size);
	ip->iso_start = isonum_711(isodir->ext_attr_length) + ip->iso_extent;

	/*
	 * Setup time stamp, attribute
	 */
	vp->v_type = VNON;
	switch (imp->iso_ftype) {
	default:	/* ISO_FTYPE_9660 */
	    {
		struct buf *bp2;
		int off;
		if ((imp->im_flags & ISOFSMNT_EXTATT)
		    && (off = isonum_711(isodir->ext_attr_length)))
			cd9660_blkatoff(vp, (off_t)-(off << imp->im_bshift),
			    NULL, &bp2);
		else
			bp2 = NULL;
		cd9660_defattr(isodir, ip, bp2);
		cd9660_deftstamp(isodir, ip, bp2);
		if (bp2)
			brelse(bp2, 0);
		break;
	    }
	case ISO_FTYPE_RRIP:
		cd9660_rrip_analyze(isodir, ip, imp);
		break;
	}

	if (bp != 0)
		brelse(bp, 0);

	/*
	 * Initialize the associated vnode
	 */
	switch (vp->v_type = IFTOVT(ip->inode.iso_mode)) {
	case VFIFO:
		vp->v_op = cd9660_fifoop_p;
		break;
	case VCHR:
	case VBLK:
		/*
		 * if device, look at device number table for translation
		 */
		vp->v_op = cd9660_specop_p;
		spec_node_init(vp, ip->inode.iso_rdev);
		break;
	case VLNK:
	case VNON:
	case VSOCK:
	case VDIR:
	case VBAD:
		break;
	case VREG:
		uvm_vnp_setsize(vp, ip->i_size);
		break;
	}

	if (vp->v_type != VREG)
		uvm_vnp_setsize(vp, 0);

	if (ip->iso_extent == imp->root_extent)
		vp->v_vflag |= VV_ROOT;

	/*
	 * XXX need generation number?
	 */

	*vpp = vp;
	return (0);
}
/*
 * Obtain a locked vnode for the given on-disk inode number.
 *
 * We currently allocate a new vnode from getnewvnode(), attach our
 * in-core inode structure (efs_inode) to it, and read the inode in
 * from disk.  The returned vnode is locked.
 *
 * Returns 0 on success.
 */
static int
efs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	int err;
	struct vnode *vp;
	struct efs_inode *eip;
	struct efs_mount *emp;

	emp = VFSTOEFS(mp);

	while (true) {
		*vpp = efs_ihashget(emp->em_dev, ino, LK_EXCLUSIVE);
		if (*vpp != NULL)
			return (0);

		err = getnewvnode(VT_EFS, mp, efs_vnodeop_p, NULL, &vp);
		if (err)
			return (err);
		
		eip = pool_get(&efs_inode_pool, PR_WAITOK);

		/*
		 * See if anybody has raced us here.  If not, continue
		 * setting up the new inode, otherwise start over.
		 */
		efs_ihashlock();

		if (efs_ihashget(emp->em_dev, ino, 0) == NULL)
			break;

		efs_ihashunlock();
		ungetnewvnode(vp);
		pool_put(&efs_inode_pool, eip);
	}

	vp->v_vflag |= VV_LOCKSWORK;
	eip->ei_mode = 0;
	eip->ei_lockf = NULL;
	eip->ei_number = ino;
	eip->ei_dev = emp->em_dev;
	eip->ei_vp = vp;
	vp->v_data = eip;

	/*
	 * Place the vnode on the hash chain. Doing so will lock the
	 * vnode, so it's okay to drop the global lock and read in
	 * the inode from disk.
	 */
	efs_ihashins(eip);
	efs_ihashunlock();

	/*
	 * Init genfs early, otherwise we'll trip up on genfs_node_destroy
	 * in efs_reclaim when vput()ing in an error branch here.
	 */
	genfs_node_init(vp, &efs_genfsops);

	err = efs_read_inode(emp, ino, NULL, &eip->ei_di);
	if (err) {
		vput(vp);
		*vpp = NULL;
		return (err);
	}

	efs_sync_dinode_to_inode(eip);

	if (ino == EFS_ROOTINO && !S_ISDIR(eip->ei_mode)) {
		printf("efs: root inode (%lu) is not a directory!\n",
		    (ulong)EFS_ROOTINO);
		vput(vp);
		*vpp = NULL;
		return (EIO);
	}

	switch (eip->ei_mode & S_IFMT) {
	case S_IFIFO:
		vp->v_type = VFIFO;
		vp->v_op = efs_fifoop_p;
		break;
	case S_IFCHR:
		vp->v_type = VCHR;
		vp->v_op = efs_specop_p;
		spec_node_init(vp, eip->ei_dev);
		break;
	case S_IFDIR:
		vp->v_type = VDIR;
		if (ino == EFS_ROOTINO)
			vp->v_vflag |= VV_ROOT;
		break;
	case S_IFBLK:
		vp->v_type = VBLK;
		vp->v_op = efs_specop_p;
		spec_node_init(vp, eip->ei_dev);
		break;
	case S_IFREG:
		vp->v_type = VREG;
		break;
	case S_IFLNK:
		vp->v_type = VLNK;
		break;
	case S_IFSOCK:
		vp->v_type = VSOCK;
		break;
	default:
		printf("efs: invalid mode 0x%x in inode %lu on mount %s\n",
		    eip->ei_mode, (ulong)ino, mp->mnt_stat.f_mntonname);
		vput(vp);
		*vpp = NULL;
		return (EIO);
	}

	uvm_vnp_setsize(vp, eip->ei_size);
	*vpp = vp;

	KASSERT(VOP_ISLOCKED(vp));

	return (0);
}