Example #1
0
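/*
 * Create a vnode/nnpfs_node pair for the given handle.  If a node for
 * this handle already exists, return the existing node instead.
 */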
int
nnpfs_getnewvnode(struct nnpfs *nnpfsp, struct vnode **vpp, 
		struct nnpfs_handle *handle)
{
    struct nnpfs_node *result, *check;
    int error;

    error = getnewvnode(VT_NNPFS, NNPFS_TO_VFS(nnpfsp), &nnpfs_vops, vpp);
    if (error)
	return error;
    
    result = nnpfs_alloc(sizeof(*result), M_NNPFS_NODE);
    bzero(result, sizeof(*result));
    
    (*vpp)->v_data = result;
    result->vn = *vpp;
    
    result->handle = *handle;
    result->flags = 0;
    result->tokens = 0;
    result->offset = 0;
#if defined(HAVE_KERNEL_LOCKMGR) || defined(HAVE_KERNEL_DEBUGLOCKMGR)
    lockinit (&result->lock, PVFS, "nnpfs_lock", 0, LK_NOPAUSE);
#else
    result->vnlocks = 0;
#endif
    result->anonrights = 0;
    result->rd_cred = NULL;
    result->wr_cred = NULL;

#if defined(__NetBSD_Version__) && __NetBSD_Version__ >= 105280000
    genfs_node_init(*vpp, &nnpfs_genfsops);
#endif

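    /*
     * Another thread may have created a node for this handle while we
     * slept in getnewvnode(); if so, return the existing node.
     */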
    check = nnpfs_node_find(&nnpfsp->nodehead, handle);
    if (check) {
	vput(*vpp);
	*vpp = check->vn;
	return 0;
    }

    nnpfs_insert(&nnpfsp->nodehead, result);

    return 0;
}
Example #2
0
/*
 * Initialize this vnode / nfs node pair.
 * Caller assures no other thread will try to load this node.
 */
int
nfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	int fhsize = key_len;
	const nfsfh_t *fhp = key;
	struct nfsnode *np;

	/* Allocate and initialize the nfsnode. */
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	vp->v_tag = VT_NFS;
	vp->v_type = VNON;
	vp->v_op = nfsv2_vnodeop_p;
	vp->v_data = np;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);
	np->n_vnode = vp;

	/* Initialize genfs node. */
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	*new_key = np->n_fhp;
	return 0;
}
Example #3
0
/*
 * internal version with extra arguments to allow accessing resource fork
 */
int
hfs_vget_internal(struct mount *mp, ino_t ino, uint8_t fork,
    struct vnode **vpp)
{
	struct hfsmount *hmp;
	struct hfsnode *hnode;
	struct vnode *vp;
	hfs_callback_args cbargs;
	hfs_cnid_t cnid;
	hfs_catalog_keyed_record_t rec;
	hfs_catalog_key_t key; /* the search key used to find this file on disk */
	dev_t dev;
	int error;

#ifdef HFS_DEBUG	
	printf("vfsop = hfs_vget()\n");
#endif /* HFS_DEBUG */

	hnode = NULL;
	vp = NULL;
	hmp = VFSTOHFS(mp);
	dev = hmp->hm_dev;
	cnid = (hfs_cnid_t)ino;

	if (fork != HFS_RSRCFORK)
	    fork = HFS_DATAFORK;

 retry:
	/* Check if this vnode has already been allocated. If so, just return it. */
	if ((*vpp = hfs_nhashget(dev, cnid, fork, LK_EXCLUSIVE)) != NULL)
		return 0;

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_HFS, mp, hfs_vnodeop_p, &vp)) != 0)
		goto error;
	MALLOC(hnode, struct hfsnode *, sizeof(struct hfsnode), M_TEMP,
		M_WAITOK | M_ZERO);

	/*
	 * If someone beat us to it while sleeping in getnewvnode(),
	 * push back the freshly allocated vnode we don't need, and return.
	 */
	mutex_enter(&hfs_hashlock);
	if (hfs_nhashget(dev, cnid, fork, 0) != NULL) {
		mutex_exit(&hfs_hashlock);
		ungetnewvnode(vp);
		FREE(hnode, M_TEMP);
		goto retry;
	}

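	/* Attach the new hfsnode to the vnode and hook up genfs. */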
	vp->v_vflag |= VV_LOCKSWORK;	
	vp->v_data = hnode;
	genfs_node_init(vp, &hfs_genfsops);
	
	hnode->h_vnode = vp;
	hnode->h_hmp = hmp;
	hnode->dummy = 0x1337BABE;
	
	/*
	 * We need to put this vnode into the hash chain and lock it so that other
	 * requests for this inode will block if they arrive while we are sleeping
	 * waiting for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read. The hash chain requires the node's
	 * device and cnid to be known. Since this information was passed in the
	 * arguments, fill in the appropriate hfsnode fields without having to
	 * read the disk.
	 */
	hnode->h_dev = dev;
	hnode->h_rec.u.cnid = cnid;
	hnode->h_fork = fork;

	hfs_nhashinsert(hnode);
	mutex_exit(&hfs_hashlock);


	/*
	 * Read catalog record from disk.
	 */
	hfslib_init_cbargs(&cbargs);
	
	if (hfslib_find_catalog_record_with_cnid(&hmp->hm_vol, cnid,
		&rec, &key, &cbargs) != 0) {
		vput(vp);
		error = EBADF;
		goto error;
	}
		
	memcpy(&hnode->h_rec, &rec, sizeof(hnode->h_rec));
	hnode->h_parent = key.parent_cnid;

	/* XXX Eventually need to add an "ignore permissions" mount option */

	/*
	 * Now convert some of the catalog record's fields into values that make
	 * sense on this system.
	 */
	/* DATE AND TIME */

	/*
	 * Initialize the vnode from the hfsnode, check for aliases.
	 * Note that the underlying vnode may change.
	 */
	hfs_vinit(mp, hfs_specop_p, hfs_fifoop_p, &vp);

	hnode->h_devvp = hmp->hm_devvp;	
	VREF(hnode->h_devvp);  /* Increment the ref count to the volume's device. */

	/* Make sure UVM has allocated enough memory. (?) */
	if (hnode->h_rec.u.rec_type == HFS_REC_FILE) {
		if (hnode->h_fork == HFS_DATAFORK)
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.data_fork.logical_size);
		else
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.rsrc_fork.logical_size);
	}
	else
		uvm_vnp_setsize(vp, 0); /* directories are not read directly */
		
	*vpp = vp;
	
	return 0;

error:
	*vpp = NULL;
	return error;
}
Example #4
0
/*
 * Look up an anode: check the mount's hash table and, if not found,
 * create one.  Return it locked and referenced, a la vget(vp, 1).
 */
int
adosfs_vget(struct mount *mp, ino_t an, struct vnode **vpp)
{
	struct adosfsmount *amp;
	struct vnode *vp;
	struct anode *ap;
	struct buf *bp;
	char *nam, *tmp;
	int namlen, error;

	error = 0;
	amp = VFSTOADOSFS(mp);
	bp = NULL;

	/*
	 * check hash table. we are done if found
	 */
	if ((*vpp = adosfs_ahashget(mp, an)) != NULL)
		return (0);

	error = getnewvnode(VT_ADOSFS, mp, adosfs_vnodeop_p, NULL, &vp);
	if (error)
		return (error);

	/*
	 * setup, insert in hash, and lock before io.
	 */
	vp->v_data = ap = pool_get(&adosfs_node_pool, PR_WAITOK);
	memset(ap, 0, sizeof(struct anode));
	ap->vp = vp;
	ap->amp = amp;
	ap->block = an;
	ap->nwords = amp->nwords;
	genfs_node_init(vp, &adosfs_genfsops);
	adosfs_ainshash(amp, ap);

	if ((error = bread(amp->devvp, an * amp->bsize / DEV_BSIZE,
			   amp->bsize, NOCRED, 0, &bp)) != 0) {
		vput(vp);
		return (error);
	}

	/*
	 * get type and fill rest in based on that.
	 */
	switch (ap->type = adosfs_getblktype(amp, bp)) {
	case AROOT:
		vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
		ap->mtimev.days = adoswordn(bp, ap->nwords - 10);
		ap->mtimev.mins = adoswordn(bp, ap->nwords - 9);
		ap->mtimev.ticks = adoswordn(bp, ap->nwords - 8);
		ap->created.days = adoswordn(bp, ap->nwords - 7);
		ap->created.mins = adoswordn(bp, ap->nwords - 6);
		ap->created.ticks = adoswordn(bp, ap->nwords - 5);
		break;
	case ALDIR:
	case ADIR:
		vp->v_type = VDIR;
		break;
	case ALFILE:
	case AFILE:
		vp->v_type = VREG;
		ap->fsize = adoswordn(bp, ap->nwords - 47);
		break;
	case ASLINK:		/* XXX soft link */
		vp->v_type = VLNK;
		/*
		 * convert from BCPL string and
		 * from: "part:dir/file" to: "/part/dir/file"
		 */
		nam = (char *)bp->b_data + (6 * sizeof(long));
		namlen = strlen(nam);
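		/* Find the ':' separating the partition name, if any. */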
		tmp = nam;
		while (*tmp && *tmp != ':')
			tmp++;
		if (*tmp == 0) {
			ap->slinkto = malloc(namlen + 1, M_ANODE, M_WAITOK);
			memcpy(ap->slinkto, nam, namlen);
		} else if (*nam == ':') {
			ap->slinkto = malloc(namlen + 1, M_ANODE, M_WAITOK);
			memcpy(ap->slinkto, nam, namlen);
			ap->slinkto[0] = '/';
		} else {
			ap->slinkto = malloc(namlen + 2, M_ANODE, M_WAITOK);
			ap->slinkto[0] = '/';
			memcpy(&ap->slinkto[1], nam, namlen);
			ap->slinkto[tmp - nam + 1] = '/';
			namlen++;
		}
		ap->slinkto[namlen] = 0;
		ap->fsize = namlen;
		break;
	default:
		brelse(bp, 0);
		vput(vp);
		return (EINVAL);
	}

	/*
	 * Get appropriate data from this block;  hard link needs
	 * to get other data from the "real" block.
	 */

	/*
	 * copy in name (from original block)
	 */
	nam = (char *)bp->b_data + (ap->nwords - 20) * sizeof(u_int32_t);
	namlen = *(u_char *)nam++;
	if (namlen > 30) {
#ifdef DIAGNOSTIC
		printf("adosfs: aget: name length too long blk %llu\n",
		    (unsigned long long)an);
#endif
		brelse(bp, 0);
		vput(vp);
		return (EINVAL);
	}
	memcpy(ap->name, nam, namlen);
	ap->name[namlen] = 0;

	/*
	 * if dir alloc hash table and copy it in
	 */
	if (vp->v_type == VDIR) {
		int i;

		ap->tab = malloc(ANODETABSZ(ap) * 2, M_ANODE, M_WAITOK);
		ap->ntabent = ANODETABENT(ap);
		ap->tabi = (int *)&ap->tab[ap->ntabent];
		memset(ap->tabi, 0, ANODETABSZ(ap));
		for (i = 0; i < ap->ntabent; i++)
			ap->tab[i] = adoswordn(bp, i + 6);
	}

	/*
	 * misc.
	 */
	ap->pblock = adoswordn(bp, ap->nwords - 3);
	ap->hashf = adoswordn(bp, ap->nwords - 4);
	ap->linknext = adoswordn(bp, ap->nwords - 10);
	ap->linkto = adoswordn(bp, ap->nwords - 11);

	/*
	 * setup last indirect block cache.
	 */
	ap->lastlindblk = 0;
	if (ap->type == AFILE)  {
		ap->lastindblk = ap->block;
		if (adoswordn(bp, ap->nwords - 10))
			ap->linkto = ap->block;
	} else if (ap->type == ALFILE) {
		ap->lastindblk = ap->linkto;
		brelse(bp, 0);
		bp = NULL;
		error = bread(amp->devvp, ap->linkto * amp->bsize / DEV_BSIZE,
		    amp->bsize, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			return (error);
		}
		ap->fsize = adoswordn(bp, ap->nwords - 47);
		/*
		 * Should ap->block be set to the real file header block?
		 */
		ap->block = ap->linkto;
	}

	if (ap->type == AROOT) {
		ap->adprot = 15;
		ap->uid = amp->uid;
		ap->gid = amp->gid;
	} else {
		ap->adprot = adoswordn(bp, ap->nwords - 48) ^ 15;
		/*
		 * ADOS directories do not have a `x' protection bit as
		 * it is known in VFS; this functionality is fulfilled
		 * by the ADOS `r' bit.
		 *
		 * To retain the ADOS behaviour, fake execute permissions
		 * in that case.
		 */
		if ((ap->type == ADIR || ap->type == ALDIR) &&
		    (ap->adprot & 0x00000008) == 0)
			ap->adprot &= ~0x00000002;

		/*
		 * Get uid/gid from extensions in file header
		 * (really need to know if this is a muFS partition)
		 */
		ap->uid = (adoswordn(bp, ap->nwords - 49) >> 16) & 0xffff;
		ap->gid = adoswordn(bp, ap->nwords - 49) & 0xffff;
		if (ap->uid || ap->gid) {
			if (ap->uid == 0xffff)
				ap->uid = 0;
			if (ap->gid == 0xffff)
				ap->gid = 0;
			ap->adprot |= 0x40000000;	/* Kludge */
		}
		else {
			/*
			 * uid & gid extension don't exist,
			 * so use the mount-point uid/gid
			 */
			ap->uid = amp->uid;
			ap->gid = amp->gid;
		}
	}
	ap->mtime.days = adoswordn(bp, ap->nwords - 23);
	ap->mtime.mins = adoswordn(bp, ap->nwords - 22);
	ap->mtime.ticks = adoswordn(bp, ap->nwords - 21);

	*vpp = vp;
	brelse(bp, 0);
	uvm_vnp_setsize(vp, ap->fsize);
	return (0);
}
Example #5
0
/*
 * Read an inode from disk and initialize this vnode / inode pair.
 * Caller assures no other thread will try to load this inode.
 */
int
ext2fs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	ino_t ino;
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	dev_t dev;
	int error;

	KASSERT(key_len == sizeof(ino));
	memcpy(&ino, key, key_len);
	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	fs = ump->um_e2fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, EXT2_FSBTODB(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, 0, &bp);
	if (error)
		return error;

	/* Allocate and initialize inode. */
	ip = pool_get(&ext2fs_inode_pool, PR_WAITOK);
	memset(ip, 0, sizeof(struct inode));
	vp->v_tag = VT_EXT2FS;
	vp->v_op = ext2fs_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_e2fs = fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_e2fs_last_lblk = 0;
	ip->i_e2fs_last_blk = 0;

	/* Initialize genfs node. */
	genfs_node_init(vp, &ext2fs_genfsops);

	error = ext2fs_loadvnode_content(fs, ino, bp, ip);
	brelse(bp, 0);
	if (error)
		return error;

	/* If the inode was deleted, reset all fields */
	if (ip->i_e2fs_dtime != 0) {
		ip->i_e2fs_mode = 0;
		(void)ext2fs_setsize(ip, 0);
		(void)ext2fs_setnblock(ip, 0);
		memset(ip->i_e2fs_blocks, 0, sizeof(ip->i_e2fs_blocks));
	}

	/* Initialize the vnode from the inode. */
	ext2fs_vinit(mp, ext2fs_specop_p, ext2fs_fifoop_p, &vp);

	/* Finish inode initialization. */
	ip->i_devvp = ump->um_devvp;
	vref(ip->i_devvp);

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */

	if (ip->i_e2fs_gen == 0) {
		if (++ext2gennumber < (u_long)time_second)
			ext2gennumber = time_second;
		ip->i_e2fs_gen = ext2gennumber;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	uvm_vnp_setsize(vp, ext2fs_size(ip));
	*new_key = &ip->i_number;
	return 0;
}
Example #6
0
/*
 * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ext2fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	void *cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_EXT2FS, mp, ext2fs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&ext2fs_inode_pool, PR_WAITOK);

	mutex_enter(&ufs_hashlock);
	if ((*vpp = ufs_ihashget(dev, ino, 0)) != NULL) {
		mutex_exit(&ufs_hashlock);
		ungetnewvnode(vp);
		pool_put(&ext2fs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

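	/* Initialize the in-core inode and attach it to the vnode. */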
	memset(ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_e2fs_last_lblk = 0;
	ip->i_e2fs_last_blk = 0;
	genfs_node_init(vp, &ext2fs_genfsops);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */

	ufs_ihashins(ip);
	mutex_exit(&ufs_hashlock);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, EXT2_FSBTODB(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, NOCRED, 0, &bp);
	if (error) {

		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */

		vput(vp);
		*vpp = NULL;
		return (error);
	}
	cp = (char *)bp->b_data + (ino_to_fsbo(fs, ino) * EXT2_DINODE_SIZE(fs));
	ip->i_din.e2fs_din = pool_get(&ext2fs_dinode_pool, PR_WAITOK);
	e2fs_iload((struct ext2fs_dinode *)cp, ip->i_din.e2fs_din);
	ext2fs_set_inode_guid(ip);
	brelse(bp, 0);

	/* If the inode was deleted, reset all fields */
	if (ip->i_e2fs_dtime != 0) {
		ip->i_e2fs_mode = 0;
		(void)ext2fs_setsize(ip, 0);
		(void)ext2fs_setnblock(ip, 0);
		memset(ip->i_e2fs_blocks, 0, sizeof(ip->i_e2fs_blocks));
	}

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 */

	error = ext2fs_vinit(mp, ext2fs_specop_p, ext2fs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */

	ip->i_devvp = ump->um_devvp;
	vref(ip->i_devvp);

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */

	if (ip->i_e2fs_gen == 0) {
		if (++ext2gennumber < (u_long)time_second)
			ext2gennumber = time_second;
		ip->i_e2fs_gen = ext2gennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	uvm_vnp_setsize(vp, ext2fs_size(ip));
	*vpp = vp;
	return (0);
}
Example #7
0
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
Example #8
0
int
ntfs_vgetex(
	struct mount *mp,
	ino_t ino,
	u_int32_t attrtype,
	char *attrname,
	u_long lkflags,
	u_long flags,
	struct vnode **vpp)
{
	int error;
	struct ntfsmount *ntmp;
	struct ntnode *ip;
	struct fnode *fp;
	struct vnode *vp;
	enum vtype f_type = VBAD;

	dprintf(("ntfs_vgetex: ino: %llu, attr: 0x%x:%s, lkf: 0x%lx, f:"
	    " 0x%lx\n", (unsigned long long)ino, attrtype,
	    attrname ? attrname : "", (u_long)lkflags, (u_long)flags));

	ntmp = VFSTONTFS(mp);
	*vpp = NULL;

loop:
	/* Get ntnode */
	error = ntfs_ntlookup(ntmp, ino, &ip);
	if (error) {
		printf("ntfs_vget: ntfs_ntget failed\n");
		return (error);
	}

	/* It may not be fully initialized, so force-load it */
	if (!(flags & VG_DONTLOADIN) && !(ip->i_flag & IN_LOADED)) {
		error = ntfs_loadntnode(ntmp, ip);
		if(error) {
			printf("ntfs_vget: CAN'T LOAD ATTRIBUTES FOR INO:"
			    " %llu\n", (unsigned long long)ip->i_number);
			ntfs_ntput(ip);
			return (error);
		}
	}

	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}

	if (!(flags & VG_DONTVALIDFN) && !(fp->f_flag & FN_VALID)) {
		if ((ip->i_frflag & NTFS_FRFLAG_DIR) &&
		    (fp->f_attrtype == NTFS_A_DATA && fp->f_attrname == NULL)) {
			f_type = VDIR;
		} else if (flags & VG_EXT) {
			f_type = VNON;
			fp->f_size = fp->f_allocated = 0;
		} else {
			f_type = VREG;

			error = ntfs_filesize(ntmp, fp,
					      &fp->f_size, &fp->f_allocated);
			if (error) {
				ntfs_ntput(ip);
				return (error);
			}
		}

		fp->f_flag |= FN_VALID;
	}

	/*
	 * We may be calling vget() now. To avoid potential deadlock, we need
	 * to release ntnode lock, since due to locking order vnode
	 * lock has to be acquired first.
	 * ntfs_fget() bumped ntnode usecount, so ntnode won't be recycled
	 * prematurely.
	 * Take v_interlock before releasing ntnode lock to avoid races.
	 */
	vp = FTOV(fp);
	if (vp) {
		mutex_enter(vp->v_interlock);
		ntfs_ntput(ip);
		if (vget(vp, lkflags) != 0)
			goto loop;
		*vpp = vp;
		return 0;
	}
	ntfs_ntput(ip);

	error = getnewvnode(VT_NTFS, ntmp->ntm_mountp, ntfs_vnodeop_p,
	    NULL, &vp);
	if(error) {
		ntfs_frele(fp);
		return (error);
	}
	ntfs_ntget(ip);
	error = ntfs_fget(ntmp, ip, attrtype, attrname, &fp);
	if (error) {
		printf("ntfs_vget: ntfs_fget failed\n");
		ntfs_ntput(ip);
		return (error);
	}
	if (FTOV(fp)) {
		/*
		 * Another thread beat us, put back freshly allocated
		 * vnode and retry.
		 */
		ntfs_ntput(ip);
		ungetnewvnode(vp);
		goto loop;
	}
	dprintf(("ntfs_vget: vnode: %p for ntnode: %llu\n", vp,
	    (unsigned long long)ino));

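	/* Attach the fnode to the new vnode, set its type, and hook up genfs. */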
	fp->f_vp = vp;
	vp->v_data = fp;
	if (f_type != VBAD)
		vp->v_type = f_type;
	genfs_node_init(vp, &ntfs_genfsops);

	if (ino == NTFS_ROOTINO)
		vp->v_vflag |= VV_ROOT;

	ntfs_ntput(ip);

	if (lkflags & (LK_EXCLUSIVE | LK_SHARED)) {
		error = vn_lock(vp, lkflags);
		if (error) {
			vput(vp);
			return (error);
		}
	}

	uvm_vnp_setsize(vp, fp->f_size); /* XXX: mess, cf. ntfs_lookupfile() */
	vref(ip->i_devvp);
	*vpp = vp;
	return (0);
}
Example #9
0
static int
chfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct chfs_mount *chmp;
	struct chfs_inode *ip;
	struct ufsmount *ump;
	struct vnode *vp;
	dev_t dev;
	int error;
	struct chfs_vnode_cache* chvc = NULL;
	struct chfs_node_ref* nref = NULL;
	struct buf *bp;

	dbg("vget() | ino: %llu\n", (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if (!vpp) {
		vpp = kmem_alloc(sizeof(struct vnode*), KM_SLEEP);
	}

	/* Get node from inode hash. */
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		return 0;
	}

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_CHFS,
		    mp, chfs_vnodeop_p, NULL, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&chfs_inode_pool, PR_WAITOK);

	mutex_enter(&chfs_hashlock);
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		mutex_exit(&chfs_hashlock);
		ungetnewvnode(vp);
		pool_put(&chfs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

	/* Initialize vnode/inode. */
	memset(ip, 0, sizeof(*ip));
	vp->v_data = ip;
	ip->vp = vp;
	ip->ch_type = VTTOCHT(vp->v_type);
	ip->ump = ump;
	ip->chmp = chmp = ump->um_chfs;
	ip->dev = dev;
	ip->ino = ino;
	vp->v_mount = mp;
	genfs_node_init(vp, &chfs_genfsops);

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	chfs_ihashins(ip);
	mutex_exit(&chfs_hashlock);

	/* Set root inode. */
	if (ino == CHFS_ROOTINO) {
		dbg("SETROOT\n");
		vp->v_vflag |= VV_ROOT;
		vp->v_type = VDIR;
		ip->ch_type = CHT_DIR;
		ip->mode = IFMT | IEXEC | IWRITE | IREAD;
		ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
		chfs_update(vp, NULL, NULL, UPDATE_WAIT);
		TAILQ_INIT(&ip->dents);
		chfs_set_vnode_size(vp, 512);
	}

	mutex_enter(&chmp->chm_lock_vnocache);
	chvc = chfs_vnode_cache_get(chmp, ino);
	mutex_exit(&chmp->chm_lock_vnocache);
	if (!chvc) {
		dbg("!chvc\n");
		/* Initialize the corresponding vnode cache. */
		/* XXX, we can't alloc under a lock, refactor this! */
		chvc = chfs_vnode_cache_alloc(ino);
		mutex_enter(&chmp->chm_lock_vnocache);
		if (ino == CHFS_ROOTINO) {
			chvc->nlink = 2;
			chvc->pvno = CHFS_ROOTINO;
			chvc->state = VNO_STATE_CHECKEDABSENT;
		}
		chfs_vnode_cache_add(chmp, chvc);
		mutex_exit(&chmp->chm_lock_vnocache);

		ip->chvc = chvc;
		TAILQ_INIT(&ip->dents);
	} else {
		dbg("chvc\n");
		ip->chvc = chvc;
		/* We had a vnode cache, the node is already on flash, so read it */
		if (ino == CHFS_ROOTINO) {
			chvc->pvno = CHFS_ROOTINO;
			TAILQ_INIT(&chvc->scan_dirents);
		} else {
			chfs_readvnode(mp, ino, &vp);
		}

		mutex_enter(&chmp->chm_lock_mountfields);
		/* Initialize type specific things. */
		switch (ip->ch_type) {
		case CHT_DIR:
			/* Read every dirent. */
			nref = chvc->dirents;
			while (nref &&
			    (struct chfs_vnode_cache *)nref != chvc) {
				chfs_readdirent(mp, nref, ip);
				nref = nref->nref_next;
			}
			chfs_set_vnode_size(vp, 512);
			break;
		case CHT_REG:
			/* FALLTHROUGH */
		case CHT_SOCK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}
			break;
		case CHT_LNK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set link. */
			dbg("size: %llu\n", (unsigned long long)ip->size);
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = ip->size;
			bp->b_data = kmem_alloc(ip->size, KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			if (!ip->target)
				ip->target = kmem_alloc(ip->size,
				    KM_SLEEP);
			memcpy(ip->target, bp->b_data, ip->size);
			kmem_free(bp->b_data, ip->size);
			putiobuf(bp);

			break;
		case CHT_CHR:
			/* FALLTHROUGH */
		case CHT_BLK:
			/* FALLTHROUGH */
		case CHT_FIFO:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set device. */
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = sizeof(dev_t);
			bp->b_data = kmem_alloc(sizeof(dev_t), KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			memcpy(&ip->rdev,
			    bp->b_data, sizeof(dev_t));
			kmem_free(bp->b_data, sizeof(dev_t));
			putiobuf(bp);
			/* Set specific operations. */
			if (ip->ch_type == CHT_FIFO) {
				vp->v_op = chfs_fifoop_p;
			} else {
				vp->v_op = chfs_specop_p;
				spec_node_init(vp, ip->rdev);
			}

		    break;
		case CHT_BLANK:
			/* FALLTHROUGH */
		case CHT_BAD:
			break;
		}
		mutex_exit(&chmp->chm_lock_mountfields);

	}

	/* Finish inode initialization. */
	ip->devvp = ump->um_devvp;
	vref(ip->devvp);

	uvm_vnp_setsize(vp, ip->size);
	*vpp = vp;

	return 0;
}
Example #10
0
int
v7fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
    struct v7fs_mount *v7fsmount = mp->mnt_data;
    struct v7fs_self *fs = v7fsmount->core;
    struct vnode *vp;
    struct v7fs_node *v7fs_node;
    struct v7fs_inode inode;
    int error;

    /* Lookup requested i-node */
    if ((error = v7fs_inode_load(fs, &inode, ino))) {
        DPRINTF("v7fs_inode_load failed.\n");
        return error;
    }

retry:
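    /* See whether a vnode for this inode is already on this mount's node list. */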
    mutex_enter(&mntvnode_lock);
    for (v7fs_node = LIST_FIRST(&v7fsmount->v7fs_node_head);
            v7fs_node != NULL; v7fs_node = LIST_NEXT(v7fs_node, link)) {
        if (v7fs_node->inode.inode_number == ino) {
            vp = v7fs_node->vnode;
            mutex_enter(vp->v_interlock);
            mutex_exit(&mntvnode_lock);
            if (vget(vp, LK_EXCLUSIVE) == 0) {
                *vpp = vp;
                return 0;
            } else {
                DPRINTF("retry!\n");
                goto retry;
            }
        }
    }
    mutex_exit(&mntvnode_lock);

    /* Allocate v-node. */
    if ((error = getnewvnode(VT_V7FS, mp, v7fs_vnodeop_p, NULL, &vp))) {
        DPRINTF("getnewvnode error.\n");
        return error;
    }
    /* Lock vnode here */
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

    /* Allocate i-node */
    vp->v_data = pool_get(&v7fs_node_pool, PR_WAITOK);
    memset(vp->v_data, 0, sizeof(*v7fs_node));
    v7fs_node = vp->v_data;
    mutex_enter(&mntvnode_lock);
    LIST_INSERT_HEAD(&v7fsmount->v7fs_node_head, v7fs_node, link);
    mutex_exit(&mntvnode_lock);
    v7fs_node->vnode = vp;
    v7fs_node->v7fsmount = v7fsmount;
    v7fs_node->inode = inode;/*structure copy */
    v7fs_node->lockf = NULL; /* advlock */

    genfs_node_init(vp, &v7fs_genfsops);
    uvm_vnp_setsize(vp, v7fs_inode_filesize(&inode));

    if (ino == V7FS_ROOT_INODE) {
        vp->v_type = VDIR;
        vp->v_vflag |= VV_ROOT;
    } else {
        vp->v_type = v7fs_mode_to_vtype(inode.mode);

        if (vp->v_type == VBLK || vp->v_type == VCHR) {
            dev_t rdev = inode.device;
            vp->v_op = v7fs_specop_p;
            spec_node_init(vp, rdev);
        } else if (vp->v_type == VFIFO) {
            vp->v_op = v7fs_fifoop_p;
        }
    }

    *vpp = vp;

    return 0;
}
Example #11
0
int
cd9660_vget_internal(struct mount *mp, ino_t ino, struct vnode **vpp,
	int relocated, struct iso_directory_record *isodir)
{
	struct iso_mnt *imp;
	struct iso_node *ip;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	imp = VFSTOISOFS(mp);
	dev = imp->im_dev;

 retry:
	if ((*vpp = cd9660_ihashget(dev, ino, LK_EXCLUSIVE)) != NULLVP)
		return (0);

	/* Allocate a new vnode/iso_node. */
	error = getnewvnode(VT_ISOFS, mp, cd9660_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	ip = pool_get(&cd9660_node_pool, PR_WAITOK);

	/*
	 * If someone beat us to it, put back the freshly allocated
	 * vnode/inode pair and retry.
	 */
	mutex_enter(&cd9660_hashlock);
	if (cd9660_ihashget(dev, ino, 0) != NULL) {
		mutex_exit(&cd9660_hashlock);
		ungetnewvnode(vp);
		pool_put(&cd9660_node_pool, ip);
		goto retry;
	}

	memset(ip, 0, sizeof(struct iso_node));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_mnt = imp;
	ip->i_devvp = imp->im_devvp;
	genfs_node_init(vp, &cd9660_genfsops);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	cd9660_ihashins(ip);
	mutex_exit(&cd9660_hashlock);

	if (isodir == 0) {
		int lbn, off;

		lbn = cd9660_lblkno(imp, ino);
		if (lbn >= imp->volume_space_size) {
			vput(vp);
			printf("fhtovp: lbn exceed volume space %d\n", lbn);
			return (ESTALE);
		}

		off = cd9660_blkoff(imp, ino);
		if (off + ISO_DIRECTORY_RECORD_SIZE > imp->logical_block_size) {
			vput(vp);
			printf("fhtovp: crosses block boundary %d\n",
			    off + ISO_DIRECTORY_RECORD_SIZE);
			return (ESTALE);
		}

		error = bread(imp->im_devvp,
			      lbn << (imp->im_bshift - DEV_BSHIFT),
			      imp->logical_block_size, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			printf("fhtovp: bread error %d\n",error);
			return (error);
		}
		isodir = (struct iso_directory_record *)((char *)bp->b_data + off);

		if (off + isonum_711(isodir->length) >
		    imp->logical_block_size) {
			vput(vp);
			if (bp != 0)
				brelse(bp, 0);
			printf("fhtovp: directory crosses block boundary %d[off=%d/len=%d]\n",
			    off +isonum_711(isodir->length), off,
			    isonum_711(isodir->length));
			return (ESTALE);
		}

#if 0
		if (isonum_733(isodir->extent) +
		    isonum_711(isodir->ext_attr_length) != ifhp->ifid_start) {
			if (bp != 0)
				brelse(bp, 0);
			printf("fhtovp: file start miss %d vs %d\n",
			    isonum_733(isodir->extent) + isonum_711(isodir->ext_attr_length),
			    ifhp->ifid_start);
			return (ESTALE);
		}
#endif
	} else
		bp = 0;

	vref(ip->i_devvp);

	if (relocated) {
		/*
		 * On relocated directories we must
		 * read the `.' entry out of a dir.
		 */
		ip->iso_start = ino >> imp->im_bshift;
		if (bp != 0)
			brelse(bp, 0);
		if ((error = cd9660_blkatoff(vp, (off_t)0, NULL, &bp)) != 0) {
			vput(vp);
			return (error);
		}
		isodir = (struct iso_directory_record *)bp->b_data;
	}

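	/* Copy the extent, size and starting block out of the directory record. */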
	ip->iso_extent = isonum_733(isodir->extent);
	ip->i_size = isonum_733(isodir->size);
	ip->iso_start = isonum_711(isodir->ext_attr_length) + ip->iso_extent;

	/*
	 * Setup time stamp, attribute
	 */
	vp->v_type = VNON;
	switch (imp->iso_ftype) {
	default:	/* ISO_FTYPE_9660 */
	    {
		struct buf *bp2;
		int off;
		if ((imp->im_flags & ISOFSMNT_EXTATT)
		    && (off = isonum_711(isodir->ext_attr_length)))
			cd9660_blkatoff(vp, (off_t)-(off << imp->im_bshift),
			    NULL, &bp2);
		else
			bp2 = NULL;
		cd9660_defattr(isodir, ip, bp2);
		cd9660_deftstamp(isodir, ip, bp2);
		if (bp2)
			brelse(bp2, 0);
		break;
	    }
	case ISO_FTYPE_RRIP:
		cd9660_rrip_analyze(isodir, ip, imp);
		break;
	}

	if (bp != 0)
		brelse(bp, 0);

	/*
	 * Initialize the associated vnode
	 */
	switch (vp->v_type = IFTOVT(ip->inode.iso_mode)) {
	case VFIFO:
		vp->v_op = cd9660_fifoop_p;
		break;
	case VCHR:
	case VBLK:
		/*
		 * if device, look at device number table for translation
		 */
		vp->v_op = cd9660_specop_p;
		spec_node_init(vp, ip->inode.iso_rdev);
		break;
	case VLNK:
	case VNON:
	case VSOCK:
	case VDIR:
	case VBAD:
		break;
	case VREG:
		uvm_vnp_setsize(vp, ip->i_size);
		break;
	}

	if (vp->v_type != VREG)
		uvm_vnp_setsize(vp, 0);

	if (ip->iso_extent == imp->root_extent)
		vp->v_vflag |= VV_ROOT;

	/*
	 * XXX need generation number?
	 */

	*vpp = vp;
	return (0);
}
Example #12
0
/*
 * Obtain a locked vnode for the given on-disk inode number.
 *
 * We currently allocate a new vnode from getnewvnode(), attach it to
 * our in-core inode structure (efs_inode), and read in the inode from
 * disk. The returned inode must be locked.
 *
 * Returns 0 on success.
 */
static int
efs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	int err;
	struct vnode *vp;
	struct efs_inode *eip;
	struct efs_mount *emp;

	emp = VFSTOEFS(mp);

	while (true) {
		*vpp = efs_ihashget(emp->em_dev, ino, LK_EXCLUSIVE);
		if (*vpp != NULL)
			return (0);

		err = getnewvnode(VT_EFS, mp, efs_vnodeop_p, NULL, &vp);
		if (err)
			return (err);
		
		eip = pool_get(&efs_inode_pool, PR_WAITOK);

		/*
		 * See if anybody has raced us here.  If not, continue
		 * setting up the new inode, otherwise start over.
		 */
		efs_ihashlock();

		if (efs_ihashget(emp->em_dev, ino, 0) == NULL)
			break;

		efs_ihashunlock();
		ungetnewvnode(vp);
		pool_put(&efs_inode_pool, eip);
	}

	vp->v_vflag |= VV_LOCKSWORK;
	eip->ei_mode = 0;
	eip->ei_lockf = NULL;
	eip->ei_number = ino;
	eip->ei_dev = emp->em_dev;
	eip->ei_vp = vp;
	vp->v_data = eip;

	/*
	 * Place the vnode on the hash chain. Doing so will lock the
	 * vnode, so it's okay to drop the global lock and read in
	 * the inode from disk.
	 */
	efs_ihashins(eip);
	efs_ihashunlock();

	/*
	 * Init genfs early, otherwise we'll trip up on genfs_node_destroy
	 * in efs_reclaim when vput()ing in an error branch here.
	 */
	genfs_node_init(vp, &efs_genfsops);

	err = efs_read_inode(emp, ino, NULL, &eip->ei_di);
	if (err) {
		vput(vp);
		*vpp = NULL;
		return (err);
	}

	efs_sync_dinode_to_inode(eip);

	if (ino == EFS_ROOTINO && !S_ISDIR(eip->ei_mode)) {
		printf("efs: root inode (%lu) is not a directory!\n",
		    (ulong)EFS_ROOTINO);
		vput(vp);
		*vpp = NULL;
		return (EIO);
	}

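	/* Set the vnode type and operations based on the inode mode. */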
	switch (eip->ei_mode & S_IFMT) {
	case S_IFIFO:
		vp->v_type = VFIFO;
		vp->v_op = efs_fifoop_p;
		break;
	case S_IFCHR:
		vp->v_type = VCHR;
		vp->v_op = efs_specop_p;
		spec_node_init(vp, eip->ei_dev);
		break;
	case S_IFDIR:
		vp->v_type = VDIR;
		if (ino == EFS_ROOTINO)
			vp->v_vflag |= VV_ROOT;
		break;
	case S_IFBLK:
		vp->v_type = VBLK;
		vp->v_op = efs_specop_p;
		spec_node_init(vp, eip->ei_dev);
		break;
	case S_IFREG:
		vp->v_type = VREG;
		break;
	case S_IFLNK:
		vp->v_type = VLNK;
		break;
	case S_IFSOCK:
		vp->v_type = VSOCK;
		break;
	default:
		printf("efs: invalid mode 0x%x in inode %lu on mount %s\n",
		    eip->ei_mode, (ulong)ino, mp->mnt_stat.f_mntonname);
		vput(vp);
		*vpp = NULL;
		return (EIO);
	}

	uvm_vnp_setsize(vp, eip->ei_size);
	*vpp = vp;

	KASSERT(VOP_ISLOCKED(vp));

	return (0);
}