Example #1
/*
 * chfs_scan_make_vnode_cache - obtains a vnode cache during scan
 * Returns the vnode cache belonging to @vno, allocating and
 * registering a new one if it does not exist yet.
 */
struct chfs_vnode_cache *
chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
{
	struct chfs_vnode_cache *vc;

	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	/* vnode cache already exists */
	vc = chfs_vnode_cache_get(chmp, vno);
	if (vc) {
		return vc;
	}

	/* update max vnode number if needed */
	if (vno > chmp->chm_max_vno) {
		chmp->chm_max_vno = vno;
	}

	/* create new vnode cache */
	vc = chfs_vnode_cache_alloc(vno);

	chfs_vnode_cache_add(chmp, vc);

	if (vno == CHFS_ROOTINO) {
		vc->nlink = 2;
		vc->pvno = CHFS_ROOTINO;
		vc->state = VNO_STATE_CHECKEDABSENT;
	}

	return vc;
}
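
The KASSERT above requires the caller to hold chm_lock_vnocache. A minimal caller sketch follows, assuming the NetBSD kernel context of the code above; the wrapper name scan_lookup_or_create is hypothetical and only illustrates that locking contract.

/*
 * Hypothetical wrapper (not part of CHFS): the scan code must take
 * chm_lock_vnocache before calling chfs_scan_make_vnode_cache().
 */
static struct chfs_vnode_cache *
scan_lookup_or_create(struct chfs_mount *chmp, ino_t vno)
{
	struct chfs_vnode_cache *vc;

	mutex_enter(&chmp->chm_lock_vnocache);
	vc = chfs_scan_make_vnode_cache(chmp, vno);
	mutex_exit(&chmp->chm_lock_vnocache);

	return vc;
}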
Example #2
static int
chfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct chfs_mount *chmp;
	struct chfs_inode *ip;
	struct ufsmount *ump;
	struct vnode *vp;
	dev_t dev;
	int error;
	struct chfs_vnode_cache *chvc = NULL;
	struct chfs_node_ref *nref = NULL;
	struct buf *bp;

	dbg("vget() | ino: %llu\n", (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if (!vpp) {
		vpp = kmem_alloc(sizeof(struct vnode*), KM_SLEEP);
	}

	/* Get node from inode hash. */
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		return 0;
	}

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_CHFS,
		    mp, chfs_vnodeop_p, NULL, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&chfs_inode_pool, PR_WAITOK);

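	/*
	 * Re-check the inode hash under chfs_hashlock: another thread may
	 * have added this inode while we slept in the allocations above.
	 */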
	mutex_enter(&chfs_hashlock);
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		mutex_exit(&chfs_hashlock);
		ungetnewvnode(vp);
		pool_put(&chfs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

	/* Initialize vnode/inode. */
	memset(ip, 0, sizeof(*ip));
	vp->v_data = ip;
	ip->vp = vp;
	ip->ch_type = VTTOCHT(vp->v_type);
	ip->ump = ump;
	ip->chmp = chmp = ump->um_chfs;
	ip->dev = dev;
	ip->ino = ino;
	vp->v_mount = mp;
	genfs_node_init(vp, &chfs_genfsops);

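	/* Per-file fragment tree: holds the data fragments of the file. */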
	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	chfs_ihashins(ip);
	mutex_exit(&chfs_hashlock);

	/* Set root inode. */
	if (ino == CHFS_ROOTINO) {
		dbg("SETROOT\n");
		vp->v_vflag |= VV_ROOT;
		vp->v_type = VDIR;
		ip->ch_type = CHT_DIR;
		ip->mode = IFMT | IEXEC | IWRITE | IREAD;
		ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
		chfs_update(vp, NULL, NULL, UPDATE_WAIT);
		TAILQ_INIT(&ip->dents);
		chfs_set_vnode_size(vp, 512);
	}

	mutex_enter(&chmp->chm_lock_vnocache);
	chvc = chfs_vnode_cache_get(chmp, ino);
	mutex_exit(&chmp->chm_lock_vnocache);
	if (!chvc) {
		dbg("!chvc\n");
		/* Initialize the corresponding vnode cache. */
		/* XXX, we can't alloc under a lock, refactor this! */
		chvc = chfs_vnode_cache_alloc(ino);
		mutex_enter(&chmp->chm_lock_vnocache);
		if (ino == CHFS_ROOTINO) {
			chvc->nlink = 2;
			chvc->pvno = CHFS_ROOTINO;
			chvc->state = VNO_STATE_CHECKEDABSENT;
		}
		chfs_vnode_cache_add(chmp, chvc);
		mutex_exit(&chmp->chm_lock_vnocache);

		ip->chvc = chvc;
		TAILQ_INIT(&ip->dents);
	} else {
		dbg("chvc\n");
		ip->chvc = chvc;
		/* A vnode cache already exists, so the node is on flash; read it. */
		if (ino == CHFS_ROOTINO) {
			chvc->pvno = CHFS_ROOTINO;
			TAILQ_INIT(&chvc->scan_dirents);
		} else {
			chfs_readvnode(mp, ino, &vp);
		}

		mutex_enter(&chmp->chm_lock_mountfields);
		/* Initialize type-specific state. */
		switch (ip->ch_type) {
		case CHT_DIR:
			/* Read every dirent. */
			nref = chvc->dirents;
			while (nref &&
			    (struct chfs_vnode_cache *)nref != chvc) {
				chfs_readdirent(mp, nref, ip);
				nref = nref->nref_next;
			}
			chfs_set_vnode_size(vp, 512);
			break;
		case CHT_REG:
			/* FALLTHROUGH */
		case CHT_SOCK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}
			break;
		case CHT_LNK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set link. */
			dbg("size: %llu\n", (unsigned long long)ip->size);
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = ip->size;
			bp->b_data = kmem_alloc(ip->size, KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			if (!ip->target)
				ip->target = kmem_alloc(ip->size,
				    KM_SLEEP);
			memcpy(ip->target, bp->b_data, ip->size);
			kmem_free(bp->b_data, ip->size);
			putiobuf(bp);

			break;
		case CHT_CHR:
			/* FALLTHROUGH */
		case CHT_BLK:
			/* FALLTHROUGH */
		case CHT_FIFO:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set device. */
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = sizeof(dev_t);
			bp->b_data = kmem_alloc(sizeof(dev_t), KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			memcpy(&ip->rdev,
			    bp->b_data, sizeof(dev_t));
			kmem_free(bp->b_data, sizeof(dev_t));
			putiobuf(bp);
			/* Set specific operations. */
			if (ip->ch_type == CHT_FIFO) {
				vp->v_op = chfs_fifoop_p;
			} else {
				vp->v_op = chfs_specop_p;
				spec_node_init(vp, ip->rdev);
			}

			break;
		case CHT_BLANK:
			/* FALLTHROUGH */
		case CHT_BAD:
			break;
		}
		mutex_exit(&chmp->chm_lock_mountfields);

	}

	/* Finish inode initialization. */
	ip->devvp = ump->um_devvp;
	vref(ip->devvp);

	uvm_vnp_setsize(vp, ip->size);
	*vpp = vp;

	return 0;
}
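
For context, chfs_vget() is the file system's VFS_VGET entry point, so it is normally reached through the VFS_VGET dispatch rather than called directly. Below is a minimal sketch of a root-vnode lookup going through it, assuming the 3-argument VFS_VGET of the NetBSD release this code comes from; the function name chfs_root_sketch is illustrative, not necessarily the verbatim CHFS implementation.

/*
 * Sketch: obtain the root vnode through VFS_VGET, which dispatches
 * to chfs_vget() for a CHFS mount.
 */
static int
chfs_root_sketch(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = VFS_VGET(mp, (ino_t)CHFS_ROOTINO, &vp);
	if (error)
		return error;

	*vpp = vp;
	return 0;
}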