Example #1
int
layerfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct vnode *lowervp;
	struct layer_node *xp;

	KASSERT(key_len == sizeof(struct vnode *));
	memcpy(&lowervp, key, key_len);

	xp = kmem_alloc(lmp->layerm_size, KM_SLEEP);
	if (xp == NULL)
		return ENOMEM;

	/* Share the interlock with the lower node. */
	mutex_obj_hold(lowervp->v_interlock);
	uvm_obj_setlock(&vp->v_uobj, lowervp->v_interlock);

	vp->v_tag = lmp->layerm_tag;
	vp->v_type = lowervp->v_type;
	vp->v_op = lmp->layerm_vnodeop_p;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_init(vp, lowervp->v_rdev);
	vp->v_data = xp;
	xp->layer_vnode = vp;
	xp->layer_lowervp = lowervp;
	xp->layer_flags = 0;
	uvm_vnp_setsize(vp, 0);

	/*  Add a reference to the lower node. */
	vref(lowervp);
	*new_key = &xp->layer_lowervp;
	return 0;
}
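A loadvnode hook like this one is not called directly; it is run by the vnode cache on a lookup miss. The sketch below shows how a layerfs lookup path might obtain the alias vnode, assuming NetBSD's vcache_get() interface; the wrapper name is illustrative, not the actual layerfs function.

/*
 * Hypothetical caller: vcache_get() is handed &lowervp as the key,
 * matching the KASSERT in layerfs_loadvnode(), and invokes the
 * loadvnode hook above only when no cached vnode exists for that key.
 */
static int
layerfs_get_alias(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
{

	return vcache_get(mp, &lowervp, sizeof(lowervp), vpp);
}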
/*
 * allocate a ptyfsnode/vnode pair.  the vnode is
 * referenced and locked.
 *
 * the pty number, ptyfs type, and mount point uniquely
 * identify a ptyfsnode.  the mount point is needed
 * because someone might mount this filesystem
 * twice.
 *
 * all ptyfsnodes are maintained on a singly-linked
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode(), since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
ptyfs_allocvp(struct mount *mp, struct vnode **vpp, ptyfstype type, int pty,
    struct lwp *l)
{
	struct ptyfsnode *ptyfs;
	struct vnode *vp;
	int error;

 retry:
	if ((*vpp = ptyfs_used_get(type, pty, mp, LK_EXCLUSIVE)) != NULL)
		return 0;

	error = getnewvnode(VT_PTYFS, mp, ptyfs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return error;
	}

	mutex_enter(&ptyfs_hashlock);
	if (ptyfs_used_get(type, pty, mp, 0) != NULL) {
		mutex_exit(&ptyfs_hashlock);
		ungetnewvnode(vp);
		goto retry;
	}

	vp->v_data = ptyfs = ptyfs_free_get(type, pty, l);
	ptyfs->ptyfs_vnode = vp;

	switch (type) {
	case PTYFSroot:	/* /pts = dr-xr-xr-x */
		vp->v_type = VDIR;
		vp->v_vflag = VV_ROOT;
		break;

	case PTYFSpts:	/* /pts/N = cxxxxxxxxx */
	case PTYFSptc:	/* controlling side = cxxxxxxxxx */
		vp->v_type = VCHR;
		spec_node_init(vp, PTYFS_MAKEDEV(ptyfs));
		break;
	default:
		panic("ptyfs_allocvp");
	}

	ptyfs_hashins(ptyfs);
	uvm_vnp_setsize(vp, 0);
	mutex_exit(&ptyfs_hashlock);

	*vpp = vp;
	return 0;
}
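Callers use ptyfs_allocvp() as the single way to turn a (type, pty) pair into a referenced, locked vnode. A minimal, hypothetical usage sketch follows; the wrapper name is illustrative and error handling is left to the caller.

/* Hypothetical: obtain the root vnode of a ptyfs mount via ptyfs_allocvp(). */
static int
example_ptyfs_root(struct mount *mp, struct vnode **vpp)
{

	return ptyfs_allocvp(mp, vpp, PTYFSroot, 0, curlwp);
}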
/*
 * Initialize the vnode associated with a new inode, handle aliased
 * vnodes.
 */
void
ulfs_vinit(struct mount *mntp, int (**specops)(void *), int (**fifoops)(void *),
	struct vnode **vpp)
{
	struct timeval	tv;
	struct inode	*ip;
	struct vnode	*vp;
	dev_t		rdev;
	struct ulfsmount *ump;

	vp = *vpp;
	ip = VTOI(vp);
	switch(vp->v_type = IFTOVT(ip->i_mode)) {
	case VCHR:
	case VBLK:
		vp->v_op = specops;
		ump = ip->i_ump;
		// XXX clean this up
		if (ump->um_fstype == ULFS1)
			rdev = (dev_t)ulfs_rw32(ip->i_din->u_32.di_rdev,
			    ULFS_MPNEEDSWAP(ump->um_lfs));
		else
			rdev = (dev_t)ulfs_rw64(ip->i_din->u_64.di_rdev,
			    ULFS_MPNEEDSWAP(ump->um_lfs));
		spec_node_init(vp, rdev);
		break;
	case VFIFO:
		vp->v_op = fifoops;
		break;
	case VNON:
	case VBAD:
	case VSOCK:
	case VLNK:
	case VDIR:
	case VREG:
		break;
	}
	if (ip->i_number == ULFS_ROOTINO)
                vp->v_vflag |= VV_ROOT;
	/*
	 * Initialize modrev times
	 */
	getmicrouptime(&tv);
	ip->i_modrev = (uint64_t)(uint)tv.tv_sec << 32
			| tv.tv_usec * 4294u;
	*vpp = vp;
}
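The i_modrev assignment above packs a 64-bit revision number: the boot-relative seconds land in the high 32 bits, and the microseconds are scaled by 4294 (about 2^32 / 10^6), which keeps them within the low 32 bits because 999999 * 4294 = 4293995706 < 2^32. A standalone sketch of the same packing, assuming the kernel's struct timeval and uint64_t types:

/* Sketch: pack a getmicrouptime() result into a 64-bit modification revision. */
static uint64_t
pack_modrev(const struct timeval *tv)
{

	return ((uint64_t)(uint32_t)tv->tv_sec << 32) |
	    (uint32_t)(tv->tv_usec * 4294u);
}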
Example #4
/*
 * Initialize the vnode associated with a new hfsnode.
 */
void
hfs_vinit(struct mount *mp, int (**specops)(void *), int (**fifoops)(void *),
	   struct vnode **vpp)
{
	struct hfsnode	*hp;
	struct vnode	*vp;

	vp = *vpp;
	hp = VTOH(vp);

	vp->v_type = hfs_catalog_keyed_record_vtype(
		(hfs_catalog_keyed_record_t *)&hp->h_rec);

	switch(vp->v_type) {
		case VCHR:
		case VBLK:
			vp->v_op = specops;
			spec_node_init(vp,
			    HFS_CONVERT_RDEV(hp->h_rec.file.bsd.special.raw_device));
			break;
		case VFIFO:
			vp->v_op = fifoops;
			break;

		case VNON:
		case VBAD:
		case VSOCK:
		case VDIR:
		case VREG:
		case VLNK:
			break;
	}

	if (hp->h_rec.u.cnid == HFS_CNID_ROOT_FOLDER)
		vp->v_vflag |= VV_ROOT;

	*vpp = vp;
}
Example #5
/* ARGSUSED */
int
mfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct vnode *devvp;
	struct mfs_args *args = data;
	struct ufsmount *ump;
	struct fs *fs;
	struct mfsnode *mfsp;
	struct proc *p;
	int flags, error = 0;

	if (*data_len < sizeof *args)
		return EINVAL;

	p = l->l_proc;
	if (mp->mnt_flag & MNT_GETARGS) {
		struct vnode *vp;

		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;

		vp = ump->um_devvp;
		if (vp == NULL)
			return EIO;

		mfsp = VTOMFS(vp);
		if (mfsp == NULL)
			return EIO;

		args->fspec = NULL;
		args->base = mfsp->mfs_baseoff;
		args->size = mfsp->mfs_size;
		*data_len = sizeof *args;
		return 0;
	}
	/*
	 * XXX turn off async to avoid hangs when writing lots of data.
	 * the problem is that MFS needs to allocate pages to clean pages,
	 * so if we wait until the last minute to clean pages then there
	 * may not be any pages available to do the cleaning.
	 * ... and since the default partially-synchronous mode turns out
	 * to not be sufficient under heavy load, make it full synchronous.
	 */
	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SYNCHRONOUS;

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, l);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR))
			fs->fs_ronly = 0;
		if (args->fspec == NULL)
			return EINVAL;
		return (0);
	}
	error = getnewvnode(VT_MFS, NULL, mfs_vnodeop_p, NULL, &devvp);
	if (error)
		return (error);
	devvp->v_vflag |= VV_MPSAFE;
	devvp->v_type = VBLK;
	spec_node_init(devvp, makedev(255, mfs_minor));
	mfs_minor++;
	mfsp = kmem_alloc(sizeof(*mfsp), KM_SLEEP);
	devvp->v_data = mfsp;
	mfsp->mfs_baseoff = args->base;
	mfsp->mfs_size = args->size;
	mfsp->mfs_vnode = devvp;
	mfsp->mfs_proc = p;
	mfsp->mfs_shutdown = 0;
	cv_init(&mfsp->mfs_cv, "mfsidl");
	mfsp->mfs_refcnt = 1;
	bufq_alloc(&mfsp->mfs_buflist, "fcfs", 0);
	if ((error = ffs_mountfs(devvp, mp, l)) != 0) {
		mfsp->mfs_shutdown = 1;
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error)
		return error;
	(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
		sizeof(fs->fs_fsmnt));
	fs->fs_fsmnt[sizeof(fs->fs_fsmnt) - 1] = '\0';
	/* XXX: cleanup on error */
	return 0;
}
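mfs_mount() only reads three members of the argument block passed in through data. The struct below is a rough sketch of the shape it relies on; the field types are assumptions inferred from the accesses above, not the authoritative definition of struct mfs_args.

/* Assumed shape, inferred from args->fspec / args->base / args->size above. */
struct mfs_args_sketch {
	char	*fspec;	/* name reported via set_statvfs_info(); NULL on MNT_GETARGS */
	void	*base;	/* base address of the in-core file system image */
	u_long	 size;	/* size of the file system image in bytes */
};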
Example #6
/*
 * layer_node_alloc: make a new layerfs vnode.
 *
 * => vp is the alias vnode, lowervp is the lower vnode.
 * => We will hold a reference to lowervp.
 */
int
layer_node_alloc(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
{
    struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
    struct layer_node_hashhead *hd;
    struct layer_node *xp;
    struct vnode *vp, *nvp;
    int error;

    /* Get a new vnode and share its interlock with underlying vnode. */
    error = getnewvnode(lmp->layerm_tag, mp, lmp->layerm_vnodeop_p,
                        lowervp->v_interlock, &vp);
    if (error) {
        return error;
    }
    vp->v_type = lowervp->v_type;
    mutex_enter(vp->v_interlock);
    vp->v_iflag |= VI_LAYER;
    mutex_exit(vp->v_interlock);

    xp = kmem_alloc(lmp->layerm_size, KM_SLEEP);
    if (xp == NULL) {
        ungetnewvnode(vp);
        return ENOMEM;
    }
    if (vp->v_type == VBLK || vp->v_type == VCHR) {
        spec_node_init(vp, lowervp->v_rdev);
    }

    /*
     * Before inserting the node into the hash, check whether another
     * thread raced with us.  If so, return that node and destroy ours.
     */
    mutex_enter(&lmp->layerm_hashlock);
    if ((nvp = layer_node_find(mp, lowervp)) != NULL) {
        ungetnewvnode(vp);
        kmem_free(xp, lmp->layerm_size);
        *vpp = nvp;
        return 0;
    }

    vp->v_data = xp;
    vp->v_vflag = (vp->v_vflag & ~VV_MPSAFE) |
                  (lowervp->v_vflag & VV_MPSAFE);
    xp->layer_vnode = vp;
    xp->layer_lowervp = lowervp;
    xp->layer_flags = 0;

    /*
     * Insert the new node into the hash.
     * Add a reference to the lower node.
     */
    vref(lowervp);
    hd = LAYER_NHASH(lmp, lowervp);
    LIST_INSERT_HEAD(hd, xp, layer_hash);
    uvm_vnp_setsize(vp, 0);
    mutex_exit(&lmp->layerm_hashlock);

    *vpp = vp;
    return 0;
}
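This is the pre-vcache counterpart of Example #1: the same work done there by the loadvnode hook, plus manual hashing. A lookup routine typically checks the hash first and only allocates on a miss, as in the hypothetical sketch below; it assumes layer_node_find() returns the alias already referenced, consistent with how it is used inside layer_node_alloc() above.

/*
 * Hypothetical wrapper: reuse an existing alias for lowervp if one is
 * hashed, otherwise allocate a new one.  layer_node_alloc() re-checks
 * the hash under layerm_hashlock, so losing a race here only costs a
 * wasted allocation attempt.
 */
static int
example_layer_node_create(struct mount *mp, struct vnode *lowervp,
    struct vnode **vpp)
{
	struct layer_mount *lmp = MOUNTTOLAYERMOUNT(mp);
	struct vnode *vp;

	mutex_enter(&lmp->layerm_hashlock);
	vp = layer_node_find(mp, lowervp);
	mutex_exit(&lmp->layerm_hashlock);
	if (vp != NULL) {
		*vpp = vp;
		return 0;
	}
	return layer_node_alloc(mp, lowervp, vpp);
}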
Example #7
static int
chfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct chfs_mount *chmp;
	struct chfs_inode *ip;
	struct ufsmount *ump;
	struct vnode *vp;
	dev_t dev;
	int error;
	struct chfs_vnode_cache* chvc = NULL;
	struct chfs_node_ref* nref = NULL;
	struct buf *bp;

	dbg("vget() | ino: %llu\n", (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if (!vpp) {
		vpp = kmem_alloc(sizeof(struct vnode*), KM_SLEEP);
	}

	/* Get node from inode hash. */
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		return 0;
	}

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_CHFS,
		    mp, chfs_vnodeop_p, NULL, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
	ip = pool_get(&chfs_inode_pool, PR_WAITOK);

	mutex_enter(&chfs_hashlock);
	if ((*vpp = chfs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
		mutex_exit(&chfs_hashlock);
		ungetnewvnode(vp);
		pool_put(&chfs_inode_pool, ip);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;

	/* Initialize vnode/inode. */
	memset(ip, 0, sizeof(*ip));
	vp->v_data = ip;
	ip->vp = vp;
	ip->ch_type = VTTOCHT(vp->v_type);
	ip->ump = ump;
	ip->chmp = chmp = ump->um_chfs;
	ip->dev = dev;
	ip->ino = ino;
	vp->v_mount = mp;
	genfs_node_init(vp, &chfs_genfsops);

	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	chfs_ihashins(ip);
	mutex_exit(&chfs_hashlock);

	/* Set root inode. */
	if (ino == CHFS_ROOTINO) {
		dbg("SETROOT\n");
		vp->v_vflag |= VV_ROOT;
		vp->v_type = VDIR;
		ip->ch_type = CHT_DIR;
		ip->mode = IFMT | IEXEC | IWRITE | IREAD;
		ip->iflag |= (IN_ACCESS | IN_CHANGE | IN_UPDATE);
		chfs_update(vp, NULL, NULL, UPDATE_WAIT);
		TAILQ_INIT(&ip->dents);
		chfs_set_vnode_size(vp, 512);
	}

	mutex_enter(&chmp->chm_lock_vnocache);
	chvc = chfs_vnode_cache_get(chmp, ino);
	mutex_exit(&chmp->chm_lock_vnocache);
	if (!chvc) {
		dbg("!chvc\n");
		/* Initialize the corresponding vnode cache. */
		/* XXX, we can't alloc under a lock, refactor this! */
		chvc = chfs_vnode_cache_alloc(ino);
		mutex_enter(&chmp->chm_lock_vnocache);
		if (ino == CHFS_ROOTINO) {
			chvc->nlink = 2;
			chvc->pvno = CHFS_ROOTINO;
			chvc->state = VNO_STATE_CHECKEDABSENT;
		}
		chfs_vnode_cache_add(chmp, chvc);
		mutex_exit(&chmp->chm_lock_vnocache);

		ip->chvc = chvc;
		TAILQ_INIT(&ip->dents);
	} else {
		dbg("chvc\n");
		ip->chvc = chvc;
		/* We had a vnode cache, the node is already on flash, so read it */
		if (ino == CHFS_ROOTINO) {
			chvc->pvno = CHFS_ROOTINO;
			TAILQ_INIT(&chvc->scan_dirents);
		} else {
			chfs_readvnode(mp, ino, &vp);
		}

		mutex_enter(&chmp->chm_lock_mountfields);
		/* Initialize type specific things. */
		switch (ip->ch_type) {
		case CHT_DIR:
			/* Read every dirent. */
			nref = chvc->dirents;
			while (nref &&
			    (struct chfs_vnode_cache *)nref != chvc) {
				chfs_readdirent(mp, nref, ip);
				nref = nref->nref_next;
			}
			chfs_set_vnode_size(vp, 512);
			break;
		case CHT_REG:
			/* FALLTHROUGH */
		case CHT_SOCK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}
			break;
		case CHT_LNK:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set link. */
			dbg("size: %llu\n", (unsigned long long)ip->size);
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = ip->size;
			bp->b_data = kmem_alloc(ip->size, KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			if (!ip->target)
				ip->target = kmem_alloc(ip->size,
				    KM_SLEEP);
			memcpy(ip->target, bp->b_data, ip->size);
			kmem_free(bp->b_data, ip->size);
			putiobuf(bp);

			break;
		case CHT_CHR:
			/* FALLTHROUGH */
		case CHT_BLK:
			/* FALLTHROUGH */
		case CHT_FIFO:
			/* Collect data. */
			dbg("read_inode_internal | ino: %llu\n",
				(unsigned long long)ip->ino);
			error = chfs_read_inode_internal(chmp, ip);
			if (error) {
				vput(vp);
				*vpp = NULL;
				mutex_exit(&chmp->chm_lock_mountfields);
				return (error);
			}

			/* Set device. */
			bp = getiobuf(vp, true);
			bp->b_blkno = 0;
			bp->b_bufsize = bp->b_resid =
			    bp->b_bcount = sizeof(dev_t);
			bp->b_data = kmem_alloc(sizeof(dev_t), KM_SLEEP);
			chfs_read_data(chmp, vp, bp);
			memcpy(&ip->rdev,
			    bp->b_data, sizeof(dev_t));
			kmem_free(bp->b_data, sizeof(dev_t));
			putiobuf(bp);
			/* Set specific operations. */
			if (ip->ch_type == CHT_FIFO) {
				vp->v_op = chfs_fifoop_p;
			} else {
				vp->v_op = chfs_specop_p;
				spec_node_init(vp, ip->rdev);
			}

		    break;
		case CHT_BLANK:
			/* FALLTHROUGH */
		case CHT_BAD:
			break;
		}
		mutex_exit(&chmp->chm_lock_mountfields);

	}

	/* Finish inode initialization. */
	ip->devvp = ump->um_devvp;
	vref(ip->devvp);

	uvm_vnp_setsize(vp, ip->size);
	*vpp = vp;

	return 0;
}
Example #8
int
v7fs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
    struct v7fs_mount *v7fsmount = mp->mnt_data;
    struct v7fs_self *fs = v7fsmount->core;
    struct vnode *vp;
    struct v7fs_node *v7fs_node;
    struct v7fs_inode inode;
    int error;

    /* Lookup requested i-node */
    if ((error = v7fs_inode_load(fs, &inode, ino))) {
        DPRINTF("v7fs_inode_load failed.\n");
        return error;
    }

retry:
    mutex_enter(&mntvnode_lock);
    for (v7fs_node = LIST_FIRST(&v7fsmount->v7fs_node_head);
            v7fs_node != NULL; v7fs_node = LIST_NEXT(v7fs_node, link)) {
        if (v7fs_node->inode.inode_number == ino) {
            vp = v7fs_node->vnode;
            mutex_enter(vp->v_interlock);
            mutex_exit(&mntvnode_lock);
            if (vget(vp, LK_EXCLUSIVE) == 0) {
                *vpp = vp;
                return 0;
            } else {
                DPRINTF("retry!\n");
                goto retry;
            }
        }
    }
    mutex_exit(&mntvnode_lock);

    /* Allocate v-node. */
    if ((error = getnewvnode(VT_V7FS, mp, v7fs_vnodeop_p, NULL, &vp))) {
        DPRINTF("getnewvnode error.\n");
        return error;
    }
    /* Lock vnode here */
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

    /* Allocate i-node */
    vp->v_data = pool_get(&v7fs_node_pool, PR_WAITOK);
    memset(vp->v_data, 0, sizeof(*v7fs_node));
    v7fs_node = vp->v_data;
    mutex_enter(&mntvnode_lock);
    LIST_INSERT_HEAD(&v7fsmount->v7fs_node_head, v7fs_node, link);
    mutex_exit(&mntvnode_lock);
    v7fs_node->vnode = vp;
    v7fs_node->v7fsmount = v7fsmount;
    v7fs_node->inode = inode;/*structure copy */
    v7fs_node->lockf = NULL; /* advlock */

    genfs_node_init(vp, &v7fs_genfsops);
    uvm_vnp_setsize(vp, v7fs_inode_filesize(&inode));

    if (ino == V7FS_ROOT_INODE) {
        vp->v_type = VDIR;
        vp->v_vflag |= VV_ROOT;
    } else {
        vp->v_type = v7fs_mode_to_vtype(inode.mode);

        if (vp->v_type == VBLK || vp->v_type == VCHR) {
            dev_t rdev = inode.device;
            vp->v_op = v7fs_specop_p;
            spec_node_init(vp, rdev);
        } else if (vp->v_type == VFIFO) {
            vp->v_op = v7fs_fifoop_p;
        }
    }

    *vpp = vp;

    return 0;
}
Example #9
int
cd9660_vget_internal(struct mount *mp, ino_t ino, struct vnode **vpp,
	int relocated, struct iso_directory_record *isodir)
{
	struct iso_mnt *imp;
	struct iso_node *ip;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	imp = VFSTOISOFS(mp);
	dev = imp->im_dev;

 retry:
	if ((*vpp = cd9660_ihashget(dev, ino, LK_EXCLUSIVE)) != NULLVP)
		return (0);

	/* Allocate a new vnode/iso_node. */
	error = getnewvnode(VT_ISOFS, mp, cd9660_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	ip = pool_get(&cd9660_node_pool, PR_WAITOK);

	/*
	 * If someone beat us to it, put back the freshly allocated
	 * vnode/inode pair and retry.
	 */
	mutex_enter(&cd9660_hashlock);
	if (cd9660_ihashget(dev, ino, 0) != NULL) {
		mutex_exit(&cd9660_hashlock);
		ungetnewvnode(vp);
		pool_put(&cd9660_node_pool, ip);
		goto retry;
	}

	memset(ip, 0, sizeof(struct iso_node));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_mnt = imp;
	ip->i_devvp = imp->im_devvp;
	genfs_node_init(vp, &cd9660_genfsops);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	cd9660_ihashins(ip);
	mutex_exit(&cd9660_hashlock);

	if (isodir == 0) {
		int lbn, off;

		lbn = cd9660_lblkno(imp, ino);
		if (lbn >= imp->volume_space_size) {
			vput(vp);
			printf("fhtovp: lbn exceed volume space %d\n", lbn);
			return (ESTALE);
		}

		off = cd9660_blkoff(imp, ino);
		if (off + ISO_DIRECTORY_RECORD_SIZE > imp->logical_block_size) {
			vput(vp);
			printf("fhtovp: crosses block boundary %d\n",
			    off + ISO_DIRECTORY_RECORD_SIZE);
			return (ESTALE);
		}

		error = bread(imp->im_devvp,
			      lbn << (imp->im_bshift - DEV_BSHIFT),
			      imp->logical_block_size, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			printf("fhtovp: bread error %d\n",error);
			return (error);
		}
		isodir = (struct iso_directory_record *)((char *)bp->b_data + off);

		if (off + isonum_711(isodir->length) >
		    imp->logical_block_size) {
			vput(vp);
			if (bp != 0)
				brelse(bp, 0);
			printf("fhtovp: directory crosses block boundary %d[off=%d/len=%d]\n",
			    off +isonum_711(isodir->length), off,
			    isonum_711(isodir->length));
			return (ESTALE);
		}

#if 0
		if (isonum_733(isodir->extent) +
		    isonum_711(isodir->ext_attr_length) != ifhp->ifid_start) {
			if (bp != 0)
				brelse(bp, 0);
			printf("fhtovp: file start miss %d vs %d\n",
			    isonum_733(isodir->extent) + isonum_711(isodir->ext_attr_length),
			    ifhp->ifid_start);
			return (ESTALE);
		}
#endif
	} else
		bp = 0;

	vref(ip->i_devvp);

	if (relocated) {
		/*
		 * On relocated directories we must
		 * read the `.' entry out of a dir.
		 */
		ip->iso_start = ino >> imp->im_bshift;
		if (bp != 0)
			brelse(bp, 0);
		if ((error = cd9660_blkatoff(vp, (off_t)0, NULL, &bp)) != 0) {
			vput(vp);
			return (error);
		}
		isodir = (struct iso_directory_record *)bp->b_data;
	}

	ip->iso_extent = isonum_733(isodir->extent);
	ip->i_size = isonum_733(isodir->size);
	ip->iso_start = isonum_711(isodir->ext_attr_length) + ip->iso_extent;

	/*
	 * Setup time stamp, attribute
	 */
	vp->v_type = VNON;
	switch (imp->iso_ftype) {
	default:	/* ISO_FTYPE_9660 */
	    {
		struct buf *bp2;
		int off;
		if ((imp->im_flags & ISOFSMNT_EXTATT)
		    && (off = isonum_711(isodir->ext_attr_length)))
			cd9660_blkatoff(vp, (off_t)-(off << imp->im_bshift),
			    NULL, &bp2);
		else
			bp2 = NULL;
		cd9660_defattr(isodir, ip, bp2);
		cd9660_deftstamp(isodir, ip, bp2);
		if (bp2)
			brelse(bp2, 0);
		break;
	    }
	case ISO_FTYPE_RRIP:
		cd9660_rrip_analyze(isodir, ip, imp);
		break;
	}

	if (bp != 0)
		brelse(bp, 0);

	/*
	 * Initialize the associated vnode
	 */
	switch (vp->v_type = IFTOVT(ip->inode.iso_mode)) {
	case VFIFO:
		vp->v_op = cd9660_fifoop_p;
		break;
	case VCHR:
	case VBLK:
		/*
		 * if device, look at device number table for translation
		 */
		vp->v_op = cd9660_specop_p;
		spec_node_init(vp, ip->inode.iso_rdev);
		break;
	case VLNK:
	case VNON:
	case VSOCK:
	case VDIR:
	case VBAD:
		break;
	case VREG:
		uvm_vnp_setsize(vp, ip->i_size);
		break;
	}

	if (vp->v_type != VREG)
		uvm_vnp_setsize(vp, 0);

	if (ip->iso_extent == imp->root_extent)
		vp->v_vflag |= VV_ROOT;

	/*
	 * XXX need generation number?
	 */

	*vpp = vp;
	return (0);
}
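With relocated = 0 and isodir = NULL, the function above takes the branch that reads the directory record from disk itself, so the public vget entry point can presumably be a thin wrapper like this hypothetical sketch.

/* Hypothetical wrapper: let cd9660_vget_internal() fetch the record itself. */
static int
example_cd9660_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{

	return cd9660_vget_internal(mp, ino, vpp, 0, NULL);
}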
Example #10
/*
 * Obtain a locked vnode for the given on-disk inode number.
 *
 * We currently allocate a new vnode from getnewvnode(), attach our
 * in-core inode structure (efs_inode) to it, and read the inode in
 * from disk. The vnode is returned locked.
 *
 * Returns 0 on success.
 */
static int
efs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	int err;
	struct vnode *vp;
	struct efs_inode *eip;
	struct efs_mount *emp;

	emp = VFSTOEFS(mp);

	while (true) {
		*vpp = efs_ihashget(emp->em_dev, ino, LK_EXCLUSIVE);
		if (*vpp != NULL)
			return (0);

		err = getnewvnode(VT_EFS, mp, efs_vnodeop_p, NULL, &vp);
		if (err)
			return (err);
		
		eip = pool_get(&efs_inode_pool, PR_WAITOK);

		/*
		 * See if anybody has raced us here.  If not, continue
		 * setting up the new inode, otherwise start over.
		 */
		efs_ihashlock();

		if (efs_ihashget(emp->em_dev, ino, 0) == NULL)
			break;

		efs_ihashunlock();
		ungetnewvnode(vp);
		pool_put(&efs_inode_pool, eip);
	}

	vp->v_vflag |= VV_LOCKSWORK;
	eip->ei_mode = 0;
	eip->ei_lockf = NULL;
	eip->ei_number = ino;
	eip->ei_dev = emp->em_dev;
	eip->ei_vp = vp;
	vp->v_data = eip;

	/*
	 * Place the vnode on the hash chain. Doing so will lock the
	 * vnode, so it's okay to drop the global lock and read in
	 * the inode from disk.
	 */
	efs_ihashins(eip);
	efs_ihashunlock();

	/*
	 * Init genfs early, otherwise we'll trip up on genfs_node_destroy
	 * in efs_reclaim when vput()ing in an error branch here.
	 */
	genfs_node_init(vp, &efs_genfsops);

	err = efs_read_inode(emp, ino, NULL, &eip->ei_di);
	if (err) {
		vput(vp);
		*vpp = NULL;
		return (err);
	}

	efs_sync_dinode_to_inode(eip);

	if (ino == EFS_ROOTINO && !S_ISDIR(eip->ei_mode)) {
		printf("efs: root inode (%lu) is not a directory!\n",
		    (ulong)EFS_ROOTINO);
		vput(vp);
		*vpp = NULL;
		return (EIO);
	}

	switch (eip->ei_mode & S_IFMT) {
	case S_IFIFO:
		vp->v_type = VFIFO;
		vp->v_op = efs_fifoop_p;
		break;
	case S_IFCHR:
		vp->v_type = VCHR;
		vp->v_op = efs_specop_p;
		spec_node_init(vp, eip->ei_dev);
		break;
	case S_IFDIR:
		vp->v_type = VDIR;
		if (ino == EFS_ROOTINO)
			vp->v_vflag |= VV_ROOT;
		break;
	case S_IFBLK:
		vp->v_type = VBLK;
		vp->v_op = efs_specop_p;
		spec_node_init(vp, eip->ei_dev);
		break;
	case S_IFREG:
		vp->v_type = VREG;
		break;
	case S_IFLNK:
		vp->v_type = VLNK;
		break;
	case S_IFSOCK:
		vp->v_type = VSOCK;
		break;
	default:
		printf("efs: invalid mode 0x%x in inode %lu on mount %s\n",
		    eip->ei_mode, (ulong)ino, mp->mnt_stat.f_mntonname);
		vput(vp);
		*vpp = NULL;
		return (EIO);
	}

	uvm_vnp_setsize(vp, eip->ei_size);
	*vpp = vp;

	KASSERT(VOP_ISLOCKED(vp));

	return (0);
}
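All of these examples converge on the same step: once a vnode is known to be a block or character device, its ops table is pointed at the file system's spec ops and spec_node_init() is called with the device number before the vnode is returned. The sketch below isolates that shared pattern; the void-return prototype for spec_node_init() is inferred from the calls above.

/*
 * Shared pattern from the examples above.  Assumes
 * void spec_node_init(struct vnode *, dev_t), as implied by its uses.
 */
static void
example_init_device_vnode(struct vnode *vp, int (**spec_ops)(void *), dev_t rdev)
{

	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		vp->v_op = spec_ops;
		spec_node_init(vp, rdev);
	}
}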