Example #1
static int
zfs_replay_rename(zfsvfs_t *zsb, lr_rename_t *lr, boolean_t byteswap)
{
	char *sname = (char *)(lr + 1);	/* sname and tname follow lr_rename_t */
	char *tname = sname + strlen(sname) + 1;
	znode_t *sdzp, *tdzp;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zsb, lr->lr_sdoid, &sdzp)) != 0)
		return (error);
	zfs_znode_wait_vnode(sdzp);

	if ((error = zfs_zget(zsb, lr->lr_tdoid, &tdzp)) != 0) {
		vnode_put(ZTOV(sdzp));
		return (error);
	}
	zfs_znode_wait_vnode(tdzp);

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	error = zfs_rename(ZTOV(sdzp), sname, ZTOV(tdzp), tname, kcred, NULL, vflg);

	vnode_put(ZTOV(tdzp));
	vnode_put(ZTOV(sdzp));

	return (error);
}
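All of the ZIL replay helpers in this listing follow the same shape: look the znode up by the object id recorded in the log with zfs_zget(), wait for its vnode to be attached, perform the replayed operation, and drop the hold with vnode_put(). A minimal sketch of that pattern (obj and the replayed operation are placeholders, not part of the original source):

	znode_t *zp;
	int error;

	if ((error = zfs_zget(zsb, obj, &zp)) != 0)
		return (error);
	zfs_znode_wait_vnode(zp);	/* wait for the vnode to attach */

	/* ... replay the logged operation against ZTOV(zp) ... */

	vnode_put(ZTOV(zp));		/* release the hold taken by zfs_zget() */
	return (error);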
Example #2
static int
zfs_replay_link(zfsvfs_t *zsb, lr_link_t *lr, boolean_t byteswap)
{
	char *name = (char *)(lr + 1);	/* name follows lr_link_t */
	znode_t *dzp, *zp;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0)
		return (error);
	zfs_znode_wait_vnode(dzp);

	if ((error = zfs_zget(zsb, lr->lr_link_obj, &zp)) != 0) {
		vnode_put(ZTOV(dzp));
		return (error);
	}
	zfs_znode_wait_vnode(zp);

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	error = zfs_link(ZTOV(dzp), ZTOV(zp), name, kcred, NULL, vflg);

	vnode_put(ZTOV(zp));
	vnode_put(ZTOV(dzp));

	return (error);
}
Example #3
static void
vdev_file_io_start(zio_t *zio)
{
    vdev_t *vd = zio->io_vd;
    vdev_file_t *vf = vd->vdev_tsd;
    ssize_t resid = 0;


    if (zio->io_type == ZIO_TYPE_IOCTL) {

        if (!vdev_readable(vd)) {
            zio->io_error = SET_ERROR(ENXIO);
            zio_interrupt(zio);
            return;
        }

        switch (zio->io_cmd) {
        case DKIOCFLUSHWRITECACHE:
            if (!vnode_getwithvid(vf->vf_vnode, vf->vf_vid)) {
                zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC,
                                          kcred, NULL);
                vnode_put(vf->vf_vnode);
            }
            break;
        default:
            zio->io_error = SET_ERROR(ENOTSUP);
        }

        zio_interrupt(zio);
        return;
    }

    ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

    if (!vnode_getwithvid(vf->vf_vnode, vf->vf_vid)) {

		/*
		VERIFY3U(taskq_dispatch(vdev_file_taskq, vdev_file_io_strategy, zio,
	    TQ_PUSHPAGE), !=, 0);
		*/

        zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
                           UIO_READ : UIO_WRITE, vf->vf_vnode, zio->io_data,
                           zio->io_size, zio->io_offset, UIO_SYSSPACE,
                           0, RLIM64_INFINITY, kcred, &resid);
        vnode_put(vf->vf_vnode);
    }

    if (resid != 0 && zio->io_error == 0)
        zio->io_error = SET_ERROR(ENOSPC);

    zio_interrupt(zio);

    return;
}
Example #4
void getfinderinfo(znode_t *zp, cred_t *cr, finderinfo_t *fip)
{
	vnode_t	*xdvp = NULLVP;
	vnode_t	*xvp = NULLVP;
	struct uio		*auio = NULL;
	struct componentname  cn;
	int		error;
	uint64_t xattr = 0;

	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zp->z_zfsvfs),
	    &xattr, sizeof (xattr)) ||
	    (xattr == 0)) {
		goto nodata;
	}

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		goto nodata;
	}
	uio_addiov(auio, CAST_USER_ADDR_T(fip), sizeof (finderinfo_t));

	/*
	 * Grab the hidden attribute directory vnode.
	 *
	 * XXX - switch to embedded Finder Info when it becomes available
	 */
	if ((error = zfs_get_xattrdir(zp, &xdvp, cr, 0))) {
		goto out;
	}

	bzero(&cn, sizeof (cn));
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = ISLASTCN;
	cn.cn_nameptr = XATTR_FINDERINFO_NAME;
	cn.cn_namelen = strlen(cn.cn_nameptr);

	if ((error = zfs_dirlook(VTOZ(xdvp), cn.cn_nameptr, &xvp, 0, NULL, &cn))) {
		goto out;
	}
	error = dmu_read_uio(zp->z_zfsvfs->z_os, VTOZ(xvp)->z_id, auio,
	                     sizeof (finderinfo_t));
out:
	if (auio)
		uio_free(auio);
	if (xvp)
		vnode_put(xvp);
	if (xdvp)
		vnode_put(xdvp);
	if (error == 0)
		return;
nodata:
	bzero(fip, sizeof (finderinfo_t));
}
Example #5
static int
vniocattach_shadow(struct vn_softc *vn, struct vn_ioctl_64 *vniop, 
				   __unused dev_t dev, int in_kernel, proc_t p)
{
	vfs_context_t ctx = vfs_context_current();
	struct nameidata nd;
	int error, flags;
	shadow_map_t *	map;
	off_t file_size;

	flags = FREAD|FWRITE;
	if (in_kernel) {
		NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, vniop->vn_file, ctx);
	}
	else {
		NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, 
			   (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), 
			   vniop->vn_file, ctx);
	}
	/* vn_open gives both long- and short-term references */
	error = vn_open(&nd, flags, 0);
	if (error) {
		/* shadow MUST be writable! */
		return (error);
	}
	if (nd.ni_vp->v_type != VREG 
	    || (error = vnode_size(nd.ni_vp, &file_size, ctx))) {
		(void)vn_close(nd.ni_vp, flags, ctx);
		vnode_put(nd.ni_vp);
		return (error ? error : EINVAL);
	}
	map = shadow_map_create(vn->sc_fsize, file_size,
				0, vn->sc_secsize);
	if (map == NULL) {
		(void)vn_close(nd.ni_vp, flags, ctx);
		vnode_put(nd.ni_vp);
		vn->sc_shadow_vp = NULL;
		return (ENOMEM);
	}
	vn->sc_shadow_vp = nd.ni_vp;
	vn->sc_shadow_vid = vnode_vid(nd.ni_vp);
	vn->sc_shadow_vp->v_flag |= VNOCACHE_DATA;
	vn->sc_shadow_map = map;
	vn->sc_flags &= ~VNF_READONLY; /* we're now read/write */

	/* lose the short-term reference */
	vnode_put(nd.ni_vp);
	return(0);
}
Example #6
static int
zfs_replay_remove(zfsvfs_t *zsb, lr_remove_t *lr, boolean_t byteswap)
{
	char *name = (char *)(lr + 1);	/* name follows lr_remove_t */
	znode_t *dzp;
	int error;
	int vflg = 0;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zsb, lr->lr_doid, &dzp)) != 0)
		return (error);
	zfs_znode_wait_vnode(dzp);

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	switch ((int)lr->lr_common.lrc_txtype) {
	case TX_REMOVE:
		error = zfs_remove(ZTOV(dzp), name, kcred, NULL, vflg);
		break;
	case TX_RMDIR:
		error = zfs_rmdir(ZTOV(dzp), name, NULL, kcred, NULL, vflg);
		break;
	default:
		error = SET_ERROR(ENOTSUP);
	}

	vnode_put(ZTOV(dzp));

	return (error);
}
Example #7
static int
zfs_replay_truncate(zfsvfs_t *zsb, lr_truncate_t *lr, boolean_t byteswap)
{
	znode_t *zp;
	struct flock fl;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
		return (error);
	zfs_znode_wait_vnode(zp);

	bzero(&fl, sizeof (fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = 0;
	fl.l_start = lr->lr_offset;
	fl.l_len = lr->lr_length;

	error = zfs_space(ZTOV(zp), F_FREESP, &fl, FWRITE | FOFFMAX,
                      lr->lr_offset, kcred, NULL);

	vnode_put(ZTOV(zp));

	return (error);
}
Example #8
static int
nullfs_readlink(struct vnop_readlink_args * ap)
{
	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
	int error;
	struct vnode *vp, *lvp;

	if (nullfs_checkspecialvp(ap->a_vp)) {
		return ENOTSUP; /* the special vnodes aren't links */
	}

	vp  = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);

	error = vnode_getwithref(lvp);
	if (error == 0) {
		error = VNOP_READLINK(lvp, ap->a_uio, ap->a_context);
		vnode_put(lvp);

		if (error) {
			NULLFSDEBUG("readlink failed: %d\n", error);
		}
	}

	return error;
}
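The nullfs pass-through operations in this listing (readlink, readdir, getattr, read) all use the same idiom: map the shadow vnode to the lower vnode, take an iocount on it with vnode_getwithref() or vnode_getwithvid(), forward the operation, and release the iocount with vnode_put(). As an illustration, a hypothetical pathconf forwarder written in the same style might look like this (nullfs_pathconf is not part of the original sources; the special-vnode check is omitted for brevity):

static int
nullfs_pathconf(struct vnop_pathconf_args * ap)
{
	struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);
	int error;

	error = vnode_getwithref(lvp); /* iocount on the lower vnode */
	if (error == 0) {
		error = VNOP_PATHCONF(lvp, ap->a_name, ap->a_retval, ap->a_context);
		vnode_put(lvp); /* drop the iocount */
	}

	return error;
}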
Example #9
static int
zfs_replay_acl_v0(zfsvfs_t *zsb, lr_acl_v0_t *lr, boolean_t byteswap)
{
	ace_t *ace = (ace_t *)(lr + 1);	/* ace array follows lr_acl_t */
	vsecattr_t vsa;
	znode_t *zp;
	int error;

	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		zfs_oldace_byteswap(ace, lr->lr_aclcnt);
	}

	if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
		return (error);
	zfs_znode_wait_vnode(zp);

	bzero(&vsa, sizeof (vsa));
	vsa.vsa_mask = VSA_ACE | VSA_ACECNT;
	vsa.vsa_aclcnt = lr->lr_aclcnt;
	vsa.vsa_aclentsz = sizeof (ace_t) * vsa.vsa_aclcnt;
	vsa.vsa_aclflags = 0;
	vsa.vsa_aclentp = ace;

	error = zfs_setsecattr(ZTOV(zp), &vsa, 0, kcred, NULL);

	vnode_put(ZTOV(zp));

	return (error);
}
Example #10
int readFile(char *file, uint8_t *buffer, off_t offset, user_size_t size) {
    int res = EIO;

    vfs_context_t vfsContext = vfs_context_create(NULL);
    if (vfsContext == NULL) {
        return EIO;
    }

    vnode_t fileVnode = NULLVP;
    uio_t uio = NULL;
    if (vnode_lookup(file, 0, &fileVnode, vfsContext) == 0) {
        uio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ);
        if (uio == NULL)
            goto exit;

        if (uio_addiov(uio, CAST_USER_ADDR_T(buffer), size))
            goto exit;

        if (VNOP_READ(fileVnode, uio, 0, vfsContext))
            goto exit;

        if (uio_resid(uio))
            goto exit;

        res = 0;
    } else {
        vfs_context_rele(vfsContext);
        return ENOENT;
    }

exit:
    if (uio != NULL)
        uio_free(uio); /* release the uio allocated for this read */
    vnode_put(fileVnode);
    vfs_context_rele(vfsContext);
    return res;
}
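A hypothetical call site for the helper above (path, offset, and buffer size are illustrative):

    char path[] = "/tmp/example.bin";
    uint8_t header[64];
    int err = readFile(path, header, 0, sizeof(header));
    if (err != 0)
        printf("readFile failed: %d\n", err);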
Example #11
static int
nullfs_readdir(struct vnop_readdir_args * ap)
{
	struct vnode *vp, *lvp;
	int error;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
	/* assumption is that any vp that comes through here had to go through lookup
	 */

	lck_mtx_lock(&null_mp->nullm_lock);
	if (nullfs_isspecialvp(ap->a_vp)) {
		error = nullfs_special_readdir(ap);
		lck_mtx_unlock(&null_mp->nullm_lock);
		return error;
	}
	lck_mtx_unlock(&null_mp->nullm_lock);

	vp    = ap->a_vp;
	lvp   = NULLVPTOLOWERVP(vp);
	error = vnode_getwithref(lvp);
	if (error == 0) {
		error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context);
		vnode_put(lvp);
	}

	return error;
}
Example #12
static int
vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_t ctx)
{
	vnode_t vp;
	struct mount *mp = NULL;
	char  *str;
	char ch;
	uint32_t  id;
	ino64_t ino;
	int error;
	int length;

	/* Get file system id and move str to next component. */
	id = strtoul(path, &str, 10);
	if (id == 0 || str[0] != '/') {
		return (EINVAL);
	}
	while (*str == '/') {
		str++;
	}
	ch = *str;

	mp = mount_lookupby_volfsid(id, 1);
	if (mp == NULL) {
		return (EINVAL);  /* unexpected failure */
	}
	/* Check for an alias to a file system root. */
	if (ch == '@' && str[1] == '\0') {
		ino = 2;
		str++;
	} else {
		/* Get file id and move str to next component. */
		ino = strtouq(str, &str, 10);
	}

	/* Get the target vnode. */
	if (ino == 2) {
		error = VFS_ROOT(mp, &vp, ctx);
	} else {
		error = VFS_VGET(mp, ino, &vp, ctx);
	}
	vfs_unbusy(mp);
	if (error) {
		goto out;
	}
	realpath[0] = '\0';

	/* Get the absolute path to this vnode. */
	error = build_path(vp, realpath, bufsize, &length, 0, ctx);
	vnode_put(vp);

	if (error == 0 && *str != '\0') {
		int attempt = strlcat(realpath, str, MAXPATHLEN);
		if (attempt > MAXPATHLEN){
			error = ENAMETOOLONG;
		}
	}
out:
	return (error);
}
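For illustration (the numbers are made up): a volfs-style string such as "2/12345" names inode 12345 on the mount whose volfs id is 2, so a caller in the same file might resolve it with:

	char real[MAXPATHLEN];
	int err = vfs_getrealpath("2/12345", real, sizeof (real), vfs_context_current());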
Example #13
static int
nullfs_getattr(struct vnop_getattr_args * args)
{
	int error;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);

	lck_mtx_lock(&null_mp->nullm_lock);
	if (nullfs_isspecialvp(args->a_vp)) {
		error = nullfs_special_getattr(args);
		lck_mtx_unlock(&null_mp->nullm_lock);
		return error;
	}
	lck_mtx_unlock(&null_mp->nullm_lock);

	/* this will return a different inode for third than read dir will */
	struct vnode * lowervp = NULLVPTOLOWERVP(args->a_vp);

	error = vnode_getwithref(lowervp);
	if (error == 0) {
		error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context);
		vnode_put(lowervp);

		if (error == 0) {
			/* fix up fsid so it doesn't report the underlying fs */
			VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]);
		}
	}

	return error;
}
Example #14
int
imageboot_setup()
{
	dev_t       dev;
	int         error = 0;
	char *root_path = NULL;

	DBG_TRACE("%s: entry\n", __FUNCTION__);

	MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (root_path == NULL)
		return (ENOMEM);

	if(PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == FALSE) {
		error = ENOENT;
		goto done;
	}

	printf("%s: root image url is %s\n", __FUNCTION__, root_path);
	error = di_root_image(root_path, rootdevice, &dev);
	if(error) {
		printf("%s: di_root_image failed: %d\n", __FUNCTION__, error);
		goto done;
	}

	rootdev = dev;
	mountroot = NULL;
	printf("%s: root device 0x%x\n", __FUNCTION__, rootdev);
	error = vfs_mountroot();

	if (error == 0 && rootvnode != NULL) {
		struct vnode *tvp;
		struct vnode *newdp;

		/*
		 * Get the vnode for '/'.
		 * Set fdp->fd_fd.fd_cdir to reference it.
		 */
		if (VFS_ROOT(TAILQ_LAST(&mountlist,mntlist), &newdp, vfs_context_kernel()))
			panic("%s: cannot find root vnode", __FUNCTION__);

		vnode_ref(newdp);
		vnode_put(newdp);
		tvp = rootvnode;
		vnode_rele(tvp);
		filedesc0.fd_cdir = newdp;
		rootvnode = newdp;
		mount_list_lock();
		TAILQ_REMOVE(&mountlist, TAILQ_FIRST(&mountlist), mnt_list);
		mount_list_unlock();
		mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;
		DBG_TRACE("%s: root switched\n", __FUNCTION__);
	}
done:
	FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI);

	DBG_TRACE("%s: exit\n", __FUNCTION__);

	return (error);
}
Example #15
int op_chown (const char *path, uid_t uid, gid_t gid)
{
	int rt;
	int rc;
	ino_t ino;
	struct ufs_vnode *vnode;
	struct inode *inode;
	uufsd_t *ufs = current_ufs();

	RETURN_IF_RDONLY(ufs);

	debugf("enter");
	debugf("path = %s", path);
	
	rt = do_readvnode(ufs, path, &ino, &vnode);
	if (rt) {
		debugf("do_readvnode(%s, &ino, &vnode); failed", path);
		return rt;
	}
	inode = vnode2inode(vnode);
	
	if (uid != -1)
		inode->i_uid = uid;
	if (gid != -1)
		inode->i_gid = gid;

	rc=vnode_put(vnode,1);
	if (rc) {
		debugf("vnode_put(vnode,1); failed");
		return -EIO;
	}

	debugf("leave");
	return 0;
}
Example #16
/*
 * TX_WRITE2 records are only generated when dmu_sync() returns EALREADY,
 * meaning the pool block is already being synced. So now that we always write
 * out full blocks, all we have to do is expand the eof if
 * the file is grown.
 */
static int
zfs_replay_write2(zfsvfs_t *zsb, lr_write_t *lr, boolean_t byteswap)
{
	znode_t	*zp;
	int error;
	uint64_t end;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
		return (error);
	zfs_znode_wait_vnode(zp);

top:
	end = lr->lr_offset + lr->lr_length;
	if (end > zp->z_size) {
		dmu_tx_t *tx = dmu_tx_create(zsb->z_os);

		zp->z_size = end;
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			vnode_put(ZTOV(zp));
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			dmu_tx_abort(tx);
			return (error);
		}
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
		    (void *)&zp->z_size, sizeof (uint64_t), tx);

		/* Ensure the replayed seq is updated */
		(void) zil_replaying(zsb->z_log, tx);

		dmu_tx_commit(tx);
	}

	vnode_put(ZTOV(zp));

	return (error);
}
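As a concrete illustration of the eof expansion described in the comment above (the numbers are made up): replaying a TX_WRITE2 record with lr_offset = 0x20000 and lr_length = 0x1000 gives end = 0x21000, so if z_size was 0x20800 the file is grown to 0x21000, while a z_size that is already larger is left untouched.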
Example #17
static int
vdev_file_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf = vd->vdev_tsd;
	ssize_t resid = 0;


	if (zio->io_type == ZIO_TYPE_IOCTL) {

		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			vnode_getwithvid(vf->vf_vnode, vf->vf_vid);
			zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC,
			    kcred, NULL);
			vnode_put(vf->vf_vnode);
			break;
		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}

	vnode_getwithvid(vf->vf_vnode, vf->vf_vid);
	zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
	    UIO_READ : UIO_WRITE, vf->vf_vnode, zio->io_data,
	    zio->io_size, zio->io_offset, UIO_SYSSPACE,
	    0, RLIM64_INFINITY, kcred, &resid);
	vnode_put(vf->vf_vnode);


	if (resid != 0 && zio->io_error == 0)
		zio->io_error = ENOSPC;

	zio_interrupt(zio);

	return (ZIO_PIPELINE_STOP);
}
Example #18
/**
 * Unmount VBoxVFS.
 *
 * @param mp        Mount data provided by VFS layer.
 * @param fFlags    Unmounting flags.
 * @param pContext  kauth context needed in order to authenticate the mount operation.
 *
 * @return 0 on success or BSD error code otherwise.
 */
static int
vboxvfs_unmount(struct mount *mp, int fFlags, vfs_context_t pContext)
{
    NOREF(pContext);

    vboxvfs_mount_t *pMount;
    int                  rc = EBUSY;
    int                  fFlush = (fFlags & MNT_FORCE) ? FORCECLOSE : 0;

    PDEBUG("Attempting to %s unmount a shared folder", (fFlags & MNT_FORCE) ? "forcibly" : "normally");

    AssertReturn(mp, EINVAL);

    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);

    AssertReturn(pMount,             EINVAL);
    AssertReturn(pMount->pRootVnode, EINVAL);

    /* Check if we can do unmount at the moment */
    if (!vnode_isinuse(pMount->pRootVnode, 1))
    {
        /* Flush child vnodes first */
        rc = vflush(mp, pMount->pRootVnode, fFlush);
        if (rc == 0)
        {
            /* Flush root vnode */
            rc = vflush(mp, NULL, fFlush);
            if (rc == 0)
            {
                vfs_setfsprivate(mp, NULL);

                rc = VbglR0SfUnmapFolder(&g_vboxSFClient, &pMount->pMap);
                if (RT_SUCCESS(rc))
                {
                    vboxvfs_destroy_internal_data(&pMount);
                    PDEBUG("A shared folder has been successfully unmounted");
                    return 0;
                }

                PDEBUG("Unable to unmount shared folder");
                rc = EPROTO;
            }
            else
                PDEBUG("Unable to flush filesystem before unmount, some data might be lost");
        }
        else
            PDEBUG("Unable to flush child vnodes");

    }
    else
        PDEBUG("Root vnode is in use, can't unmount");

    vnode_put(pMount->pRootVnode);

    return rc;
}
Example #19
/*
 * Make a new or get existing nullfs node.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * lowervp is assumed to have an iocount on it from the caller
 */
int
null_nodeget(
    struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root)
{
	struct vnode * vp;
	int error;

	/* Lookup the hash firstly. */
	error = null_hashget(mp, lowervp, vpp);
	/* ENOENT means it wasn't found, EIO is a failure we should bail from, 0 is it
	 * was found */
	if (error != ENOENT) {
		/* null_hashget checked the vid, so if we got something here it's legit to
		 * the best of our knowledge */
		/* if we found something then there is an iocount on vpp,
		   if we didn't find something then vpp shouldn't be used by the caller */
		return error;
	}

	/*
	 * We do not serialize vnode creation, instead we will check for
	 * duplicates later, when adding new vnode to hash.
	 */
	error = vnode_ref(lowervp); // take a ref on lowervp so we let the system know we care about it
	if(error)
	{
		// Failed to get a reference on the lower vp so bail. Lowervp may be gone already.
		return error;
	}

	error = null_getnewvnode(mp, lowervp, dvp, &vp, cnp, root);

	if (error) {
		vnode_rele(lowervp);
		return (error);
	}

	/*
	 * Atomically insert our new node into the hash or vget existing
	 * if someone else has beaten us to it.
	 */
	error = null_hashins(mp, VTONULL(vp), vpp);
	if (error || *vpp != NULL) {
		/* recycle will call reclaim which will get rid of the internals */
		vnode_recycle(vp);
		vnode_put(vp);
		/* if we found vpp, then null_hashins put an iocount on it */
		return error;
	}

	/* vp has an iocount from null_getnewvnode */
	*vpp = vp;

	return (0);
}
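A typical caller is the nullfs lookup path; sketched here with illustrative locals (lowervp already carries an iocount from the lower filesystem's lookup, dvp is the nullfs directory being searched, and cnp describes the name component):

	struct vnode *vp = NULL;

	error = null_nodeget(vnode_mount(dvp), lowervp, dvp, &vp, cnp, 0 /* not root */);
	if (error == 0) {
		/* vp now carries an iocount for the caller; drop it with vnode_put(vp) when done */
	}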
Example #20
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *name)
{
	vdev_t *mvd;
	int c;
	char pathbuf[MAXPATHLEN];
	char *lookup_name;
	int err = 0;

	if (!vd) return NULL;

	// Check both strings are valid
	if (name && *name &&
		vd->vdev_path && vd->vdev_path[0]) {
		int off;
		struct vnode *vp;

		lookup_name = vd->vdev_path;

		// We need to resolve symlinks here to get the final source name
		dprintf("ZFS: Looking up '%s'\n", vd->vdev_path);

		if ((err = vnode_lookup(vd->vdev_path, 0,
								&vp, vfs_context_current())) == 0) {
			int len = MAXPATHLEN;

			if ((err = vn_getpath(vp, pathbuf, &len)) == 0) {
				dprintf("ZFS: '%s' resolved name is '%s'\n",
						vd->vdev_path, pathbuf);
				lookup_name = pathbuf;
			}

			vnode_put(vp);
		}

		if (err) dprintf("ZFS: Lookup failed %d\n", err);

		// Skip /dev/ or not?
		strncmp("/dev/", lookup_name, 5) == 0 ? off=5 : off=0;

		dprintf("ZFS: vdev '%s' == '%s' ?\n", name,
				&lookup_name[off]);

		if (!strcmp(name, &lookup_name[off])) return vd;
	}

	for (c = 0; c < vd->vdev_children; c++)
		if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], name)) !=
			NULL)
			return (mvd);

	return (NULL);
}
Example #21
static int
zfs_replay_setattr(zfsvfs_t *zsb, lr_setattr_t *lr, boolean_t byteswap)
{
	znode_t *zp;
	xvattr_t xva;
	vattr_t *vap = &xva.xva_vattr;
	int error;
	void *start;

	xva_init(&xva);
	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));

		if ((lr->lr_mask & ATTR_XVATTR) &&
		    zsb->z_version >= ZPL_VERSION_INITIAL)
			zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
	}

	if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
		return (error);
	zfs_znode_wait_vnode(zp);

	zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
	    lr->lr_uid, lr->lr_gid, 0, lr->lr_foid);

	vap->va_size = lr->lr_size;
	ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime);

	/*
	 * Fill in xvattr_t portions if necessary.
	 */

	start = (lr_setattr_t *)(lr + 1);
	if (vap->va_mask & ATTR_XVATTR) {
		zfs_replay_xvattr((lr_attr_t *)start, &xva);
		start = (caddr_t)start +
		    ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize);
	} else
		xva.xva_vattr.va_mask &= ~ATTR_XVATTR;

	zsb->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
	    lr->lr_uid, lr->lr_gid);

	error = zfs_setattr(ZTOV(zp), vap, 0, kcred, NULL);

	zfs_fuid_info_free(zsb->z_fuid_replay);
	zsb->z_fuid_replay = NULL;
	vnode_put(ZTOV(zp));

	return (error);
}
Example #22
static int
null_reclaim(struct vnop_reclaim_args * ap)
{
	struct vnode * vp;
	struct null_node * xp;
	struct vnode * lowervp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp      = VTONULL(vp);
	lowervp = xp->null_lowervp;

	lck_mtx_lock(&null_mp->nullm_lock);

	vnode_removefsref(vp);

	if (lowervp != NULL) {
		/* root and second don't have a lowervp, so nothing to release and nothing
		 * got hashed */
		if (xp->null_flags & NULL_FLAG_HASHED) {
			/* only call this if we actually made it into the hash list. reclaim gets
			   called also to
			   clean up a vnode that got created when it didn't need to under race
			   conditions */
			null_hashrem(xp);
		}
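		/*
		 * Take an iocount so lowervp cannot be reclaimed out from under
		 * us while we drop the long-term usecount that null_nodeget()
		 * took with vnode_ref(), then release the iocount again.
		 */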
		vnode_getwithref(lowervp);
		vnode_rele(lowervp);
		vnode_put(lowervp);
	}

	if (vp == null_mp->nullm_rootvp) {
		null_mp->nullm_rootvp = NULL;
	} else if (vp == null_mp->nullm_secondvp) {
		null_mp->nullm_secondvp = NULL;
	} else if (vp == null_mp->nullm_thirdcovervp) {
		null_mp->nullm_thirdcovervp = NULL;
	}

	lck_mtx_unlock(&null_mp->nullm_lock);

	cache_purge(vp);
	vnode_clearfsnode(vp);

	FREE(xp, M_TEMP);

	return 0;
}
Example #23
void
vm_swapfile_open(const char *path, vnode_t *vp)
{
	int error = 0;
	vfs_context_t	ctx = vfs_context_current();

	if ((error = vnode_open(path, (O_CREAT | O_TRUNC | FREAD | FWRITE), S_IRUSR | S_IWUSR, 0, vp, ctx))) {
		printf("Failed to open swap file %d\n", error);
		*vp = NULL;
		return;
	}	

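	/*
	 * vnode_open() returned *vp with an iocount as well as the reference
	 * from the open itself; drop the iocount here and let the open
	 * reference keep the swap file around until vnode_close().
	 */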
	vnode_put(*vp);
}
Example #24
/*
 * File handle to vnode pointer
 */
static int
zfs_vfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp,
               struct vnode **vpp, __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	zfs_zfid_t	*zfid = (zfs_zfid_t *)fhp;
	znode_t		*zp;
	uint64_t	obj_num = 0;
	uint64_t	fid_gen = 0;
	uint64_t	zp_gen;
	int 		i;
	int		error;

	*vpp = NULL;

	ZFS_ENTER(zfsvfs);

	if (fhlen < sizeof (zfs_zfid_t)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Grab the object and gen numbers in an endian neutral manner
	 */
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		obj_num |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
	
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);

	if ((error = zfs_zget(zfsvfs, obj_num, &zp))) {
		goto out;
	}

	zp_gen = zp->z_phys->zp_gen;
	if (zp_gen == 0)
		zp_gen = 1;

	if (zp->z_unlinked || zp_gen != fid_gen) {
		vnode_put(ZTOV(zp));
		error = EINVAL;
		goto out;
	}
	*vpp = ZTOV(zp);
out:
	ZFS_EXIT(zfsvfs);
	return (error);
}
Example #25
/*
 * Replaying ACLs is complicated by FUID support.
 * The log record may contain some optional data
 * to be used for replaying FUID's.  These pieces
 * are the actual FUIDs that were created initially.
 * The FUID table index may no longer be valid and
 * during zfs_create() a new index may be assigned.
 * Because of this the log will contain the original
 * domain+rid in order to create a new FUID.
 *
 * The individual ACEs may contain an ephemeral uid/gid which is no
 * longer valid and will need to be replaced with an actual FUID.
 *
 */
static int
zfs_replay_acl(zfsvfs_t *zsb, lr_acl_t *lr, boolean_t byteswap)
{
	ace_t *ace = (ace_t *)(lr + 1);
	vsecattr_t vsa;
	znode_t *zp;
	int error;

	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
		if (lr->lr_fuidcnt) {
			byteswap_uint64_array((caddr_t)ace +
			    ZIL_ACE_LENGTH(lr->lr_acl_bytes),
			    lr->lr_fuidcnt * sizeof (uint64_t));
		}
	}

	if ((error = zfs_zget(zsb, lr->lr_foid, &zp)) != 0)
		return (error);
	zfs_znode_wait_vnode(zp);

	bzero(&vsa, sizeof (vsa));
	vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
	vsa.vsa_aclcnt = lr->lr_aclcnt;
	vsa.vsa_aclentp = ace;
	vsa.vsa_aclentsz = lr->lr_acl_bytes;
	vsa.vsa_aclflags = lr->lr_acl_flags;

	if (lr->lr_fuidcnt) {
		void *fuidstart = (caddr_t)ace +
		    ZIL_ACE_LENGTH(lr->lr_acl_bytes);

		zsb->z_fuid_replay =
		    zfs_replay_fuids(fuidstart, &fuidstart,
		    lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
	}

	error = zfs_setsecattr(ZTOV(zp), &vsa, 0, kcred, NULL);

	if (zsb->z_fuid_replay)
		zfs_fuid_info_free(zsb->z_fuid_replay);

	zsb->z_fuid_replay = NULL;
	vnode_put(ZTOV(zp));

	return (error);
}
Example #26
int
cttyselect(__unused dev_t dev, int flag, void* wql, __unused proc_t p)
{
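	/*
	 * cttyvp() hands back the controlling terminal's vnode with an
	 * iocount held (or NULLVP if there is none), which is why these
	 * ctty routines all finish with vnode_put().
	 */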
	vnode_t ttyvp = cttyvp(current_proc());
	struct vfs_context context;
	int error;

	context.vc_thread = current_thread();
	context.vc_ucred = NOCRED;

	if (ttyvp == NULL)
		return (1);	/* try operation to get EOF/failure */
	error = VNOP_SELECT(ttyvp, flag, FREAD|FWRITE, wql, &context);
	vnode_put(ttyvp);
	return (error);
}
Example #27
int
osi_lookupname(char *aname, enum uio_seg seg, int followlink,
	       struct vnode **vpp) {
    vfs_context_t ctx;
    int code, flags;

    flags = 0;
    if (!followlink)
	flags |= VNODE_LOOKUP_NOFOLLOW;
    ctx=vfs_context_create(NULL);
    code = vnode_lookup(aname, flags, vpp, ctx);
    if (!code) { /* get a usecount */
	vnode_ref(*vpp);
	vnode_put(*vpp);
    }
    vfs_context_rele(ctx);
    return code;
}
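The vnode_ref()/vnode_put() pair above converts the short-term iocount returned by vnode_lookup() into a long-term usecount on *vpp; a caller that is finished with the vnode is then expected to drop that reference itself (sketch):

    vnode_rele(*vpp); /* release the usecount taken by osi_lookupname() */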
Example #28
int
cttywrite(__unused dev_t dev, struct uio *uio, int flag)
{
	vnode_t ttyvp = cttyvp(current_proc());
	struct vfs_context context;
	int error;

	if (ttyvp == NULL)
		return (EIO);

	context.vc_thread = current_thread();
	context.vc_ucred = NOCRED;

	error = VNOP_WRITE(ttyvp, uio, flag, &context);
	vnode_put(ttyvp);

	return (error);
}
Example #29
File: tty_tty.c Project: 0xffea/xnu
int
cttyopen(__unused dev_t dev, int flag, __unused int mode, proc_t p)
{
	vnode_t ttyvp = cttyvp(p);
	struct vfs_context context;
	int error;

	if (ttyvp == NULL)
		return (ENXIO);

	context.vc_thread = current_thread();
	context.vc_ucred = kauth_cred_proc_ref(p);

	error = VNOP_OPEN(ttyvp, flag, &context);
	vnode_put(ttyvp);
	kauth_cred_unref(&context.vc_ucred);

	return (error);
}
Example #30
static int
nullfs_read(struct vnop_read_args * ap)
{
	int error = EIO;

	struct vnode *vp, *lvp;

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	if (nullfs_checkspecialvp(ap->a_vp)) {
		return ENOTSUP; /* the special vnodes can't be read */
	}

	vp  = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);

	/*
	 * First some house keeping
	 */
	if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
		if (!vnode_isreg(lvp) && !vnode_islnk(lvp)) {
			error = EPERM;
			goto end;
		}

		if (uio_resid(ap->a_uio) == 0) {
			error = 0;
			goto end;
		}

		/*
		 * Now ask VM/UBC/VFS to do our bidding
		 */

		error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ap->a_context);
		if (error) {
			NULLFSDEBUG("VNOP_READ failed: %d\n", error);
		}
	end:
		vnode_put(lvp);
	}
	return error;
}