Example #1
static int
nullfs_getattr(struct vnop_getattr_args * args)
{
	int error;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp));
	NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp);

	lck_mtx_lock(&null_mp->nullm_lock);
	if (nullfs_isspecialvp(args->a_vp)) {
		error = nullfs_special_getattr(args);
		lck_mtx_unlock(&null_mp->nullm_lock);
		return error;
	}
	lck_mtx_unlock(&null_mp->nullm_lock);

	/* this will return a different inode for the third vnode than readdir will */
	struct vnode * lowervp = NULLVPTOLOWERVP(args->a_vp);

	error = vnode_getwithref(lowervp);
	if (error == 0) {
		error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context);
		vnode_put(lowervp);

		if (error == 0) {
			/* fix up fsid so it doesn't report the underlying fs */
			VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]);
		}
	}

	return error;
}
Example #2
static int
nullfs_readdir(struct vnop_readdir_args * ap)
{
	struct vnode *vp, *lvp;
	int error;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);
	/* assumption is that any vp that comes through here had to go through lookup */

	lck_mtx_lock(&null_mp->nullm_lock);
	if (nullfs_isspecialvp(ap->a_vp)) {
		error = nullfs_special_readdir(ap);
		lck_mtx_unlock(&null_mp->nullm_lock);
		return error;
	}
	lck_mtx_unlock(&null_mp->nullm_lock);

	vp    = ap->a_vp;
	lvp   = NULLVPTOLOWERVP(vp);
	error = vnode_getwithref(lvp);
	if (error == 0) {
		error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context);
		vnode_put(lvp);
	}

	return error;
}
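
The two nullfs examples above share one idiom: map the shadow vnode to the lower vnode it covers, pin the lower vnode with an iocount, forward the operation, then drop the iocount. A minimal sketch of that idiom follows; the helper name is illustrative, and VNOP_FSYNC stands in for whichever operation is being forwarded:

static int
nullfs_forward_sketch(vnode_t vp, vfs_context_t ctx)
{
	vnode_t lowervp = NULLVPTOLOWERVP(vp);
	int error = vnode_getwithref(lowervp); /* iocount keeps lowervp alive */

	if (error == 0) {
		error = VNOP_FSYNC(lowervp, MNT_WAIT, ctx); /* forward any vnop here */
		vnode_put(lowervp); /* drop the iocount */
	}
	return error;
}
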
Example #3
int
fuse_vncache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp)
{
#if M_OSXFUSE_ENABLE_BIG_LOCK
    /*
     * Make sure that biglock is actually held by the thread calling us before
     * trying to unlock it. fuse_vncache_lookup is called by notification
     * handlers that do not hold biglock. Trying to unlock it in this case would
     * result in a kernel panic.
     */

    struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp));
    bool biglock_locked = fuse_biglock_have_lock(data->biglock);

    if (biglock_locked) {
        fuse_biglock_unlock(data->biglock);
    }
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */
    int ret = cache_lookup(dvp, vpp, cnp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
    if (biglock_locked) {
        fuse_biglock_lock(data->biglock);
    }
#endif

#if FUSE_TRACE_VNCACHE
    IOLog("osxfuse: cache lookup ret=%d, dvp=%p, *vpp=%p, %s\n",
          ret, dvp, *vpp, cnp->cn_nameptr);
#endif
    return ret;
}
Example #4
/* Get lvp's parent, if possible, even if it isn't set.

   lvp is expected to have an iocount before and after this call.

   If dvpp is populated, the returned vnode has an iocount. */
static int
null_get_lowerparent(vnode_t lvp, vnode_t * dvpp, vfs_context_t ctx)
{
	int error = 0;
	struct vnode_attr va;
	mount_t mp  = vnode_mount(lvp);
	vnode_t dvp = vnode_parent(lvp);

	if (dvp) {
		error = vnode_get(dvp);
		goto end;
	}

	error = ENOENT;
	if (!(mp->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
		goto end;
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_parentid);

	error = vnode_getattr(lvp, &va, ctx);

	if (error || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
		goto end;
	}

	error = VFS_VGET(mp, (ino64_t)va.va_parentid, &dvp, ctx);

end:
	if (error == 0) {
		*dvpp = dvp;
	}
	return error;
}
Example #5
/**
 * Wrapper function for vboxvfs_guest_path_to_shflstring_path_internal() which
 * converts guest path to host path using vnode object information.
 *
 * @param vnode     Guest's VFS object
 * @param ppResult  Allocated PSHFLSTRING object which contains the path
 *
 * @return 0 on success, error code otherwise.
 */
int
vboxvfs_guest_vnode_to_shflstring_path_internal(vnode_t vnode, PSHFLSTRING *ppResult)
{
    mount_t     mp;
    int         rc;

    char       *pszPath;
    int         cbPath = MAXPATHLEN;

    AssertReturn(ppResult, EINVAL);
    AssertReturn(vnode,    EINVAL);
    mp = vnode_mount(vnode);
    AssertReturn(mp, EINVAL);

    pszPath = (char *)RTMemAllocZ(cbPath);
    if (pszPath)
    {
        rc = vn_getpath(vnode, pszPath, &cbPath);
        if (rc == 0)
        {
            return vboxvfs_guest_path_to_shflstring_path_internal(mp, pszPath, cbPath, ppResult);
        }
    }
    else
    {
        rc = ENOMEM;
    }

    return rc;
}
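
The wrapper above relies on vn_getpath(), which fills a caller-supplied buffer with the vnode's path and returns the number of bytes used (including the terminating NUL) through its in/out length argument. A minimal sketch of that call, assuming a stack buffer for brevity where the VirtualBox code above allocates from the heap:

static int
log_vnode_path_sketch(vnode_t vnode)
{
    char path[MAXPATHLEN];
    int  cbPath = sizeof(path);
    int  rc = vn_getpath(vnode, path, &cbPath); /* cbPath comes back as bytes used */

    if (rc == 0)
        printf("vnode path: %s\n", path);
    return rc;
}
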
Example #6
/**
 * Wrapper function for vboxvfs_guest_path_to_char_path_internal() which
 * converts guest path to host path using vnode object information.
 *
 * @param vnode         Guest's VFS object
 * @param ppHostPath    Allocated char * which contains the path
 * @param pcbHostPath   Size of ppHostPath
 *
 * @return 0 on success, error code otherwise.
 */
int
vboxvfs_guest_vnode_to_char_path_internal(vnode_t vnode, char **ppHostPath, int *pcbHostPath)
{
    mount_t     mp;
    int         rc;

    char       *pszPath;
    int         cbPath = MAXPATHLEN;

    AssertReturn(ppHostPath,   EINVAL);
    AssertReturn(pcbHostPath,  EINVAL);
    AssertReturn(vnode,    EINVAL);
    mp = vnode_mount(vnode);
    AssertReturn(mp, EINVAL);

    pszPath = (char *)RTMemAllocZ(cbPath);
    if (pszPath)
    {
        rc = vn_getpath(vnode, pszPath, &cbPath);
        if (rc == 0)
        {
            return vboxvfs_guest_path_to_char_path_internal(mp, pszPath, cbPath, ppHostPath, pcbHostPath);
        }
    }
    else
    {
        rc = ENOMEM;
    }

    return rc;
}
Example #7
int
fuse_internal_newentry_core(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp,
    enum vtype vtyp,
    struct fuse_dispatcher *fdip)
{
	int err = 0;
	struct fuse_entry_out *feo;
	struct mount *mp = vnode_mount(dvp);

	if ((err = fdisp_wait_answ(fdip))) {
		return err;
	}
	feo = fdip->answ;

	if ((err = fuse_internal_checkentry(feo, vtyp))) {
		return err;
	}
	err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, vtyp);
	if (err) {
		fuse_internal_forget_send(mp, cnp->cn_thread, cnp->cn_cred,
		    feo->nodeid, 1);
		return err;
	}
	cache_attrs(*vpp, feo);

	return err;
}
Example #8
static int
vboxvfs_vnode_close(struct vnop_close_args *args)
{
    vnode_t          vnode;
    mount_t          mp;
    vboxvfs_vnode_t *pVnodeData;
    vboxvfs_mount_t *pMount;

    int rc;

    PDEBUG("Closing vnode...");

    AssertReturn(args, EINVAL);

    vnode           = args->a_vp;                              AssertReturn(vnode,      EINVAL);
    pVnodeData      = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData, EINVAL);
    mp              = vnode_mount(vnode);                      AssertReturn(mp,         EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);             AssertReturn(pMount,     EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) is still in use, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point we must make sure that the vnode has a VBoxVFS object handle assigned */
    if (pVnodeData->pHandle == SHFL_HANDLE_NIL)
    {
        PDEBUG("vnode has no active VBoxVFS object handle set, aborting");
        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return EINVAL;
    }

    rc = vboxvfs_close_internal(pMount, pVnodeData->pHandle);
    if (rc == 0)
    {
        PDEBUG("Close success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        /* Forget about previously assigned VBoxVFS object handle */
        pVnodeData->pHandle = SHFL_HANDLE_NIL;
    }
    else
    {
        PDEBUG("Unable to close: '%s' (handle 0x%X): %d",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle, rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
}
Example #9
/* Initialize the cache operations. Called while initializing cache files. */
void
afs_InitDualFSCacheOps(struct vnode *vp)
{
    static int inited = 0;
#ifdef AFS_DARWIN80_ENV
    char *buffer;
#endif

    if (inited)
	return;
    inited = 1;

    if (vp == NULL)
	return;
#ifdef AFS_DARWIN80_ENV
    /* allocate after the early returns above so the buffer cannot leak */
    buffer = (char *)_MALLOC(MFSNAMELEN, M_TEMP, M_WAITOK);
    vfs_name(vnode_mount(vp), buffer);
    if (strncmp("hfs", buffer, 3) == 0)
#else
    if (strncmp("hfs", vp->v_mount->mnt_vfc->vfc_name, 3) == 0)
#endif
	afs_CacheFSType = AFS_APPL_HFS_CACHE;
#ifdef AFS_DARWIN80_ENV
    else if (strncmp("ufs", buffer, 3) == 0)
#else
    else if (strncmp("ufs", vp->v_mount->mnt_vfc->vfc_name, 3) == 0)
#endif
	afs_CacheFSType = AFS_APPL_UFS_CACHE;
    else
	osi_Panic("Unknown cache vnode type\n");
#ifdef AFS_DARWIN80_ENV
    _FREE(buffer, M_TEMP);
#endif
}
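
On Darwin 8.0 and later the filesystem type name is no longer reachable through v_mount directly, which is why the example copies it out with vfs_name(). A minimal sketch of that probe, with an illustrative function name:

static int
cache_is_hfs_sketch(struct vnode *vp)
{
    char name[MFSNAMELEN]; /* vfs_name() writes at most MFSNAMELEN bytes */

    vfs_name(vnode_mount(vp), name);
    return strncmp(name, "hfs", 3) == 0;
}
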
Example #10
__private_extern__
int
fuse_internal_newentry(vnode_t               dvp,
                       vnode_t              *vpp,
                       struct componentname *cnp,
                       enum fuse_opcode      op,
                       void                 *buf,
                       size_t                bufsize,
                       enum vtype            vtype,
                       vfs_context_t         context)
{
    int err;
    struct fuse_dispatcher fdi;
    mount_t mp = vnode_mount(dvp);

    if (fuse_skip_apple_double_mp(mp, cnp->cn_nameptr, cnp->cn_namelen)) {
        return EACCES;
    }

    fdisp_init(&fdi, 0);
    fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf,
                                       bufsize, &fdi, context);
    err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi, context);
    fuse_invalidate_attr(dvp);

    return err;
}
Example #11
/*
    struct vnop_fsync_args {
	struct vnodeop_desc *a_desc;
	struct vnode * a_vp;
	struct ucred * a_cred;
	int  a_waitfor;
	struct thread * a_td;
    };
*/
static int
fuse_vnop_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	struct fuse_filehandle *fufh;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	int type, err = 0;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(vp)) {
		return 0;
	}
	if ((err = vop_stdfsync(ap)))
		return err;

	if (!fsess_isimpl(vnode_mount(vp),
	    (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
		goto out;
	}
	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			fuse_internal_fsync(vp, td, NULL, fufh);
		}
	}

out:
	return 0;
}
Example #12
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type,
                    fuse_op_waitfor_t waitfor)
{
    struct fuse_data       *data;
    struct fuse_dispatcher  fdi;
    struct fuse_abi_data    fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh  = NULL;

    int err   = 0;
    int op    = FUSE_RELEASE;

    fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
                      vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("osxfuse: filehandle_put called on a valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    data = fuse_get_mpdata(vnode_mount(vp));

    if (vnode_isdir(vp)) {
        op = FUSE_RELEASEDIR;
    }

    fdisp_init_abi(&fdi, fuse_release_in, data);
    fdisp_make_vp(&fdi, op, vp, context);
    fuse_abi_data_init(&fri, DATOI(data), fdi.indata);

    fuse_release_in_set_fh(&fri, fufh->fh_id);
    fuse_release_in_set_flags(&fri, fufh->open_flags);

    if (waitfor == FUSE_OP_FOREGROUNDED) {
        err = fdisp_wait_answ(&fdi);
        if (err) {
            goto out;
        }
    } else {
        fuse_insert_message(fdi.tick);
    }

    fuse_ticket_release(fdi.tick);

out:
    FUSE_OSAddAtomic(-1, (SInt32 *)&fuse_fh_current);
    fuse_invalidate_attr(vp);

    return err;
}
Example #13
__private_extern__
int
fuse_internal_remove(vnode_t               dvp,
                     vnode_t               vp,
                     struct componentname *cnp,
                     enum fuse_opcode      op,
                     vfs_context_t         context)
{
    struct fuse_dispatcher fdi;

    struct vnode_attr *vap = VTOVA(vp);
    int need_invalidate = 0;
    uint64_t target_nlink = 0;
    mount_t mp = vnode_mount(vp);

    int err = 0;

    fdisp_init(&fdi, cnp->cn_namelen + 1);
    fdisp_make_vp(&fdi, op, dvp, context);

    memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
    ((char *)fdi.indata)[cnp->cn_namelen] = '\0';

    if ((vap->va_nlink > 1) && vnode_isreg(vp)) {
        need_invalidate = 1;
        target_nlink = vap->va_nlink;
    }

    if (!(err = fdisp_wait_answ(&fdi))) {
        fuse_ticket_drop(fdi.tick);
    }

    fuse_invalidate_attr(dvp);
    fuse_invalidate_attr(vp);

    /*
     * XXX: M_MACFUSE_INVALIDATE_CACHED_VATTRS_UPON_UNLINK
     *
     * Consider the case where vap->va_nlink > 1 for the entity being
     * removed. In our world, other in-memory vnodes that share a link
     * count each with this one may not know right away that this one just
     * got deleted. We should let them know, say, through a vnode_iterate()
     * here and a callback that does fuse_invalidate_attr(vp) on each
     * relevant vnode.
     */
    if (need_invalidate && !err) {
        if (!vfs_busy(mp, LK_NOWAIT)) {
            vnode_iterate(mp, 0, fuse_internal_remove_callback,
                          (void *)&target_nlink);
            vfs_unbusy(mp);
        } else {
            IOLog("MacFUSE: skipping link count fixup upon remove\n");
        }
    }

    return err;
}
Example #14
int
fuse_internal_remove(struct vnode *dvp,
    struct vnode *vp,
    struct componentname *cnp,
    enum fuse_opcode op)
{
	struct fuse_dispatcher fdi;

	struct vattr *vap = VTOVA(vp);

#if INVALIDATE_CACHED_VATTRS_UPON_UNLINK
	int need_invalidate = 0;
	uint64_t target_nlink = 0;

#endif
	int err = 0;

	debug_printf("dvp=%p, cnp=%p, op=%d\n", dvp, cnp, op);

	fdisp_init(&fdi, cnp->cn_namelen + 1);
	fdisp_make_vp(&fdi, op, dvp, cnp->cn_thread, cnp->cn_cred);

	memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
	((char *)fdi.indata)[cnp->cn_namelen] = '\0';

#if INVALIDATE_CACHED_VATTRS_UPON_UNLINK
	if (vap->va_nlink > 1) {
		need_invalidate = 1;
		target_nlink = vap->va_nlink;
	}
#endif

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);

	fuse_invalidate_attr(dvp);
	fuse_invalidate_attr(vp);

#ifdef XXXIP
	/*
	 * XXX: INVALIDATE_CACHED_VATTRS_UPON_UNLINK
	 *
	 * Consider the case where vap->va_nlink > 1 for the entity being
	 * removed. In our world, other in-memory vnodes that share a link
	 * count each with this one may not know right away that this one just
	 * got deleted. We should let them know, say, through a vnode_iterate()
	 * here and a callback that does fuse_invalidate_attr(vp) on each
	 * relevant vnode.
	 */
	if (need_invalidate && !err) {
		vnode_iterate(vnode_mount(vp), 0, fuse_internal_remove_callback,
		    (void *)&target_nlink);
	}
#endif

	return err;
}
Example #15
/*
    struct vnop_link_args {
	struct vnode *a_tdvp;
	struct vnode *a_vp;
	struct componentname *a_cnp;
    };
*/
static int
fuse_vnop_link(struct vop_link_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;

	struct vattr *vap = VTOVA(vp);

	struct fuse_dispatcher fdi;
	struct fuse_entry_out *feo;
	struct fuse_link_in fli;

	int err;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if (vnode_mount(tdvp) != vnode_mount(vp)) {
		return EXDEV;
	}
	if (vap->va_nlink >= FUSE_LINK_MAX) {
		return EMLINK;
	}
	fli.oldnodeid = VTOI(vp);

	fdisp_init(&fdi, 0);
	fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp,
	    FUSE_LINK, &fli, sizeof(fli), &fdi);
	if ((err = fdisp_wait_answ(&fdi))) {
		goto out;
	}
	feo = fdi.answ;

	err = fuse_internal_checkentry(feo, vnode_vtype(vp));
	fuse_invalidate_attr(tdvp);
	fuse_invalidate_attr(vp);

out:
	fdisp_destroy(&fdi);
	return err;
}
Example #16
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
    int type;
    struct fuse_sync_cargs *args;
    struct fuse_vnode_data *fvdat;
    struct fuse_filehandle *fufh;
    struct fuse_data       *data;
    mount_t mp;

    if (!vnode_hasdirtyblks(vp)) {
        return VNODE_RETURNED;
    }

    mp = vnode_mount(vp);

    if (fuse_isdeadfs_mp(mp)) {
        return VNODE_RETURNED_DONE;
    }

    data = fuse_get_mpdata(mp);

    if (!fuse_implemented(data, (vnode_isdir(vp)) ?
        FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
        return VNODE_RETURNED;
    }

    args = (struct fuse_sync_cargs *)cargs;
    fvdat = VTOFUD(vp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif
    cluster_push(vp, 0);
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif

    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            (void)fuse_internal_fsync_fh(vp, args->context, fufh,
                                         FUSE_OP_FOREGROUNDED);
        }
    }

    /*
     * In general:
     *
     * - can use vnode_isinuse() if need be
     * - vnode and UBC are in lock-step
     * - note that umount will call ubc_sync_range()
     */

    return VNODE_RETURNED;
}
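
A callback like this one is driven by vnode_iterate(), which visits every vnode on the mount until the callback returns VNODE_RETURNED_DONE. A sketch of the driving side, assuming struct fuse_sync_cargs carries at least the context field the callback reads:

static void
fuse_sync_mount_sketch(mount_t mp, vfs_context_t context)
{
    struct fuse_sync_cargs args;

    args.context = context;
    /* flags 0: visit every vnode; the callback decides what to skip */
    (void)vnode_iterate(mp, 0, fuse_sync_callback, &args);
}
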
Example #17
int
fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred)
{
	struct fuse_filehandle *fufh;
	int err, directio;

	MPASS(vp->v_type == VREG || vp->v_type == VDIR);

	err = fuse_filehandle_getrw(vp,
	    (uio->uio_rw == UIO_READ) ? FUFH_RDONLY : FUFH_WRONLY, &fufh);
	if (err) {
		printf("FUSE: io dispatch: filehandles are closed\n");
		return err;
	}
	/*
	 * Ideally, when the daemon asks for direct io at open time, the
	 * standard file flag should be set according to this, so that would
	 * just change the default mode, which later on could be changed via
	 * fcntl(2).
	 * But this doesn't work, the O_DIRECT flag gets cleared at some point
	 * (don't know where). So to make any use of the Fuse direct_io option,
	 * we hardwire it into the file's private data (similarly to Linux,
	 * btw.).
	 */
	directio = (ioflag & IO_DIRECT) || !fsess_opt_datacache(vnode_mount(vp));

	switch (uio->uio_rw) {
	case UIO_READ:
		if (directio) {
			FS_DEBUG("direct read of vnode %ju via file handle %ju\n",
			    (uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
			err = fuse_read_directbackend(vp, uio, cred, fufh);
		} else {
			FS_DEBUG("buffered read of vnode %ju\n", 
			      (uintmax_t)VTOILLU(vp));
			err = fuse_read_biobackend(vp, uio, cred, fufh);
		}
		break;
	case UIO_WRITE:
		if (directio) {
			FS_DEBUG("direct write of vnode %ju via file handle %ju\n",
			    (uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
			err = fuse_write_directbackend(vp, uio, cred, fufh, ioflag);
		} else {
			FS_DEBUG("buffered write of vnode %ju\n", 
			      (uintmax_t)VTOILLU(vp));
			err = fuse_write_biobackend(vp, uio, cred, fufh, ioflag);
		}
		break;
	default:
		panic("uninterpreted mode passed to fuse_io_dispatch");
	}

	return (err);
}
Example #18
__private_extern__
int
fuse_internal_readdir(vnode_t                 vp,
                      uio_t                   uio,
                      vfs_context_t           context,
                      struct fuse_filehandle *fufh,
                      struct fuse_iov        *cookediov,
                      int                    *numdirent)
{
    int err = 0;
    struct fuse_dispatcher fdi;
    struct fuse_read_in   *fri;
    struct fuse_data      *data;

    if (uio_resid(uio) == 0) {
        return 0;
    }

    fdisp_init(&fdi, 0);

    /* Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p I/O). */

    while (uio_resid(uio) > 0) {

        fdi.iosize = sizeof(*fri);
        fdisp_make_vp(&fdi, FUSE_READDIR, vp, context);

        fri = fdi.indata;
        fri->fh = fufh->fh_id;
        fri->offset = uio_offset(uio);
        data = fuse_get_mpdata(vnode_mount(vp));
        fri->size = (typeof(fri->size))min((size_t)uio_resid(uio), data->iosize);

        if ((err = fdisp_wait_answ(&fdi))) {
            goto out;
        }

        if ((err = fuse_internal_readdir_processdata(vp,
                                                     uio,
                                                     fri->size,
                                                     fdi.answ,
                                                     fdi.iosize,
                                                     cookediov,
                                                     numdirent))) {
            break;
        }
    }

/* done: */

    fuse_ticket_drop(fdi.tick);

out:
    return ((err == -1) ? 0 : err);
}
Example #19
static int
vnop_strategy_9p(struct vnop_strategy_args *ap)
{
	mount_t mp;
	struct buf *bp;
	node_9p *np;
	caddr_t addr;
	uio_t uio;
	int e, flags;

	TRACE();
	bp = ap->a_bp;
	np = NTO9P(buf_vnode(bp));
	flags = buf_flags(bp);
	uio = NULL;
	addr = NULL;

	mp = vnode_mount(buf_vnode(bp));
	if (mp == NULL)
		return ENXIO;

	if ((e=buf_map(bp, &addr)))
		goto error;

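	/* translate the buffer's logical block number into a byte offset for the uio */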
	uio = uio_create(1, buf_blkno(bp) * vfs_statfs(mp)->f_bsize, UIO_SYSSPACE,
					 ISSET(flags, B_READ)? UIO_READ: UIO_WRITE);
	if (uio == NULL) {
		e = ENOMEM;
		goto error;
	}
	
	uio_addiov(uio, CAST_USER_ADDR_T(addr), buf_count(bp));
	if (ISSET(flags, B_READ)) {
		if((e=nread_9p(np, uio)))
			goto error;
		/* zero the rest of the page if we reached EOF */
		if (uio_resid(uio) > 0) {
			bzero(addr+buf_count(bp)-uio_resid(uio), uio_resid(uio));
			uio_update(uio, uio_resid(uio));
		}
	} else {
		if ((e=nwrite_9p(np, uio)))
			goto error;
	}
	buf_setresid(bp, uio_resid(uio));
error:
	if (uio)
		uio_free(uio);
	if (addr)
		buf_unmap(bp);
	buf_seterror(bp, e);
	buf_biodone(bp);
	return e;
}
Example #20
int
fuse_vnode_savesize(struct vnode *vp, struct ucred *cred)
{
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct thread *td = curthread;
    struct fuse_filehandle *fufh = NULL;
    struct fuse_dispatcher  fdi;
    struct fuse_setattr_in *fsai;
    int err = 0;

    DEBUG("inode=%jd size=%jd\n", VTOI(vp), fvdat->filesize);
    ASSERT_VOP_ELOCKED(vp, "fuse_io_extend");

    if (fuse_isdeadfs(vp)) {
        return EBADF;
    }

    if (vnode_vtype(vp) == VDIR) {
        return EISDIR;
    }

    if (vfs_isrdonly(vnode_mount(vp))) {
        return EROFS;
    }

    if (cred == NULL) {
        cred = td->td_ucred;
    }

    fdisp_init(&fdi, sizeof(*fsai));
    fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
    fsai = fdi.indata;
    fsai->valid = 0;

    // Truncate to a new value.
    fsai->size = fvdat->filesize;
    fsai->valid |= FATTR_SIZE;

    fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
    if (fufh) {
        fsai->fh = fufh->fh_id;
        fsai->valid |= FATTR_FH;
    }

    err = fdisp_wait_answ(&fdi);
    fdisp_destroy(&fdi);
    if (err == 0)
        fvdat->flag &= ~FN_SIZECHANGE;

    fuse_invalidate_attr(vp);

    return err;
}
Example #21
static int
vnop_offtoblk_9p(struct vnop_offtoblk_args *ap)
{
	mount_t mp;
	
	TRACE();
	mp = vnode_mount(ap->a_vp);
	if (mp == NULL)
		return ENXIO;
	*ap->a_lblkno = ap->a_offset / vfs_statfs(mp)->f_bsize;
	return 0;
}
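
The inverse mapping, logical block back to byte offset, follows the same shape using the mount's f_bsize. A hypothetical sketch (the driver's real vnop_blktooff handler may differ):

static int
vnop_blktooff_9p_sketch(struct vnop_blktooff_args *ap)
{
	mount_t mp;

	TRACE();
	mp = vnode_mount(ap->a_vp);
	if (mp == NULL)
		return ENXIO;
	*ap->a_offset = ap->a_lblkno * vfs_statfs(mp)->f_bsize;
	return 0;
}
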
Example #22
void
fdisp_make_vp(struct fuse_dispatcher *fdip,
    enum fuse_opcode op,
    struct vnode *vp,
    struct thread *td,
    struct ucred *cred)
{
	debug_printf("fdip=%p, op=%d, vp=%p\n", fdip, op, vp);
	RECTIFY_TDCR(td, cred);
	return fdisp_make_pid(fdip, op, vnode_mount(vp), VTOI(vp),
	    td->td_proc->p_pid, cred);
}
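
fdisp_make_vp() is one step in the dispatcher lifecycle used throughout the FreeBSD fuse examples on this page: init, make, wait, destroy. A minimal sketch of a full round trip, using FUSE_READLINK since (as in fuse_vnop_readlink further down) it carries no request payload:

static int
fuse_simple_request_sketch(struct vnode *vp, struct thread *td,
    struct ucred *cred)
{
	struct fuse_dispatcher fdi;
	int err;

	fdisp_init(&fdi, 0);                   /* no request payload */
	fdisp_make_vp(&fdi, FUSE_READLINK, vp, td, cred);
	err = fdisp_wait_answ(&fdi);           /* round trip to the daemon */
	/* on success, fdi.answ/fdi.iosize hold the reply until destroy */
	fdisp_destroy(&fdi);
	return err;
}
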
Example #23
static int
null_reclaim(struct vnop_reclaim_args * ap)
{
	struct vnode * vp;
	struct null_node * xp;
	struct vnode * lowervp;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp));

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;

	xp      = VTONULL(vp);
	lowervp = xp->null_lowervp;

	lck_mtx_lock(&null_mp->nullm_lock);

	vnode_removefsref(vp);

	if (lowervp != NULL) {
		/* root and second don't have a lowervp, so nothing to release and nothing
		 * got hashed */
		if (xp->null_flags & NULL_FLAG_HASHED) {
			/* only call this if we actually made it into the hash list.
			   reclaim also gets called to clean up a vnode that got
			   created when it didn't need to, under race conditions */
			null_hashrem(xp);
		}
		vnode_getwithref(lowervp);
		vnode_rele(lowervp);
		vnode_put(lowervp);
	}

	if (vp == null_mp->nullm_rootvp) {
		null_mp->nullm_rootvp = NULL;
	} else if (vp == null_mp->nullm_secondvp) {
		null_mp->nullm_secondvp = NULL;
	} else if (vp == null_mp->nullm_thirdcovervp) {
		null_mp->nullm_thirdcovervp = NULL;
	}

	lck_mtx_unlock(&null_mp->nullm_lock);

	cache_purge(vp);
	vnode_clearfsnode(vp);

	FREE(xp, M_TEMP);

	return 0;
}
Example #24
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
    int type;
    struct fuse_sync_cargs *args;
    struct fuse_vnode_data *fvdat;
    struct fuse_dispatcher  fdi;
    struct fuse_filehandle *fufh;
    struct fuse_data       *data;
    mount_t mp;

    if (!vnode_hasdirtyblks(vp)) {
        return VNODE_RETURNED;
    }

    mp = vnode_mount(vp);

    if (fuse_isdeadfs(vp)) {
        return VNODE_RETURNED_DONE;
    }

    data = fuse_get_mpdata(mp);

    if (!fuse_implemented(data, (vnode_isdir(vp)) ?
        FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
        return VNODE_RETURNED;
    }

    args = (struct fuse_sync_cargs *)cargs;
    fvdat = VTOFUD(vp);

    cluster_push(vp, 0);

    fuse_dispatcher_init(&fdi, 0);
    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            (void)fuse_internal_fsync(vp, args->context, fufh, &fdi);
        }
    }

    /*
     * In general:
     *
     * - can use vnode_isinuse() if need be
     * - vnode and UBC are in lock-step
     * - note that umount will call ubc_sync_range()
     */

    return VNODE_RETURNED;
}
Example #25
/* helper function to handle locking where possible */
static int
nullfs_checkspecialvp(struct vnode* vp)
{
	int result = 0;
	struct null_mount * null_mp;

	null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));

	lck_mtx_lock(&null_mp->nullm_lock);
	result = (nullfs_isspecialvp(vp));
	lck_mtx_unlock(&null_mp->nullm_lock);

	return result;
}
Example #26
/* the mountpoint lock should be held going into this function */
static int
nullfs_isspecialvp(struct vnode * vp)
{
	struct null_mount * null_mp;

	null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));

	/* only check for root and second here, third is special in a different way,
	 * related only to lookup and readdir */
	if (vp && (vp == null_mp->nullm_rootvp || vp == null_mp->nullm_secondvp)) {
		return 1;
	}
	return 0;
}
Example #27
/*
    struct vnop_readlink_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	struct ucred *a_cred;
    };
*/
static int
fuse_vnop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct ucred *cred = ap->a_cred;

	struct fuse_dispatcher fdi;
	int err;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if (!vnode_islnk(vp)) {
		return EINVAL;
	}
	fdisp_init(&fdi, 0);
	err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred);
	if (err) {
		goto out;
	}
	if (((char *)fdi.answ)[0] == '/' &&
	    fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) {
		char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname;

		err = uiomove(mpth, strlen(mpth), uio);
	}
	if (!err) {
		err = uiomove(fdi.answ, fdi.iosize, uio);
	}
out:
	fdisp_destroy(&fdi);
	return err;
}
Example #28
static int
vnop_blockmap_9p(struct vnop_blockmap_args *ap)
{
	mount_t mp;
	
	TRACE();
	mp = vnode_mount(ap->a_vp);
	if (mp == NULL)
		return ENXIO;

	if (ap->a_run)
		*ap->a_run = ap->a_size;
	if (ap->a_bpn)
		*ap->a_bpn = ap->a_foffset / vfs_statfs(mp)->f_bsize;
	if (ap->a_poff)
		*(int32_t*)ap->a_poff = 0;
	return 0;
}
Example #29
/*
 * Act like null_hashget, but add passed null_node to hash if no existing
 * node found.
 */
static int
null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp)
{
	struct null_node_hashhead * hd;
	struct null_node * oxp;
	struct vnode * ovp;
	int error = 0;

	hd = NULL_NHASH(xp->null_lowervp);
	lck_mtx_lock(&null_hashmtx);
	LIST_FOREACH(oxp, hd, null_hash)
	{
		if (oxp->null_lowervp == xp->null_lowervp && vnode_mount(NULLTOV(oxp)) == mp) {
			/*
			 * See null_hashget for a description of this
			 * operation.
			 */
			ovp = NULLTOV(oxp);
			if (oxp->null_lowervid != vnode_vid(oxp->null_lowervp)) {
				/* vp doesn't exist, so return null (not sure we actually
				   catch a recycle right now). This is an exceptional case:
				   it suggests the vnode we are trying to add has been
				   recycled, so don't add it. */
				error = EIO;
				goto end;
			}
			/* if we found something in the hash map then grab an iocount */
			error = vnode_getwithvid(ovp, oxp->null_myvid);
			if (error == 0) {
				*vpp = ovp;
			}
			goto end;
		}
	}
	/* if it wasn't in the hash map then the vnode pointed to by xp already has a
	 * iocount so don't bother */
	LIST_INSERT_HEAD(hd, xp, null_hash);
	xp->null_flags |= NULL_FLAG_HASHED;
end:
	lck_mtx_unlock(&null_hashmtx);
	return error;
}
Example #30
__inline__
int
fuse_vncache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp)
{
#if M_OSXFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_OSXFUSE_ENABLE_HUGE_LOCK
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp));
    fuse_biglock_unlock(data->biglock);
#endif
    int ret = cache_lookup(dvp, vpp, cnp);
#if M_OSXFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_OSXFUSE_ENABLE_HUGE_LOCK
    fuse_biglock_lock(data->biglock);
#endif

#if FUSE_TRACE_VNCACHE
    IOLog("OSXFUSE: cache lookup ret=%d, dvp=%p, *vpp=%p, %s\n",
          ret, dvp, *vpp, cnp->cn_nameptr);
#endif
    return ret;
}