void
fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
{
	/*
	 * This function is called for every vnode open.
	 * Merge fuse_open_flags; it may be 0.
	 *
	 * XXXIP: Handle FOPEN_KEEP_CACHE
	 */
	/*
	 * Ideally, direct I/O should be enabled per file descriptor, but
	 * there is no obvious way to provide that in this implementation.
	 * It is also hard to see why two different fd's on the same vnode
	 * would want DIRECT_IO toggled independently; the Linux
	 * implementation, however, works on an fd rather than an inode
	 * and does provide such a feature.
	 *
	 * XXXIP: Handle fd based DIRECT_IO
	 */
	if (fuse_open_flags & FOPEN_DIRECT_IO) {
		VTOFUD(vp)->flag |= FN_DIRECTIO;
	} else {
		VTOFUD(vp)->flag &= ~FN_DIRECTIO;
	}

	if (vnode_vtype(vp) == VREG) {
		/* XXXIP prevent getattr, by using cached node size */
		vnode_create_vobject(vp, 0, td);
	}
}
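/*
 * Hedged illustration (not part of this kernel code): fuse_open_flags above
 * is whatever the userspace daemon returned in its FUSE_OPEN reply.  With the
 * libfuse low-level API, a daemon would request per-open direct I/O roughly
 * as follows; the handler name and the choice to always set direct_io are
 * assumptions made for the sake of the example.
 */
#define FUSE_USE_VERSION 26
#include <fuse_lowlevel.h>

static void
example_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	/* Ask the kernel to bypass its page/buffer cache for this open;
	 * this is what arrives as FOPEN_DIRECT_IO in fuse_open_flags. */
	fi->direct_io = 1;
	/* Leaving keep_cache at 0 means FOPEN_KEEP_CACHE stays clear. */
	fi->keep_cache = 0;
	fuse_reply_open(req, fi);
}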
void
fuse_vnode_refreshsize(struct vnode *vp, struct ucred *cred)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct vattr va;

	if ((fvdat->flag & FN_SIZECHANGE) != 0 ||
	    (fuse_refresh_size == 0 && fvdat->filesize != 0))
		return;

	VOP_GETATTR(vp, &va, cred);
	FS_DEBUG("refreshed file size: %jd\n", (intmax_t)VTOFUD(vp)->filesize);
}
static int
fuse_internal_print_vnodes_callback(vnode_t vp, __unused void *cargs)
{
	const char *vname = NULL;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

#if M_MACFUSE_ENABLE_UNSUPPORTED
	vname = vnode_getname(vp);
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

	if (vname) {
		IOLog("MacFUSE: vp=%p ino=%lld parent=%lld inuse=%d %s\n",
		      vp, fvdat->nodeid, fvdat->parent_nodeid,
		      vnode_isinuse(vp, 0), vname);
	} else {
		if (fvdat->nodeid == FUSE_ROOT_ID) {
			IOLog("MacFUSE: vp=%p ino=%lld parent=%lld inuse=%d /\n",
			      vp, fvdat->nodeid, fvdat->parent_nodeid,
			      vnode_isinuse(vp, 0));
		} else {
			IOLog("MacFUSE: vp=%p ino=%lld parent=%lld inuse=%d\n",
			      vp, fvdat->nodeid, fvdat->parent_nodeid,
			      vnode_isinuse(vp, 0));
		}
	}

#if M_MACFUSE_ENABLE_UNSUPPORTED
	if (vname) {
		vnode_putname(vname);
	}
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

	return VNODE_RETURNED;
}
int
fuse_vnode_get(struct mount *mp, uint64_t nodeid, struct vnode *dvp,
    struct vnode **vpp, struct componentname *cnp, enum vtype vtyp)
{
	struct thread *td = (cnp != NULL ? cnp->cn_thread : curthread);
	int err = 0;

	debug_printf("dvp=%p\n", dvp);

	err = fuse_vnode_alloc(mp, td, nodeid, vtyp, vpp);
	if (err) {
		return err;
	}

	if (dvp != NULL) {
		MPASS((cnp->cn_flags & ISDOTDOT) == 0);
		MPASS(!(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'));
		fuse_vnode_setparent(*vpp, dvp);
	}
	if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0) {
		ASSERT_VOP_LOCKED(*vpp, "fuse_vnode_get");
		ASSERT_VOP_LOCKED(dvp, "fuse_vnode_get");
		cache_enter(dvp, *vpp, cnp);
	}
	VTOFUD(*vpp)->nlookup++;

	return 0;
}
/*
    struct vnop_fsync_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct ucred *a_cred;
	int a_waitfor;
	struct thread *a_td;
    };
*/
static int
fuse_vnop_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;
	struct fuse_filehandle *fufh;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	int type, err = 0;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(vp)) {
		return 0;
	}
	if ((err = vop_stdfsync(ap)))
		return err;

	if (!fsess_isimpl(vnode_mount(vp),
	    (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
		goto out;
	}
	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			fuse_internal_fsync(vp, td, NULL, fufh);
		}
	}

out:
	return 0;
}
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type,
    fuse_op_waitfor_t waitfor)
{
	struct fuse_data *data;
	struct fuse_dispatcher fdi;
	struct fuse_abi_data fri;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;

	int err = 0;
	int op = FUSE_RELEASE;

	fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
	    vp, fufh_type);

	fufh = &(fvdat->fufh[fufh_type]);

	if (FUFH_IS_VALID(fufh)) {
		panic("osxfuse: filehandle_put called on a valid fufh (type=%d)",
		    fufh_type);
		/* NOTREACHED */
	}

	if (fuse_isdeadfs(vp)) {
		goto out;
	}

	data = fuse_get_mpdata(vnode_mount(vp));

	if (vnode_isdir(vp)) {
		op = FUSE_RELEASEDIR;
	}

	fdisp_init_abi(&fdi, fuse_release_in, data);
	fdisp_make_vp(&fdi, op, vp, context);

	fuse_abi_data_init(&fri, DATOI(data), fdi.indata);

	fuse_release_in_set_fh(&fri, fufh->fh_id);
	fuse_release_in_set_flags(&fri, fufh->open_flags);

	if (waitfor == FUSE_OP_FOREGROUNDED) {
		err = fdisp_wait_answ(&fdi);
		if (err) {
			goto out;
		}
	} else {
		fuse_insert_message(fdi.tick);
	}

	fuse_ticket_release(fdi.tick);

out:
	FUSE_OSAddAtomic(-1, (SInt32 *)&fuse_fh_current);
	fuse_invalidate_attr(vp);

	return err;
}
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type)
{
	struct fuse_dispatcher fdi;
	struct fuse_release_in *fri;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;

	int err = 0;
	int isdir = 0;
	int op = FUSE_RELEASE;
	const bool wait_for_completion = true;

	fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
	    vp, fufh_type);

	fufh = &(fvdat->fufh[fufh_type]);

	if (FUFH_IS_VALID(fufh)) {
		panic("fuse4x: filehandle_put called on a valid fufh (type=%d)",
		    fufh_type);
		/* NOTREACHED */
	}

	if (fuse_isdeadfs(vp)) {
		goto out;
	}

	if (vnode_isdir(vp)) {
		op = FUSE_RELEASEDIR;
		isdir = 1;
	}

	fdisp_init(&fdi, sizeof(*fri));
	fdisp_make_vp(&fdi, op, vp, context);
	fri = fdi.indata;
	fri->fh = fufh->fh_id;
	fri->flags = fufh->open_flags;

	if (wait_for_completion) {
		if ((err = fdisp_wait_answ(&fdi))) {
			goto out;
		} else {
			fuse_ticket_drop(fdi.tick);
		}
	} else {
		fuse_insert_callback(fdi.tick, NULL);
		fuse_insert_message(fdi.tick);
	}

out:
	OSAddAtomic(-1, (SInt32 *)&fuse_fh_current);
	fuse_invalidate_attr(vp);

	return err;
}
int
fuse_isvalid_attr(struct vnode *vp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct timespec uptsp;

	nanouptime(&uptsp);
	return fuse_timespec_cmp(&uptsp, &fvdat->cached_attrs_valid, <=);
}
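/*
 * Hedged sketch (assumption, not code from this tree): fuse_isvalid_attr()
 * above only compares the current uptime against cached_attrs_valid.  That
 * deadline would typically be stamped when a FUSE_LOOKUP or FUSE_GETATTR
 * reply arrives, using the attr_valid/attr_valid_nsec TTL fields the daemon
 * returns in fuse_attr_out.  A minimal helper might look like the following;
 * the name fuse_stamp_attr_validity is hypothetical.
 */
static void
fuse_stamp_attr_validity(struct fuse_vnode_data *fvdat, uint64_t attr_valid,
    uint32_t attr_valid_nsec)
{
	struct timespec now;

	nanouptime(&now);
	fvdat->cached_attrs_valid.tv_sec = now.tv_sec + attr_valid;
	fvdat->cached_attrs_valid.tv_nsec = now.tv_nsec + attr_valid_nsec;
	if (fvdat->cached_attrs_valid.tv_nsec >= 1000000000L) {
		fvdat->cached_attrs_valid.tv_sec++;
		fvdat->cached_attrs_valid.tv_nsec -= 1000000000L;
	}
}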
int
fuse_filehandle_valid(struct vnode *vp, fufh_type_t fufh_type)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh;

	fufh = &(fvdat->fufh[fufh_type]);
	return FUFH_IS_VALID(fufh);
}
void
fuse_internal_vnode_disappear(struct vnode *vp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
	fvdat->flag |= FN_REVOKED;
	cache_purge(vp);
}
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
	int type;
	struct fuse_sync_cargs *args;
	struct fuse_vnode_data *fvdat;
	struct fuse_filehandle *fufh;
	struct fuse_data *data;
	mount_t mp;

	if (!vnode_hasdirtyblks(vp)) {
		return VNODE_RETURNED;
	}

	mp = vnode_mount(vp);

	if (fuse_isdeadfs_mp(mp)) {
		return VNODE_RETURNED_DONE;
	}

	data = fuse_get_mpdata(mp);

	if (!fuse_implemented(data, (vnode_isdir(vp)) ?
	    FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
		return VNODE_RETURNED;
	}

	args = (struct fuse_sync_cargs *)cargs;
	fvdat = VTOFUD(vp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
	fuse_biglock_unlock(data->biglock);
#endif
	cluster_push(vp, 0);
#if M_OSXFUSE_ENABLE_BIG_LOCK
	fuse_biglock_lock(data->biglock);
#endif

	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			(void)fuse_internal_fsync_fh(vp, args->context, fufh,
			    FUSE_OP_FOREGROUNDED);
		}
	}

	/*
	 * In general:
	 *
	 * - can use vnode_isinuse() if the need be
	 * - vnode and UBC are in lock-step
	 * - note that umount will call ubc_sync_range()
	 */

	return VNODE_RETURNED;
}
/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
fuse_io_invalbuf(struct vnode *vp, struct thread *td)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

	while (fvdat->flag & FN_FLUSHINPROG) {
		struct proc *p = td->td_proc;

		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
			return EIO;
		fvdat->flag |= FN_FLUSHWANT;
		tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz);
		error = 0;
		if (p != NULL) {
			PROC_LOCK(p);
			if (SIGNOTEMPTY(p->p_siglist) ||
			    SIGNOTEMPTY(td->td_siglist))
				error = EINTR;
			PROC_UNLOCK(p);
		}
		if (error == EINTR)
			return EINTR;
	}
	fvdat->flag |= FN_FLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			fvdat->flag &= ~FN_FLUSHINPROG;
			if (fvdat->flag & FN_FLUSHWANT) {
				fvdat->flag &= ~FN_FLUSHWANT;
				wakeup(&fvdat->flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	fvdat->flag &= ~FN_FLUSHINPROG;
	if (fvdat->flag & FN_FLUSHWANT) {
		fvdat->flag &= ~FN_FLUSHWANT;
		wakeup(&fvdat->flag);
	}
	return (error);
}
/*
    struct vnop_print_args {
	struct vnode *a_vp;
    };
*/
static int
fuse_vnop_print(struct vop_print_args *ap)
{
	struct fuse_vnode_data *fvdat = VTOFUD(ap->a_vp);

	printf("nodeid: %ju, parent nodeid: %ju, nlookup: %ju, flag: %#x\n",
	    (uintmax_t)VTOILLU(ap->a_vp), (uintmax_t)fvdat->parent_nid,
	    (uintmax_t)fvdat->nlookup, fvdat->flag);

	return 0;
}
int
fuse_vnode_savesize(struct vnode *vp, struct ucred *cred)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct thread *td = curthread;
	struct fuse_filehandle *fufh = NULL;
	struct fuse_dispatcher fdi;
	struct fuse_setattr_in *fsai;
	int err = 0;

	DEBUG("inode=%jd size=%jd\n", VTOI(vp), fvdat->filesize);
	ASSERT_VOP_ELOCKED(vp, "fuse_io_extend");

	if (fuse_isdeadfs(vp)) {
		return EBADF;
	}
	if (vnode_vtype(vp) == VDIR) {
		return EISDIR;
	}
	if (vfs_isrdonly(vnode_mount(vp))) {
		return EROFS;
	}
	if (cred == NULL) {
		cred = td->td_ucred;
	}
	fdisp_init(&fdi, sizeof(*fsai));
	fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
	fsai = fdi.indata;
	fsai->valid = 0;

	/* Truncate to a new value. */
	fsai->size = fvdat->filesize;
	fsai->valid |= FATTR_SIZE;

	fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
	if (fufh) {
		fsai->fh = fufh->fh_id;
		fsai->valid |= FATTR_FH;
	}
	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);
	if (err == 0)
		fvdat->flag &= ~FN_SIZECHANGE;

	fuse_invalidate_attr(vp);

	return err;
}
int
fuse_notify_inval_inode(struct fuse_data *data, struct fuse_iov *iov)
{
	int err = 0;
	struct fuse_notify_inval_inode_out fniio;
	HNodeRef hp;
	vnode_t vp;

	fuse_abi_out(fuse_notify_inval_inode_out, DTOABI(data), iov->base,
	    &fniio);

	err = (int)HNodeLookupRealQuickIfExists(data->fdev, (ino_t)fniio.ino,
	    0 /* fork index */, &hp, &vp);
	if (err) {
		return err;
	}
	assert(vp != NULL);

	fuse_nodelock_lock(VTOFUD(vp), FUSEFS_EXCLUSIVE_LOCK);

	fuse_invalidate_attr(vp);
	if (fniio.off >= 0) {
		off_t end_off;

		if (fniio.len > 0) {
			end_off = (off_t)min(fniio.off + fniio.len,
			    ubc_getsize(vp));
		} else {
			end_off = ubc_getsize(vp);
		}

		ubc_msync(vp, (off_t)fniio.off, end_off, NULL,
		    UBC_PUSHDIRTY | UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
	}
	FUSE_KNOTE(vp, NOTE_ATTRIB);

	fuse_nodelock_unlock(VTOFUD(vp));
	vnode_put(vp);

	return err;
}
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_write_in *fwi;
	struct fuse_dispatcher fdi;
	size_t chunksize;
	int diff;
	int err = 0;

	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);

	fdisp_init(&fdi, 0);

	while (uio->uio_resid > 0) {
		chunksize = MIN(uio->uio_resid,
		    fuse_get_mpdata(vp->v_mount)->max_write);

		fdi.iosize = sizeof(*fwi) + chunksize;
		fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);

		fwi = fdi.indata;
		fwi->fh = fufh->fh_id;
		fwi->offset = uio->uio_offset;
		fwi->size = chunksize;

		if ((err = uiomove((char *)fdi.indata + sizeof(*fwi),
		    chunksize, uio)))
			break;

		if ((err = fdisp_wait_answ(&fdi)))
			break;

		diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size;
		if (diff < 0) {
			err = EINVAL;
			break;
		}
		uio->uio_resid += diff;
		uio->uio_offset -= diff;
		if (uio->uio_offset > fvdat->filesize)
			fuse_vnode_setsize(vp, cred, uio->uio_offset);
	}
	fdisp_destroy(&fdi);

	return (err);
}
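/*
 * Hedged illustration of the other end of fuse_write_directbackend(): the
 * daemon answers FUSE_WRITE with the byte count it actually accepted, and
 * the loop above rewinds uio_resid/uio_offset by the shortfall ("diff") so
 * the remainder is retried.  Sketch of a libfuse low-level write handler
 * that can legitimately return a short count; backing_fd_for() is a
 * hypothetical helper, not a libfuse API.
 */
#define FUSE_USE_VERSION 26
#include <fuse_lowlevel.h>
#include <errno.h>
#include <unistd.h>

static void
example_write(fuse_req_t req, fuse_ino_t ino, const char *buf, size_t size,
              off_t off, struct fuse_file_info *fi)
{
	ssize_t written = pwrite(backing_fd_for(ino), buf, size, off);

	if (written < 0)
		fuse_reply_err(req, errno);
	else
		/* May be smaller than size; the kernel rewinds and retries
		 * the unwritten tail. */
		fuse_reply_write(req, (size_t)written);
}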
/*
    struct vnop_rmdir_args {
	struct vnode *a_dvp;
	struct vnode *a_vp;
	struct componentname *a_cnp;
    } *ap;
*/
static int
fuse_vnop_rmdir(struct vop_rmdir_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;

	int err;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if (VTOFUD(vp) == VTOFUD(dvp)) {
		return EINVAL;
	}
	err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR);

	if (err == 0)
		fuse_internal_vnode_disappear(vp);
	return err;
}
int
fuse_filehandle_getrw(struct vnode *vp, fufh_type_t fufh_type,
    struct fuse_filehandle **fufhp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh;

	fufh = &(fvdat->fufh[fufh_type]);
	if (!FUFH_IS_VALID(fufh)) {
		fufh_type = FUFH_RDWR;
	}
	return fuse_filehandle_get(vp, fufh_type, fufhp);
}
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
	int type;
	struct fuse_sync_cargs *args;
	struct fuse_vnode_data *fvdat;
	struct fuse_dispatcher fdi;
	struct fuse_filehandle *fufh;
	struct fuse_data *data;
	mount_t mp;

	if (!vnode_hasdirtyblks(vp)) {
		return VNODE_RETURNED;
	}

	mp = vnode_mount(vp);

	if (fuse_isdeadfs(vp)) {
		return VNODE_RETURNED_DONE;
	}

	data = fuse_get_mpdata(mp);

	if (!fuse_implemented(data, (vnode_isdir(vp)) ?
	    FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
		return VNODE_RETURNED;
	}

	args = (struct fuse_sync_cargs *)cargs;
	fvdat = VTOFUD(vp);

	cluster_push(vp, 0);

	fuse_dispatcher_init(&fdi, 0);
	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			(void)fuse_internal_fsync(vp, args->context, fufh,
			    &fdi);
		}
	}

	/*
	 * In general:
	 *
	 * - can use vnode_isinuse() if the need be
	 * - vnode and UBC are in lock-step
	 * - note that umount will call ubc_sync_range()
	 */

	return VNODE_RETURNED;
}
int
fuse_filehandle_close(struct vnode *vp, fufh_type_t fufh_type,
    struct thread *td, struct ucred *cred)
{
	struct fuse_dispatcher fdi;
	struct fuse_release_in *fri;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;

	int err = 0;
	int isdir = 0;
	int op = FUSE_RELEASE;

	fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
	    vp, fufh_type);

	fufh = &(fvdat->fufh[fufh_type]);
	if (!FUFH_IS_VALID(fufh)) {
		panic("FUSE: filehandle_put called on invalid fufh (type=%d)",
		    fufh_type);
		/* NOTREACHED */
	}
	if (fuse_isdeadfs(vp)) {
		goto out;
	}
	if (vnode_isdir(vp)) {
		op = FUSE_RELEASEDIR;
		isdir = 1;
	}
	fdisp_init(&fdi, sizeof(*fri));
	fdisp_make_vp(&fdi, op, vp, td, cred);
	fri = fdi.indata;
	fri->fh = fufh->fh_id;
	fri->flags = fuse_filehandle_xlate_to_oflags(fufh_type);

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);

out:
	atomic_subtract_acq_int(&fuse_fh_count, 1);
	fufh->fh_id = (uint64_t)-1;
	fufh->fh_type = FUFH_INVALID;
	fuse_invalidate_attr(vp);

	return err;
}
int
fuse_filehandle_get(struct vnode *vp, fufh_type_t fufh_type,
    struct fuse_filehandle **fufhp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh;

	fufh = &(fvdat->fufh[fufh_type]);
	if (!FUFH_IS_VALID(fufh))
		return EBADF;
	if (fufhp != NULL)
		*fufhp = fufh;
	return 0;
}
__private_extern__ int
fuse_internal_revoke(vnode_t vp, int flags, vfs_context_t context, int how)
{
	int ret = 0;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	fvdat->flag |= FN_REVOKED;

	if (how == REVOKE_HARD) {
		ret = vn_revoke(vp, flags, context);
	}

	return ret;
}
void
fuse_vnode_refreshsize(struct vnode *vp, struct ucred *cred)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct vattr va;

	if ((fvdat->flag & FN_SIZECHANGE) != 0 ||
	    fuse_data_cache_mode == FUSE_CACHE_UC ||
	    (fuse_refresh_size == 0 && fvdat->filesize != 0))
		return;

	VOP_GETATTR(vp, &va, cred);
	SDT_PROBE2(fuse, , node, trace, 1, "refreshed file size");
}
/*
    struct vnop_inactive_args {
	struct vnode *a_vp;
	struct thread *a_td;
    };
*/
static int
fuse_vnop_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;

	int type, need_flush = 1;

	FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));

	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			if (need_flush && vp->v_type == VREG) {
				if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
					fuse_vnode_savesize(vp, NULL);
				}
				if (fuse_data_cache_invalidate ||
				    (fvdat->flag & FN_REVOKED) != 0)
					fuse_io_invalbuf(vp, td);
				else
					fuse_io_flushbuf(vp, MNT_WAIT, td);
				need_flush = 0;
			}
			fuse_filehandle_close(vp, type, td, NULL);
		}
	}

	if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
		vrecycle(vp);
	}
	return 0;
}
/*
    struct vnop_open_args {
	struct vnode *a_vp;
	int a_mode;
	struct ucred *a_cred;
	struct thread *a_td;
	int a_fdidx; / struct file *a_fp;
    };
*/
static int
fuse_vnop_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int mode = ap->a_mode;
	struct thread *td = ap->a_td;
	struct ucred *cred = ap->a_cred;

	fufh_type_t fufh_type;
	struct fuse_vnode_data *fvdat;

	int error, isdir = 0;
	int32_t fuse_open_flags;

	FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	fvdat = VTOFUD(vp);

	if (vnode_isdir(vp)) {
		isdir = 1;
	}
	fuse_open_flags = 0;
	if (isdir) {
		fufh_type = FUFH_RDONLY;
	} else {
		fufh_type = fuse_filehandle_xlate_from_fflags(mode);
		/*
		 * For WRONLY opens, force DIRECT_IO.  This is necessary
		 * since writing a partial block through the buffer cache
		 * will result in a read of the block, and that read won't
		 * be allowed by the WRONLY open.
		 */
		if (fufh_type == FUFH_WRONLY ||
		    (fvdat->flag & FN_DIRECTIO) != 0)
			fuse_open_flags = FOPEN_DIRECT_IO;
	}

	if (fuse_filehandle_validrw(vp, fufh_type) != FUFH_INVALID) {
		fuse_vnode_open(vp, fuse_open_flags, td);
		return 0;
	}
	error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);

	return error;
}
/*
    struct vnop_readdir_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	struct ucred *a_cred;
	int *a_eofflag;
	int *ncookies;
	u_long **a_cookies;
    };
*/
static int
fuse_vnop_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct ucred *cred = ap->a_cred;

	struct fuse_filehandle *fufh = NULL;
	struct fuse_vnode_data *fvdat;
	struct fuse_iov cookediov;

	int err = 0;
	int freefufh = 0;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */
	    (uio_resid(uio) < sizeof(struct dirent))) {
		return EINVAL;
	}
	fvdat = VTOFUD(vp);

	if (!fuse_filehandle_valid(vp, FUFH_RDONLY)) {
		FS_DEBUG("calling readdir() before open()");
		err = fuse_filehandle_open(vp, FUFH_RDONLY, &fufh, NULL, cred);
		freefufh = 1;
	} else {
		err = fuse_filehandle_get(vp, FUFH_RDONLY, &fufh);
	}
	if (err) {
		return (err);
	}
#define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1)
	fiov_init(&cookediov, DIRCOOKEDSIZE);

	err = fuse_internal_readdir(vp, uio, fufh, &cookediov);

	fiov_teardown(&cookediov);
	if (freefufh) {
		fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
	}
	return err;
}
int
fuse_vnode_setsize(struct vnode *vp, struct ucred *cred, off_t newsize)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	off_t oldsize;
	int err = 0;

	ASSERT_VOP_ELOCKED(vp, "fuse_vnode_setsize");

	oldsize = fvdat->filesize;
	fvdat->filesize = newsize;
	fvdat->flag |= FN_SIZECHANGE;

	if (newsize < oldsize) {
		err = vtruncbuf(vp, cred, newsize, fuse_iosize(vp));
	}
	vnode_pager_setsize(vp, newsize);
	return err;
}
int
fuse_vnode_get(struct mount *mp, struct fuse_entry_out *feo, uint64_t nodeid,
    struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    enum vtype vtyp)
{
	struct thread *td = (cnp != NULL ? cnp->cn_thread : curthread);
	int err = 0;

	err = fuse_vnode_alloc(mp, td, nodeid, vtyp, vpp);
	if (err) {
		return err;
	}

	if (dvp != NULL) {
		MPASS((cnp->cn_flags & ISDOTDOT) == 0);
		MPASS(!(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'));
		fuse_vnode_setparent(*vpp, dvp);
	}
	if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0 &&
	    feo != NULL &&
	    (feo->entry_valid != 0 || feo->entry_valid_nsec != 0)) {
		ASSERT_VOP_LOCKED(*vpp, "fuse_vnode_get");
		ASSERT_VOP_LOCKED(dvp, "fuse_vnode_get");
		cache_enter(dvp, *vpp, cnp);
	}

	/*
	 * In userland, libfuse uses cached lookups for dot and dotdot
	 * entries, so it does not really bump the nlookup counter for them.
	 * Follow the same semantics and avoid bumping it here in order to
	 * keep the nlookup counters consistent.
	 */
	if (cnp == NULL || ((cnp->cn_flags & ISDOTDOT) == 0 &&
	    (cnp->cn_namelen != 1 || cnp->cn_nameptr[0] != '.')))
		VTOFUD(*vpp)->nlookup++;

	return 0;
}
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type)
{
	struct fuse_dispatcher fdi;
	struct fuse_release_in *fri;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;
	int err = 0;	/* declared up front so the early goto does not skip it */

	fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
	    vp, fufh_type);

	fufh = &(fvdat->fufh[fufh_type]);

	if (FUFH_IS_VALID(fufh)) {
		panic("fuse4x: filehandle_put called on a valid fufh (type=%d)",
		    fufh_type);
		/* NOTREACHED */
	}

	if (fuse_isdeadfs(vp)) {
		goto out;
	}

	int op = vnode_isdir(vp) ? FUSE_RELEASEDIR : FUSE_RELEASE;
	fuse_dispatcher_init(&fdi, sizeof(*fri));
	fuse_dispatcher_make_vp(&fdi, op, vp, context);
	fri = fdi.indata;
	fri->fh = fufh->fh_id;
	fri->flags = fufh->open_flags;

	err = fuse_dispatcher_wait_answer(&fdi);
	if (!err) {
		fuse_ticket_drop(fdi.ticket);
	}

out:
	OSDecrementAtomic((SInt32 *)&fuse_fh_current);
	fuse_invalidate_attr(vp);

	return err;
}
/*
    struct vnop_write_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	int a_ioflag;
	struct ucred *a_cred;
    };
*/
static int
fuse_vnop_write(struct vop_write_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int ioflag = ap->a_ioflag;
	struct ucred *cred = ap->a_cred;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	fuse_vnode_refreshsize(vp, cred);

	if (VTOFUD(vp)->flag & FN_DIRECTIO) {
		ioflag |= IO_DIRECT;
	}
	return fuse_io_dispatch(vp, uio, ioflag, cred);
}