int fuse_vncache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp) { #if M_OSXFUSE_ENABLE_BIG_LOCK /* * Make sure that biglock is actually held by the thread calling us before * trying to unlock it. fuse_vncache_lookup is called by notification * handlers that do not hold biglock. Trying to unlock it in this case would * result in a kernel panic. */ struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp)); bool biglock_locked = fuse_biglock_have_lock(data->biglock); if (biglock_locked) { fuse_biglock_unlock(data->biglock); } #endif /* M_OSXFUSE_ENABLE_BIG_LOCK */ int ret = cache_lookup(dvp, vpp, cnp); #if M_OSXFUSE_ENABLE_BIG_LOCK if (biglock_locked) { fuse_biglock_lock(data->biglock); } #endif #if FUSE_TRACE_VNCACHE IOLog("osxfuse: cache lookup ret=%d, dvp=%p, *vpp=%p, %s\n", ret, dvp, *vpp, cnp->cn_nameptr); #endif return ret; }
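/*
 * Illustrative sketch, not from the original sources: the "unlock the big
 * lock only if this thread actually holds it" pattern used above, modeled
 * in userland with a pthread mutex and an explicit owner field. The
 * biglock_* names are hypothetical stand-ins for the kernel's
 * fuse_biglock_* API.
 */
#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t mtx;
    pthread_t owner;      /* meaningful only while 'locked' is true */
    bool locked;
} biglock_t;

static bool biglock_have_lock(biglock_t *l)
{
    /* A thread only ever asks about itself, so this unsynchronized
     * peek cannot race with that same thread's lock/unlock calls. */
    return l->locked && pthread_equal(l->owner, pthread_self());
}

static void biglock_lock(biglock_t *l)
{
    pthread_mutex_lock(&l->mtx);
    l->owner = pthread_self();
    l->locked = true;
}

static void biglock_unlock(biglock_t *l)
{
    l->locked = false;
    pthread_mutex_unlock(&l->mtx);
}

/* Run fn() without holding the big lock, then restore the caller's
 * locking state; safe for callers that never took the lock, such as
 * the notification handlers mentioned in the comment above. */
static void call_unlocked(biglock_t *l, void (*fn)(void *), void *arg)
{
    bool held = biglock_have_lock(l);
    if (held)
        biglock_unlock(l);
    fn(arg);
    if (held)
        biglock_lock(l);
}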
static int fuse_vfsop_root(struct mount *mp, int lkflags, struct vnode **vpp) { struct fuse_data *data = fuse_get_mpdata(mp); int err = 0; if (data->vroot != NULL) { err = vget(data->vroot, lkflags, curthread); if (err == 0) *vpp = data->vroot; } else { err = fuse_vnode_get(mp, FUSE_ROOT_ID, NULL, vpp, NULL, VDIR); if (err == 0) { FUSE_LOCK(); MPASS(data->vroot == NULL || data->vroot == *vpp); if (data->vroot == NULL) { FS_DEBUG("new root vnode\n"); data->vroot = *vpp; FUSE_UNLOCK(); vref(*vpp); } else if (data->vroot != *vpp) { FS_DEBUG("root vnode race\n"); FUSE_UNLOCK(); VOP_UNLOCK(*vpp, 0); vrele(*vpp); vrecycle(*vpp); *vpp = data->vroot; } else FUSE_UNLOCK(); } } return err; }
static errno_t fuse_vfsop_root(mount_t mp, struct vnode **vpp, vfs_context_t context) { int err = 0; vnode_t vp = NULLVP; struct fuse_entry_out feo_root; struct fuse_data *data = fuse_get_mpdata(mp); fuse_trace_printf_vfsop(); if (data->rootvp != NULLVP) { *vpp = data->rootvp; return vnode_get(*vpp); } bzero(&feo_root, sizeof(feo_root)); feo_root.nodeid = FUSE_ROOT_ID; feo_root.generation = 0; feo_root.attr.ino = FUSE_ROOT_ID; feo_root.attr.size = FUSE_ROOT_SIZE; feo_root.attr.mode = VTTOIF(VDIR); err = FSNodeGetOrCreateFileVNodeByID(&vp, FN_IS_ROOT, &feo_root, mp, NULLVP /* dvp */, context, NULL /* oflags */); *vpp = vp; if (!err) { data->rootvp = *vpp; } return err; }
int fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type, fuse_op_waitfor_t waitfor) { struct fuse_data *data; struct fuse_dispatcher fdi; struct fuse_abi_data fri; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_filehandle *fufh = NULL; int err = 0; int op = FUSE_RELEASE; fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n", vp, fufh_type); fufh = &(fvdat->fufh[fufh_type]); if (FUFH_IS_VALID(fufh)) { panic("osxfuse: filehandle_put called on a valid fufh (type=%d)", fufh_type); /* NOTREACHED */ } if (fuse_isdeadfs(vp)) { goto out; } data = fuse_get_mpdata(vnode_mount(vp)); if (vnode_isdir(vp)) { op = FUSE_RELEASEDIR; } fdisp_init_abi(&fdi, fuse_release_in, data); fdisp_make_vp(&fdi, op, vp, context); fuse_abi_data_init(&fri, DATOI(data), fdi.indata); fuse_release_in_set_fh(&fri, fufh->fh_id); fuse_release_in_set_flags(&fri, fufh->open_flags); if (waitfor == FUSE_OP_FOREGROUNDED) { err = fdisp_wait_answ(&fdi); if (err) { goto out; } } else { fuse_insert_message(fdi.tick); } fuse_ticket_release(fdi.tick); out: FUSE_OSAddAtomic(-1, (SInt32 *)&fuse_fh_current); fuse_invalidate_attr(vp); return err; }
static int fuse_sync_callback(vnode_t vp, void *cargs) { int type; struct fuse_sync_cargs *args; struct fuse_vnode_data *fvdat; struct fuse_filehandle *fufh; struct fuse_data *data; mount_t mp; if (!vnode_hasdirtyblks(vp)) { return VNODE_RETURNED; } mp = vnode_mount(vp); if (fuse_isdeadfs_mp(mp)) { return VNODE_RETURNED_DONE; } data = fuse_get_mpdata(mp); if (!fuse_implemented(data, (vnode_isdir(vp)) ? FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) { return VNODE_RETURNED; } args = (struct fuse_sync_cargs *)cargs; fvdat = VTOFUD(vp); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_unlock(data->biglock); #endif cluster_push(vp, 0); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_lock(data->biglock); #endif for (type = 0; type < FUFH_MAXTYPE; type++) { fufh = &(fvdat->fufh[type]); if (FUFH_IS_VALID(fufh)) { (void)fuse_internal_fsync_fh(vp, args->context, fufh, FUSE_OP_FOREGROUNDED); } } /* * In general: * * - can use vnode_isinuse() if need be * - vnode and UBC are in lock-step * - note that umount will call ubc_sync_range() */ return VNODE_RETURNED; }
__private_extern__ int fuse_internal_readdir(vnode_t vp, uio_t uio, vfs_context_t context, struct fuse_filehandle *fufh, struct fuse_iov *cookediov, int *numdirent) { int err = 0; struct fuse_dispatcher fdi; struct fuse_read_in *fri; struct fuse_data *data; if (uio_resid(uio) == 0) { return 0; } fdisp_init(&fdi, 0); /* Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p I/O). */ while (uio_resid(uio) > 0) { fdi.iosize = sizeof(*fri); fdisp_make_vp(&fdi, FUSE_READDIR, vp, context); fri = fdi.indata; fri->fh = fufh->fh_id; fri->offset = uio_offset(uio); data = fuse_get_mpdata(vnode_mount(vp)); fri->size = (typeof(fri->size))min((size_t)uio_resid(uio), data->iosize); if ((err = fdisp_wait_answ(&fdi))) { goto out; } if ((err = fuse_internal_readdir_processdata(vp, uio, fri->size, fdi.answ, fdi.iosize, cookediov, numdirent))) { break; } } /* done: */ fuse_ticket_drop(fdi.tick); out: return ((err == -1) ? 0 : err); }
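/*
 * Illustrative sketch, not from the original sources: how a FUSE_READDIR
 * reply buffer (fdi.answ above) is conventionally laid out and walked.
 * Each record is a struct fuse_dirent followed by the name, padded to an
 * 8-byte boundary; the layout and FUSE_DIRENT_ALIGN follow the standard
 * FUSE wire protocol, while consume() is a hypothetical callback standing
 * in for fuse_internal_readdir_processdata().
 */
#include <stdint.h>
#include <stddef.h>

struct fuse_dirent {
    uint64_t ino;
    uint64_t off;       /* opaque cookie for resuming the listing */
    uint32_t namelen;
    uint32_t type;
    char     name[];    /* 'namelen' bytes, not NUL-terminated */
};

#define FUSE_DIRENT_ALIGN(x) \
    (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))

static void walk_dirents(const char *buf, size_t bufsize,
                         void (*consume)(const struct fuse_dirent *))
{
    size_t pos = 0;
    while (pos + sizeof(struct fuse_dirent) <= bufsize) {
        const struct fuse_dirent *de =
            (const struct fuse_dirent *)(buf + pos);
        size_t reclen = FUSE_DIRENT_ALIGN(sizeof(*de) + de->namelen);
        if (pos + reclen > bufsize)
            break;          /* truncated trailing record */
        consume(de);
        pos += reclen;
    }
}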
static errno_t fuse_vfsop_sync(mount_t mp, int waitfor, vfs_context_t context) { uint64_t mntflags; struct fuse_sync_cargs args; int allerror = 0; fuse_trace_printf_vfsop(); mntflags = vfs_flags(mp); if (fuse_isdeadfs_mp(mp)) { return 0; } if (vfs_isupdate(mp)) { return 0; } if (vfs_isrdonly(mp)) { return EROFS; // should panic!? } /* * Write back each (modified) fuse node. */ args.context = context; args.waitfor = waitfor; args.error = 0; #if M_FUSE4X_ENABLE_BIGLOCK struct fuse_data *data = fuse_get_mpdata(mp); fuse_biglock_unlock(data->biglock); #endif vnode_iterate(mp, 0, fuse_sync_callback, (void *)&args); #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_lock(data->biglock); #endif if (args.error) { allerror = args.error; } /* * For other types of stale file system information, such as: * * - fs control info * - quota information * - modified superblock */ return allerror; }
static int fuse_read_directbackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh) { struct fuse_dispatcher fdi; struct fuse_read_in *fri; int err = 0; if (uio->uio_resid == 0) return (0); fdisp_init(&fdi, 0); /* * XXX In "normal" case we use an intermediate kernel buffer for * transmitting data from daemon's context to ours. Eventually, we should * get rid of this. Anyway, if the target uio lives in sysspace (we are * called from pageops), and the input data doesn't need kernel-side * processing (we are not called from readdir) we can already invoke * an optimized, "peer-to-peer" I/O routine. */ while (uio->uio_resid > 0) { fdi.iosize = sizeof(*fri); fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred); fri = fdi.indata; fri->fh = fufh->fh_id; fri->offset = uio->uio_offset; fri->size = MIN(uio->uio_resid, fuse_get_mpdata(vp->v_mount)->max_read); FS_DEBUG2G("fri->fh %ju, fri->offset %ju, fri->size %ju\n", (uintmax_t)fri->fh, (uintmax_t)fri->offset, (uintmax_t)fri->size); if ((err = fdisp_wait_answ(&fdi))) goto out; FS_DEBUG2G("complete: got iosize=%d, requested fri.size=%zd; " "resid=%zd offset=%ju\n", fri->size, fdi.iosize, uio->uio_resid, (uintmax_t)uio->uio_offset); if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio))) break; if (fdi.iosize < fri->size) break; } out: fdisp_destroy(&fdi); return (err); }
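/*
 * Illustrative sketch, not from the original sources: the termination
 * rules of the chunked read loop above, reduced to flat buffers. Each
 * round trip asks for at most max_read bytes; a reply shorter than the
 * request is treated as end-of-file. read_chunk() is a hypothetical
 * stand-in for the FUSE_READ round trip (fdisp_wait_answ above).
 */
#include <stddef.h>
#include <sys/types.h>

static ssize_t
read_all(size_t max_read, char *dst, size_t resid, off_t off,
         ssize_t (*read_chunk)(off_t off, size_t len, char *dst))
{
    size_t done = 0;

    while (resid > 0) {
        size_t want = resid < max_read ? resid : max_read;
        ssize_t got = read_chunk(off, want, dst + done);

        if (got < 0)
            return got;            /* error from the daemon */
        done  += (size_t)got;
        off   += got;
        resid -= (size_t)got;
        if ((size_t)got < want)
            break;                 /* short reply: end of file */
    }
    return (ssize_t)done;
}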
static int fuse_write_directbackend(struct vnode *vp, struct uio *uio, struct ucred *cred, struct fuse_filehandle *fufh, int ioflag) { struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_write_in *fwi; struct fuse_dispatcher fdi; size_t chunksize; int diff; int err = 0; if (uio->uio_resid == 0) return (0); if (ioflag & IO_APPEND) uio_setoffset(uio, fvdat->filesize); fdisp_init(&fdi, 0); while (uio->uio_resid > 0) { chunksize = MIN(uio->uio_resid, fuse_get_mpdata(vp->v_mount)->max_write); fdi.iosize = sizeof(*fwi) + chunksize; fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred); fwi = fdi.indata; fwi->fh = fufh->fh_id; fwi->offset = uio->uio_offset; fwi->size = chunksize; if ((err = uiomove((char *)fdi.indata + sizeof(*fwi), chunksize, uio))) break; if ((err = fdisp_wait_answ(&fdi))) break; diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size; if (diff < 0) { err = EINVAL; break; } uio->uio_resid += diff; uio->uio_offset -= diff; if (uio->uio_offset > fvdat->filesize) fuse_vnode_setsize(vp, cred, uio->uio_offset); } fdisp_destroy(&fdi); return (err); }
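/*
 * Illustrative sketch, not from the original sources: the short-write
 * bookkeeping used above. The cursor is advanced by the full chunk when
 * the payload is copied out (uiomove); if the daemon then reports that it
 * accepted fewer bytes, the residual count and offset are wound back by
 * the difference so the next iteration retries the unwritten tail. Plain
 * integers stand in for struct uio.
 */
#include <stdint.h>
#include <sys/types.h>

struct io_cursor {
    off_t  offset;  /* next byte to write */
    size_t resid;   /* bytes still to go */
};

/* Returns 0 on success, -1 for an over-long reply (EINVAL above). */
static int
account_write(struct io_cursor *io, size_t chunksize, uint32_t daemon_wrote)
{
    if (daemon_wrote > chunksize)
        return -1;
    size_t diff = chunksize - daemon_wrote;
    io->resid  += diff;            /* re-expose the unwritten tail */
    io->offset -= (off_t)diff;
    return 0;
}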
static int fuse_sync_callback(vnode_t vp, void *cargs) { int type; struct fuse_sync_cargs *args; struct fuse_vnode_data *fvdat; struct fuse_dispatcher fdi; struct fuse_filehandle *fufh; struct fuse_data *data; mount_t mp; if (!vnode_hasdirtyblks(vp)) { return VNODE_RETURNED; } mp = vnode_mount(vp); if (fuse_isdeadfs(vp)) { return VNODE_RETURNED_DONE; } data = fuse_get_mpdata(mp); if (!fuse_implemented(data, (vnode_isdir(vp)) ? FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) { return VNODE_RETURNED; } args = (struct fuse_sync_cargs *)cargs; fvdat = VTOFUD(vp); cluster_push(vp, 0); fuse_dispatcher_init(&fdi, 0); for (type = 0; type < FUFH_MAXTYPE; type++) { fufh = &(fvdat->fufh[type]); if (FUFH_IS_VALID(fufh)) { (void)fuse_internal_fsync(vp, args->context, fufh, &fdi); } } /* * In general: * * - can use vnode_isinuse() if need be * - vnode and UBC are in lock-step * - note that umount will call ubc_sync_range() */ return VNODE_RETURNED; }
__inline__ int fuse_vncache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp) { #if M_OSXFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_OSXFUSE_ENABLE_HUGE_LOCK struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp)); fuse_biglock_unlock(data->biglock); #endif int ret = cache_lookup(dvp, vpp, cnp); #if M_OSXFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_OSXFUSE_ENABLE_HUGE_LOCK fuse_biglock_lock(data->biglock); #endif #if FUSE_TRACE_VNCACHE IOLog("OSXFUSE: cache lookup ret=%d, dvp=%p, *vpp=%p, %s\n", ret, dvp, *vpp, cnp->cn_nameptr); #endif return ret; }
void fuse_vncache_enter(vnode_t dvp, vnode_t vp, struct componentname *cnp) { #if FUSE_TRACE_VNCACHE IOLog("osxfuse: cache enter dvp=%p, vp=%p, %s\n", dvp, vp, cnp->cn_nameptr); #endif #if M_OSXFUSE_ENABLE_BIG_LOCK struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp)); bool biglock_locked = fuse_biglock_have_lock(data->biglock); if (biglock_locked) { fuse_biglock_unlock(data->biglock); } #endif /* M_OSXFUSE_ENABLE_BIG_LOCK */ cache_enter(dvp, vp, cnp); #if M_OSXFUSE_ENABLE_BIG_LOCK if (biglock_locked) { fuse_biglock_lock(data->biglock); } #endif }
void fuse_vncache_purge(vnode_t vp) { #if FUSE_TRACE_VNCACHE IOLog("osxfuse: cache purge vp=%p\n", vp); #endif #if M_OSXFUSE_ENABLE_BIG_LOCK struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp)); bool biglock_locked = fuse_biglock_have_lock(data->biglock); if (biglock_locked) { fuse_biglock_unlock(data->biglock); } #endif /* M_OSXFUSE_ENABLE_BIG_LOCK */ cache_purge(vp); #if M_OSXFUSE_ENABLE_BIG_LOCK if (biglock_locked) { fuse_biglock_lock(data->biglock); } #endif }
__private_extern__ int fuse_setextendedsecurity(mount_t mp, int state) { int err = EINVAL; struct fuse_data *data; data = fuse_get_mpdata(mp); if (!data) { return ENXIO; } if (state == 1) { /* Turning on extended security. */ if ((data->dataflags & FSESS_NO_VNCACHE) || (data->dataflags & FSESS_DEFER_PERMISSIONS)) { return EINVAL; } data->dataflags |= (FSESS_EXTENDED_SECURITY | FSESS_DEFAULT_PERMISSIONS); if (vfs_authopaque(mp)) { vfs_clearauthopaque(mp); } if (vfs_authopaqueaccess(mp)) { vfs_clearauthopaqueaccess(mp); } vfs_setextendedsecurity(mp); err = 0; } else if (state == 0) { /* Turning off extended security. */ data->dataflags &= ~FSESS_EXTENDED_SECURITY; vfs_clearextendedsecurity(mp); err = 0; } return err; }
/* struct vnop_access_args { struct vnode *a_vp; #if VOP_ACCESS_TAKES_ACCMODE_T accmode_t a_accmode; #else int a_mode; #endif struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_access(struct vop_access_args *ap) { struct vnode *vp = ap->a_vp; int accmode = ap->a_accmode; struct ucred *cred = ap->a_cred; struct fuse_access_param facp; struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp)); int err; FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp)); if (fuse_isdeadfs(vp)) { if (vnode_isvroot(vp)) { return 0; } return ENXIO; } if (!(data->dataflags & FSESS_INITED)) { if (vnode_isvroot(vp)) { if (priv_check_cred(cred, PRIV_VFS_ADMIN, 0) || (fuse_match_cred(data->daemoncred, cred) == 0)) { return 0; } } return EBADF; } if (vnode_islnk(vp)) { return 0; } bzero(&facp, sizeof(facp)); err = fuse_internal_access(vp, accmode, &facp, ap->a_td, ap->a_cred); FS_DEBUG2G("err=%d accmode=0x%x\n", err, accmode); return err; }
/* struct vnop_readlink_args { struct vnode *a_vp; struct uio *a_uio; struct ucred *a_cred; }; */ static int fuse_vnop_readlink(struct vop_readlink_args *ap) { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct ucred *cred = ap->a_cred; struct fuse_dispatcher fdi; int err; FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp)); if (fuse_isdeadfs(vp)) { return ENXIO; } if (!vnode_islnk(vp)) { return EINVAL; } fdisp_init(&fdi, 0); err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred); if (err) { goto out; } if (((char *)fdi.answ)[0] == '/' && fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) { char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname; err = uiomove(mpth, strlen(mpth), uio); } if (!err) { err = uiomove(fdi.answ, fdi.iosize, uio); } out: fdisp_destroy(&fdi); return err; }
void fdisp_make_pid(struct fuse_dispatcher *fdip, enum fuse_opcode op, struct mount *mp, uint64_t nid, pid_t pid, struct ucred *cred) { struct fuse_data *data = fuse_get_mpdata(mp); debug_printf("fdip=%p, op=%d, mp=%p, nid=%ju\n", fdip, op, mp, (uintmax_t)nid); if (fdip->tick) { fticket_refresh(fdip->tick); } else { fdip->tick = fuse_ticket_fetch(data); } FUSE_DIMALLOC(&fdip->tick->tk_ms_fiov, fdip->finh, fdip->indata, fdip->iosize); fuse_setup_ihead(fdip->finh, fdip->tick, nid, op, fdip->iosize, pid, cred); }
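/*
 * Illustrative sketch, not from the original sources: the
 * refresh-or-fetch pattern above, where a dispatcher that already owns a
 * ticket rewinds it for reuse instead of allocating a fresh one on every
 * request. This keeps per-request allocator churn out of loops such as
 * the chunked read/write backends. ticket_reset()/ticket_alloc() are
 * hypothetical stand-ins for fticket_refresh()/fuse_ticket_fetch().
 */
#include <stdlib.h>

struct ticket {
    unsigned seq;       /* ...plus message buffers in the real thing */
};

static void ticket_reset(struct ticket *t) { t->seq++; }
static struct ticket *ticket_alloc(void)
{
    return calloc(1, sizeof(struct ticket));
}

struct dispatcher {
    struct ticket *tick;
};

static struct ticket *dispatcher_ticket(struct dispatcher *d)
{
    if (d->tick)
        ticket_reset(d->tick);     /* reuse: just rewind its state */
    else
        d->tick = ticket_alloc();  /* first use: allocate */
    return d->tick;
}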
static errno_t fuse_vfsop_setattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t context) { int error = 0; fuse_trace_printf_vfsop(); kauth_cred_t cred = vfs_context_ucred(context); if (!fuse_vfs_context_issuser(context) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner)) { return EACCES; } struct fuse_data *data = fuse_get_mpdata(mp); if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) { if (!fuse_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME))) { error = ENOTSUP; goto out; } if (fsap->f_vol_name[0] == 0) { error = EINVAL; goto out; } size_t namelen = strlen(fsap->f_vol_name); if (namelen >= MAXPATHLEN) { error = ENAMETOOLONG; goto out; } vnode_t root_vp; error = fuse_vfsop_root(mp, &root_vp, context); if (error) { goto out; } struct fuse_dispatcher fdi; fdisp_init(&fdi, namelen + 1); fdisp_make_vp(&fdi, FUSE_SETVOLNAME, root_vp, context); memcpy((char *)fdi.indata, fsap->f_vol_name, namelen); ((char *)fdi.indata)[namelen] = '\0'; if (!(error = fdisp_wait_answ(&fdi))) { fuse_ticket_drop(fdi.tick); } (void)vnode_put(root_vp); if (error) { if (error == ENOSYS) { error = ENOTSUP; fuse_clear_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME)); } goto out; } copystr(fsap->f_vol_name, data->volname, MAXPATHLEN - 1, &namelen); bzero(data->volname + namelen, MAXPATHLEN - namelen); VFSATTR_SET_SUPPORTED(fsap, f_vol_name); } out: return error; }
static int fuse_vfsop_statfs(struct mount *mp, struct statfs *sbp) { struct fuse_dispatcher fdi; int err = 0; struct fuse_statfs_out *fsfo; struct fuse_data *data; FS_DEBUG2G("mp %p: %s\n", mp, mp->mnt_stat.f_mntfromname); data = fuse_get_mpdata(mp); if (!(data->dataflags & FSESS_INITED)) goto fake; fdisp_init(&fdi, 0); fdisp_make(&fdi, FUSE_STATFS, mp, FUSE_ROOT_ID, NULL, NULL); err = fdisp_wait_answ(&fdi); if (err) { fdisp_destroy(&fdi); if (err == ENOTCONN) { /* * We want to seem a legitimate fs even if the daemon * is stiff dead... (so that, e.g., we can still do path * based unmounting after the daemon dies). */ goto fake; } return err; } fsfo = fdi.answ; sbp->f_blocks = fsfo->st.blocks; sbp->f_bfree = fsfo->st.bfree; sbp->f_bavail = fsfo->st.bavail; sbp->f_files = fsfo->st.files; sbp->f_ffree = fsfo->st.ffree; /* cast from uint64_t to int64_t */ sbp->f_namemax = fsfo->st.namelen; sbp->f_bsize = fsfo->st.frsize; /* cast from uint32_t to uint64_t */ FS_DEBUG("fuse_statfs_out -- blocks: %llu, bfree: %llu, bavail: %llu, " "files: %llu, ffree: %llu, bsize: %i, namelen: %i\n", (unsigned long long)fsfo->st.blocks, (unsigned long long)fsfo->st.bfree, (unsigned long long)fsfo->st.bavail, (unsigned long long)fsfo->st.files, (unsigned long long)fsfo->st.ffree, fsfo->st.bsize, fsfo->st.namelen); fdisp_destroy(&fdi); return 0; fake: sbp->f_blocks = 0; sbp->f_bfree = 0; sbp->f_bavail = 0; sbp->f_files = 0; sbp->f_ffree = 0; sbp->f_namemax = 0; sbp->f_bsize = FUSE_DEFAULT_BLOCKSIZE; return 0; }
/* struct vnop_rename_args { struct vnode *a_fdvp; struct vnode *a_fvp; struct componentname *a_fcnp; struct vnode *a_tdvp; struct vnode *a_tvp; struct componentname *a_tcnp; }; */ static int fuse_vnop_rename(struct vop_rename_args *ap) { struct vnode *fdvp = ap->a_fdvp; struct vnode *fvp = ap->a_fvp; struct componentname *fcnp = ap->a_fcnp; struct vnode *tdvp = ap->a_tdvp; struct vnode *tvp = ap->a_tvp; struct componentname *tcnp = ap->a_tcnp; struct fuse_data *data; int err = 0; FS_DEBUG2G("from: inode=%ju name=%*s -> to: inode=%ju name=%*s\n", (uintmax_t)VTOI(fvp), (int)fcnp->cn_namelen, fcnp->cn_nameptr, (uintmax_t)(tvp == NULL ? -1 : VTOI(tvp)), (int)tcnp->cn_namelen, tcnp->cn_nameptr); if (fuse_isdeadfs(fdvp)) { return ENXIO; } if (fvp->v_mount != tdvp->v_mount || (tvp && fvp->v_mount != tvp->v_mount)) { FS_DEBUG("cross-device rename: %s -> %s\n", fcnp->cn_nameptr, (tcnp != NULL ? tcnp->cn_nameptr : "(NULL)")); err = EXDEV; goto out; } cache_purge(fvp); /* * FUSE library is expected to check if target directory is not * under the source directory in the file system tree. * Linux performs this check at VFS level. */ data = fuse_get_mpdata(vnode_mount(tdvp)); sx_xlock(&data->rename_lock); err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp); if (err == 0) { if (tdvp != fdvp) fuse_vnode_setparent(fvp, tdvp); if (tvp != NULL) fuse_vnode_setparent(tvp, NULL); } sx_unlock(&data->rename_lock); if (tvp != NULL && tvp != fvp) { cache_purge(tvp); } if (vnode_isdir(fvp)) { if ((tvp != NULL) && vnode_isdir(tvp)) { cache_purge(tdvp); } cache_purge(fdvp); } out: if (tdvp == tvp) { vrele(tdvp); } else { vput(tdvp); } if (tvp != NULL) { vput(tvp); } vrele(fdvp); vrele(fvp); return err; }
__private_extern__ int fuse_internal_strategy(vnode_t vp, buf_t bp) { size_t biosize; size_t chunksize; size_t respsize; int mapped = FALSE; int mode; int op; int vtype = vnode_vtype(vp); int err = 0; caddr_t bufdat; off_t left; off_t offset; int32_t bflags = buf_flags(bp); fufh_type_t fufh_type; struct fuse_dispatcher fdi; struct fuse_data *data; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_filehandle *fufh = NULL; mount_t mp = vnode_mount(vp); data = fuse_get_mpdata(mp); biosize = data->blocksize; if (!(vtype == VREG || vtype == VDIR)) { return ENOTSUP; } if (bflags & B_READ) { mode = FREAD; fufh_type = FUFH_RDONLY; /* FUFH_RDWR will also do */ } else { mode = FWRITE; fufh_type = FUFH_WRONLY; /* FUFH_RDWR will also do */ } if (fvdat->flag & FN_CREATING) { fuse_lck_mtx_lock(fvdat->createlock); if (fvdat->flag & FN_CREATING) { (void)fuse_msleep(fvdat->creator, fvdat->createlock, PDROP | PINOD | PCATCH, "fuse_internal_strategy", NULL); } else { fuse_lck_mtx_unlock(fvdat->createlock); } } fufh = &(fvdat->fufh[fufh_type]); if (!FUFH_IS_VALID(fufh)) { fufh_type = FUFH_RDWR; fufh = &(fvdat->fufh[fufh_type]); if (!FUFH_IS_VALID(fufh)) { fufh = NULL; } else { /* We've successfully fallen back to FUFH_RDWR. */ } } if (!fufh) { if (mode == FREAD) { fufh_type = FUFH_RDONLY; } else { fufh_type = FUFH_RDWR; } /* * Let's NOT do the filehandle preflight check here. */ err = fuse_filehandle_get(vp, NULL, fufh_type, 0 /* mode */); if (!err) { fufh = &(fvdat->fufh[fufh_type]); FUFH_AUX_INC(fufh); /* We've created a NEW fufh of type fufh_type. open_count is 1. */ } } else { /* good fufh */ FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_reuse_count); /* We're using an existing fufh of type fufh_type. */ } if (err) { /* A more typical error case. */ if ((err == ENOTCONN) || fuse_isdeadfs(vp)) { buf_seterror(bp, EIO); buf_biodone(bp); return EIO; } IOLog("MacFUSE: strategy failed to get fh " "(vtype=%d, fufh_type=%d, err=%d)\n", vtype, fufh_type, err); if (!vfs_issynchronous(mp)) { IOLog("MacFUSE: asynchronous write failed!\n"); } buf_seterror(bp, EIO); buf_biodone(bp); return EIO; } if (!fufh) { panic("MacFUSE: tried everything but still no fufh"); /* NOTREACHED */ } #define B_INVAL 0x00040000 /* Does not contain valid info. */ #define B_ERROR 0x00080000 /* I/O error occurred. */ if (bflags & B_INVAL) { IOLog("MacFUSE: buffer does not contain valid information\n"); } if (bflags & B_ERROR) { IOLog("MacFUSE: an I/O error has occurred\n"); } if (buf_count(bp) == 0) { return 0; } fdisp_init(&fdi, 0); if (mode == FREAD) { struct fuse_read_in *fri; buf_setresid(bp, buf_count(bp)); offset = (off_t)((off_t)buf_blkno(bp) * biosize); if (offset >= fvdat->filesize) { /* Trying to read at/after EOF? */ if (offset != fvdat->filesize) { /* Trying to read after EOF? */ buf_seterror(bp, EINVAL); } buf_biodone(bp); return 0; } /* Note that we just made sure that offset < fvdat->filesize. */ if ((offset + buf_count(bp)) > fvdat->filesize) { /* Trimming read */ buf_setcount(bp, (uint32_t)(fvdat->filesize - offset)); } if (buf_map(bp, &bufdat)) { IOLog("MacFUSE: failed to map buffer in strategy\n"); return EFAULT; } else { mapped = TRUE; } while (buf_resid(bp) > 0) { chunksize = min((size_t)buf_resid(bp), data->iosize); fdi.iosize = sizeof(*fri); op = FUSE_READ; if (vtype == VDIR) { op = FUSE_READDIR; } fdisp_make_vp(&fdi, op, vp, (vfs_context_t)0); fri = fdi.indata; fri->fh = fufh->fh_id; /* * Historical note: * * fri->offset = ((off_t)(buf_blkno(bp))) * biosize; * * This wasn't being incremented!?
*/ fri->offset = offset; fri->size = (typeof(fri->size))chunksize; fdi.tick->tk_aw_type = FT_A_BUF; fdi.tick->tk_aw_bufdata = bufdat; if ((err = fdisp_wait_answ(&fdi))) { /* There was a problem with reading. */ goto out; } respsize = fdi.tick->tk_aw_bufsize; if (respsize < 0) { /* Cannot really happen... */ err = EIO; goto out; } buf_setresid(bp, (uint32_t)(buf_resid(bp) - respsize)); bufdat += respsize; offset += respsize; /* Did we hit EOF before being done? */ if ((respsize == 0) && (buf_resid(bp) > 0)) { /* * Historical note: * If we don't get enough data, just fill the rest with zeros. * In NFS context, this would mean a hole in the file. */ /* Zero-pad the incomplete buffer. */ bzero(bufdat, buf_resid(bp)); buf_setresid(bp, 0); break; } } /* while (buf_resid(bp) > 0) */ } else { /* write */ struct fuse_write_in *fwi; struct fuse_write_out *fwo; int merr = 0; off_t diff; if (buf_map(bp, &bufdat)) { IOLog("MacFUSE: failed to map buffer in strategy\n"); return EFAULT; } else { mapped = TRUE; } /* Write begin */ buf_setresid(bp, buf_count(bp)); offset = (off_t)((off_t)buf_blkno(bp) * biosize); /* XXX: TBD -- Check here for extension (writing past end) */ left = buf_count(bp); while (left) { fdi.iosize = sizeof(*fwi); op = FUSE_WRITE; fdisp_make_vp(&fdi, op, vp, (vfs_context_t)0); chunksize = min((size_t)left, data->iosize); fwi = fdi.indata; fwi->fh = fufh->fh_id; fwi->offset = offset; fwi->size = (typeof(fwi->size))chunksize; fdi.tick->tk_ms_type = FT_M_BUF; fdi.tick->tk_ms_bufdata = bufdat; fdi.tick->tk_ms_bufsize = chunksize; /* About to write <chunksize> at <offset> */ if ((err = fdisp_wait_answ(&fdi))) { merr = 1; break; } fwo = fdi.answ; diff = chunksize - fwo->size; if (diff < 0) { err = EINVAL; break; } left -= fwo->size; bufdat += fwo->size; offset += fwo->size; buf_setresid(bp, buf_resid(bp) - fwo->size); } if (merr) { goto out; } } if (fdi.tick) { fuse_ticket_drop(fdi.tick); } else { /* No ticket upon leaving */ } out: if (err) { buf_seterror(bp, err); } if (mapped == TRUE) { buf_unmap(bp); } buf_biodone(bp); return err; }
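/*
 * Illustrative sketch, not from the original sources: two details of the
 * buffer-strategy read path above. The file offset is derived from the
 * logical block number (offset = blkno * biosize), and any bytes the
 * daemon did not return are zero-filled so a short reply never leaves
 * stale data in the mapped buffer; that matches the NFS-style "hole reads
 * as zeros" note in the code. Plain memory stands in for buf_t.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <sys/types.h>

static off_t block_to_offset(int64_t blkno, size_t biosize)
{
    return (off_t)blkno * (off_t)biosize;
}

/* Accept 'got' bytes into a 'want'-byte destination; zero the rest. */
static void accept_short_read(char *buf, size_t want, size_t got)
{
    if (got < want)
        memset(buf + got, 0, want - got);
}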
errno_t FSNodeGetOrCreateFileVNodeByID(vnode_t *vnPtr, uint32_t flags, struct fuse_abi_data *feo, mount_t mp, vnode_t dvp, vfs_context_t context, uint32_t *oflags) { int err; vnode_t vn = NULLVP; HNodeRef hn = NULL; struct fuse_vnode_data *fvdat = NULL; struct fuse_data *mntdata = NULL; fuse_device_t dummy_device; struct fuse_abi_data fa; enum vtype vtyp; fuse_abi_data_init(&fa, feo->fad_version, fuse_entry_out_get_attr(feo)); vtyp = IFTOVT(fuse_attr_get_mode(&fa)); if ((vtyp >= VBAD) || (vtyp == VNON)) { return EINVAL; } int markroot = (flags & FN_IS_ROOT) ? 1 : 0; uint64_t size = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_size(&fa); uint32_t rdev = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_rdev(&fa); uint64_t generation = fuse_entry_out_get_generation(feo); mntdata = fuse_get_mpdata(mp); dummy_device = mntdata->fdev; err = HNodeLookupCreatingIfNecessary(dummy_device, fuse_entry_out_get_nodeid(feo), 0 /* fork index */, &hn, &vn); if ((err == 0) && (vn == NULL)) { struct vnode_fsparam params; fvdat = (struct fuse_vnode_data *)FSNodeGenericFromHNode(hn); if (!fvdat->fInitialised) { fvdat->fInitialised = true; /* self */ fvdat->vp = NULLVP; /* hold on */ fvdat->nodeid = fuse_entry_out_get_nodeid(feo); fvdat->generation = generation; /* parent */ fvdat->parentvp = dvp; if (dvp) { fvdat->parent_nodeid = VTOI(dvp); } else { fvdat->parent_nodeid = 0; } /* I/O */ { int k; for (k = 0; k < FUFH_MAXTYPE; k++) { FUFH_USE_RESET(&(fvdat->fufh[k])); } } /* flags */ fvdat->flag = flags; fvdat->c_flag = 0; /* meta */ /* XXX: truncation */ fvdat->entry_valid.tv_sec = (time_t)fuse_entry_out_get_entry_valid(feo); fvdat->entry_valid.tv_nsec = fuse_entry_out_get_entry_valid_nsec(feo); /* XXX: truncation */ fvdat->attr_valid.tv_sec = 0; fvdat->attr_valid.tv_nsec = 0; /* XXX: truncation */ fvdat->modify_time.tv_sec = (time_t)fuse_attr_get_mtime(&fa); fvdat->modify_time.tv_nsec = fuse_attr_get_mtimensec(&fa); fvdat->filesize = size; fvdat->nlookup = 0; fvdat->vtype = vtyp; /* locking */ fvdat->createlock = lck_mtx_alloc_init(fuse_lock_group, fuse_lock_attr); fvdat->creator = current_thread(); #if M_OSXFUSE_ENABLE_TSLOCKING fvdat->nodelock = lck_rw_alloc_init(fuse_lock_group, fuse_lock_attr); fvdat->nodelockowner = NULL; fvdat->truncatelock = lck_rw_alloc_init(fuse_lock_group, fuse_lock_attr); #endif } if (err == 0) { params.vnfs_mp = mp; params.vnfs_vtype = vtyp; params.vnfs_str = NULL; params.vnfs_dvp = dvp; /* NULLVP for the root vnode */ params.vnfs_fsnode = hn; #if M_OSXFUSE_ENABLE_SPECFS if ((vtyp == VBLK) || (vtyp == VCHR)) { params.vnfs_vops = fuse_spec_operations; params.vnfs_rdev = (dev_t)rdev; #else if (0) { #endif #if M_OSXFUSE_ENABLE_FIFOFS } else if (vtyp == VFIFO) { params.vnfs_vops = fuse_fifo_operations; params.vnfs_rdev = 0; (void)rdev; #else } else if (0) { #endif } else { params.vnfs_vops = fuse_vnode_operations; params.vnfs_rdev = 0; (void)rdev; } params.vnfs_marksystem = 0; params.vnfs_cnp = NULL; params.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE; params.vnfs_filesize = size; params.vnfs_markroot = markroot; #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_unlock(mntdata->biglock); #endif err = vnode_create(VNCREATE_FLAVOR, (uint32_t)sizeof(params), &params, &vn); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_lock(mntdata->biglock); #endif } if (err == 0) { if (markroot) { fvdat->parentvp = vn; } else { fvdat->parentvp = dvp; } if (oflags) { *oflags |= MAKEENTRY; } /* Need VT_OSXFUSE from xnu */ vnode_settag(vn, VT_OTHER); cache_attrs(vn, fuse_entry_out, feo); HNodeAttachVNodeSucceeded(hn, 0 /* forkIndex */,
vn); FUSE_OSAddAtomic(1, (SInt32 *)&fuse_vnodes_current); } else { if (HNodeAttachVNodeFailed(hn, 0 /* forkIndex */)) { FSNodeScrub(fvdat); HNodeScrubDone(hn); } } } if (err == 0) { if (vnode_vtype(vn) != vtyp) { IOLog("osxfuse: vnode changed type behind us (old=%d, new=%d)\n", vnode_vtype(vn), vtyp); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_unlock(mntdata->biglock); #endif fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT); vnode_put(vn); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_lock(mntdata->biglock); #endif err = EIO; } else if (VTOFUD(vn)->generation != generation) { IOLog("osxfuse: vnode changed generation\n"); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_unlock(mntdata->biglock); #endif fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT); vnode_put(vn); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_lock(mntdata->biglock); #endif err = ESTALE; } } if (err == 0) { *vnPtr = vn; } /* assert((err == 0) == (*vnPtr != NULL)); */ return err; } int fuse_vget_i(vnode_t *vpp, uint32_t flags, struct fuse_abi_data *feo, struct componentname *cnp, vnode_t dvp, mount_t mp, vfs_context_t context) { int err = 0; if (!feo) { return EINVAL; } err = FSNodeGetOrCreateFileVNodeByID(vpp, flags, feo, mp, dvp, context, NULL); if (err) { return err; } if (!fuse_isnovncache_mp(mp) && (cnp->cn_flags & MAKEENTRY)) { fuse_vncache_enter(dvp, *vpp, cnp); } /* found: */ VTOFUD(*vpp)->nlookup++; return 0; }
int fuse_internal_access(struct vnode *vp, mode_t mode, struct fuse_access_param *facp, struct thread *td, struct ucred *cred) { int err = 0; uint32_t mask = 0; int dataflags; int vtype; struct mount *mp; struct fuse_dispatcher fdi; struct fuse_access_in *fai; struct fuse_data *data; /* NOT YET DONE */ /* * If this vnop gives you trouble, just return 0 here for a lazy * kludge. */ /* return 0;*/ fuse_trace_printf_func(); mp = vnode_mount(vp); vtype = vnode_vtype(vp); data = fuse_get_mpdata(mp); dataflags = data->dataflags; if ((mode & VWRITE) && vfs_isrdonly(mp)) { return EACCES; } /* Unless explicitly permitted, deny everyone except the fs owner. */ if (vnode_isvroot(vp) && !(facp->facc_flags & FACCESS_NOCHECKSPY)) { if (!(dataflags & FSESS_DAEMON_CAN_SPY)) { int denied = fuse_match_cred(data->daemoncred, cred); if (denied) { return EPERM; } } facp->facc_flags |= FACCESS_NOCHECKSPY; } if (!(facp->facc_flags & FACCESS_DO_ACCESS)) { return 0; } if (((vtype == VREG) && (mode & VEXEC))) { #ifdef NEED_MOUNT_ARGUMENT_FOR_THIS /* Let the kernel handle this through open / close heuristics.*/ return ENOTSUP; #else /* Let the kernel handle this. */ return 0; #endif } if (!fsess_isimpl(mp, FUSE_ACCESS)) { /* Let the kernel handle this. */ return 0; } if (dataflags & FSESS_DEFAULT_PERMISSIONS) { /* Let the kernel handle this. */ return 0; } if ((mode & VADMIN) != 0) { err = priv_check_cred(cred, PRIV_VFS_ADMIN, 0); if (err) { return err; } } if ((mode & (VWRITE | VAPPEND | VADMIN)) != 0) { mask |= W_OK; } if ((mode & VREAD) != 0) { mask |= R_OK; } if ((mode & VEXEC) != 0) { mask |= X_OK; } bzero(&fdi, sizeof(fdi)); fdisp_init(&fdi, sizeof(*fai)); fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred); fai = fdi.indata; fai->mask = F_OK; fai->mask |= mask; err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); if (err == ENOSYS) { fsess_set_notimpl(mp, FUSE_ACCESS); err = 0; } return err; }
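/*
 * Illustrative sketch, not from the original sources: the mode
 * translation performed above before issuing FUSE_ACCESS. The kernel's
 * VREAD/VWRITE/VEXEC bits (plus VAPPEND and VADMIN, which also imply
 * write intent) collapse into the POSIX access(2) mask that the FUSE
 * daemon understands. The V* constants are redefined locally with
 * FreeBSD's values so the sketch stands alone.
 */
#include <unistd.h>     /* F_OK, R_OK, W_OK, X_OK */

#define VEXEC   000100
#define VWRITE  000200
#define VREAD   000400
#define VADMIN  010000
#define VAPPEND 040000

static int vaccess_to_posix_mask(int accmode)
{
    int mask = F_OK;

    if (accmode & (VWRITE | VAPPEND | VADMIN))
        mask |= W_OK;
    if (accmode & VREAD)
        mask |= R_OK;
    if (accmode & VEXEC)
        mask |= X_OK;
    return mask;
}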
static void handle_capabilities_and_attributes(mount_t mp, struct vfs_attr *attr) { struct fuse_data *data = fuse_get_mpdata(mp); if (!data) { panic("fuse4x: no private data for mount point?"); } attr->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] = 0 // | VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_SYMBOLICLINKS /* * Note that we don't really have hard links in a fuse4x file system * unless the user file system daemon provides persistent/consistent * inode numbers. Maybe instead of returning the "wrong" answer here * we should just deny knowledge of this capability in the valid bits * below. */ | VOL_CAP_FMT_HARDLINKS // | VOL_CAP_FMT_JOURNAL // | VOL_CAP_FMT_JOURNAL_ACTIVE | VOL_CAP_FMT_NO_ROOT_TIMES // | VOL_CAP_FMT_SPARSE_FILES // | VOL_CAP_FMT_ZERO_RUNS // | VOL_CAP_FMT_CASE_SENSITIVE | VOL_CAP_FMT_CASE_PRESERVING | VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_2TB_FILESIZE // | VOL_CAP_FMT_OPENDENYMODES // | VOL_CAP_FMT_HIDDEN_FILES // | VOL_CAP_FMT_PATH_FROM_ID ; attr->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] = 0 | VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_SYMBOLICLINKS | VOL_CAP_FMT_HARDLINKS | VOL_CAP_FMT_JOURNAL | VOL_CAP_FMT_JOURNAL_ACTIVE | VOL_CAP_FMT_NO_ROOT_TIMES | VOL_CAP_FMT_SPARSE_FILES | VOL_CAP_FMT_ZERO_RUNS | VOL_CAP_FMT_CASE_SENSITIVE | VOL_CAP_FMT_CASE_PRESERVING | VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_2TB_FILESIZE | VOL_CAP_FMT_OPENDENYMODES | VOL_CAP_FMT_HIDDEN_FILES | VOL_CAP_FMT_PATH_FROM_ID ; attr->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] = 0 // | VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST // | VOL_CAP_INT_NFSEXPORT // | VOL_CAP_INT_READDIRATTR // | VOL_CAP_INT_EXCHANGEDATA // | VOL_CAP_INT_COPYFILE // | VOL_CAP_INT_ALLOCATE // | VOL_CAP_INT_VOL_RENAME | VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK | VOL_CAP_INT_EXTENDED_SECURITY // | VOL_CAP_INT_USERACCESS // | VOL_CAP_INT_MANLOCK // | VOL_CAP_INT_EXTENDED_ATTR // | VOL_CAP_INT_NAMEDSTREAMS ; if (data->dataflags & FSESS_NATIVE_XATTR) { attr->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXTENDED_ATTR; } /* Don't set the EXCHANGEDATA capability if it's known not to be * implemented in the FUSE daemon. 
*/ if (fuse_implemented(data, FSESS_NOIMPLBIT(EXCHANGE))) { attr->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXCHANGEDATA; } attr->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] = 0 | VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_NFSEXPORT | VOL_CAP_INT_READDIRATTR | VOL_CAP_INT_EXCHANGEDATA | VOL_CAP_INT_COPYFILE | VOL_CAP_INT_ALLOCATE | VOL_CAP_INT_VOL_RENAME | VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK | VOL_CAP_INT_EXTENDED_SECURITY | VOL_CAP_INT_USERACCESS | VOL_CAP_INT_MANLOCK | VOL_CAP_INT_EXTENDED_ATTR | VOL_CAP_INT_NAMEDSTREAMS ; attr->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0; attr->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0; attr->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2] = 0; attr->f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0; VFSATTR_SET_SUPPORTED(attr, f_capabilities); attr->f_attributes.validattr.commonattr = 0 | ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | ATTR_CMN_OBJTYPE // | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID // | ATTR_CMN_OBJPERMANENTID | ATTR_CMN_PAROBJID // | ATTR_CMN_SCRIPT // | ATTR_CMN_CRTIME | ATTR_CMN_MODTIME // | ATTR_CMN_CHGTIME // | ATTR_CMN_ACCTIME // | ATTR_CMN_BKUPTIME // | ATTR_CMN_FNDRINFO | ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | ATTR_CMN_FLAGS // | ATTR_CMN_USERACCESS | ATTR_CMN_EXTENDED_SECURITY // | ATTR_CMN_UUID // | ATTR_CMN_GRPUUID // | ATTR_CMN_FILEID // | ATTR_CMN_PARENTID ; attr->f_attributes.validattr.volattr = 0 | ATTR_VOL_FSTYPE | ATTR_VOL_SIGNATURE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | ATTR_VOL_SPACEAVAIL // | ATTR_VOL_MINALLOCATION // | ATTR_VOL_ALLOCATIONCLUMP | ATTR_VOL_IOBLOCKSIZE // | ATTR_VOL_OBJCOUNT | ATTR_VOL_FILECOUNT // | ATTR_VOL_DIRCOUNT // | ATTR_VOL_MAXOBJCOUNT | ATTR_VOL_MOUNTPOINT | ATTR_VOL_NAME | ATTR_VOL_MOUNTFLAGS | ATTR_VOL_MOUNTEDDEVICE // | ATTR_VOL_ENCODINGSUSED | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES // | ATTR_VOL_INFO ; attr->f_attributes.validattr.dirattr = 0 | ATTR_DIR_LINKCOUNT // | ATTR_DIR_ENTRYCOUNT // | ATTR_DIR_MOUNTSTATUS ; attr->f_attributes.validattr.fileattr = 0 | ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | ATTR_FILE_ALLOCSIZE | ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE // | ATTR_FILE_FORKCOUNT // | ATTR_FILE_FORKLIST | ATTR_FILE_DATALENGTH | ATTR_FILE_DATAALLOCSIZE // | ATTR_FILE_RSRCLENGTH // | ATTR_FILE_RSRCALLOCSIZE ; attr->f_attributes.validattr.forkattr = 0; // | ATTR_FORK_TOTALSIZE // | ATTR_FORK_ALLOCSIZE ; // Handle some special cases if (!(data->dataflags & FSESS_CASE_INSENSITIVE)) { attr->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] |= VOL_CAP_FMT_CASE_SENSITIVE; } /* if (data->dataflags & FSESS_VOL_RENAME) { attr->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_VOL_RENAME; } else { fuse_clear_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME)); } */ /* Not yet. */ fuse_clear_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME)); if (data->dataflags & FSESS_XTIMES) { attr->f_attributes.validattr.commonattr |= (ATTR_CMN_BKUPTIME | ATTR_CMN_CHGTIME | ATTR_CMN_CRTIME); } else { fuse_clear_implemented(data, FSESS_NOIMPLBIT(GETXTIMES)); } // All attributes that we do support, we support natively. 
attr->f_attributes.nativeattr.commonattr = \ attr->f_attributes.validattr.commonattr; attr->f_attributes.nativeattr.volattr = \ attr->f_attributes.validattr.volattr; attr->f_attributes.nativeattr.dirattr = \ attr->f_attributes.validattr.dirattr; attr->f_attributes.nativeattr.fileattr = \ attr->f_attributes.validattr.fileattr; attr->f_attributes.nativeattr.forkattr = \ attr->f_attributes.validattr.forkattr; VFSATTR_SET_SUPPORTED(attr, f_attributes); }
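/*
 * Illustrative sketch, not from the original sources: the two-level
 * encoding behind the capabilities/valid pairs filled in above. For each
 * feature bit, "valid" means the file system makes a claim about it and
 * "capabilities" gives the claim's value; a capability bit without the
 * matching valid bit tells callers nothing.
 */
#include <stdint.h>

enum cap_answer { CAP_NO, CAP_YES, CAP_UNKNOWN };

struct caps {
    uint32_t capabilities;
    uint32_t valid;
};

static enum cap_answer query_cap(const struct caps *c, uint32_t bit)
{
    if (!(c->valid & bit))
        return CAP_UNKNOWN;   /* fs makes no claim either way */
    return (c->capabilities & bit) ? CAP_YES : CAP_NO;
}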
static errno_t fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context) { int err = 0; int flags = 0; fuse_device_t fdev; struct fuse_data *data; struct fuse_dispatcher fdi; vnode_t fuse_rootvp = NULLVP; fuse_trace_printf_vfsop(); if (mntflags & MNT_FORCE) { flags |= FORCECLOSE; } data = fuse_get_mpdata(mp); if (!data) { panic("fuse4x: no mount private data in vfs_unmount"); } #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_lock(data->biglock); #endif fdev = data->fdev; if (fdata_dead_get(data)) { /* * If the file system daemon is dead, it's pointless to try to do * any unmount-time operations that go out to user space. Therefore, * we pretend that this is a force unmount. However, this isn't of much * use. That's because if any non-root vnode is in use, the vflush() * that the kernel does before calling our VFS_UNMOUNT will fail * if the original unmount wasn't forcible already. That earlier * vflush is called with SKIPROOT though, so it wouldn't bail out * on the root vnode being in use. * * If we want, we could set FORCECLOSE here so that a non-forced * unmount will be "upgraded" to a forced unmount if the root vnode * is busy (you are cd'd to the mount point, for example). It's not * quite pure to do that though. * * flags |= FORCECLOSE; * log("fuse4x: forcing unmount on a dead file system\n"); */ } else if (!(data->dataflags & FSESS_INITED)) { flags |= FORCECLOSE; log("fuse4x: forcing unmount on not-yet-alive file system\n"); fdata_set_dead(data); } fuse_rootvp = data->rootvp; fuse_trace_printf("%s: Calling vflush(mp, fuse_rootvp, flags=0x%X);\n", __FUNCTION__, flags); #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_unlock(data->biglock); #endif err = vflush(mp, fuse_rootvp, flags); #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_lock(data->biglock); #endif fuse_trace_printf("%s: Done.\n", __FUNCTION__); if (err) { #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_unlock(data->biglock); #endif return err; } if (vnode_isinuse(fuse_rootvp, 1) && !(flags & FORCECLOSE)) { #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_unlock(data->biglock); #endif return EBUSY; } if (fdata_dead_get(data)) { goto alreadydead; } fdisp_init(&fdi, 0 /* no data to send along */); fdisp_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context); fuse_trace_printf("%s: Waiting for reply from FUSE_DESTROY.\n", __FUNCTION__); err = fdisp_wait_answ(&fdi); fuse_trace_printf("%s: Reply received.\n", __FUNCTION__); if (!err) { fuse_ticket_drop(fdi.tick); } /* * Note that dounmount() signals a VQ_UNMOUNT VFS event. */ fdata_set_dead(data); alreadydead: fuse_trace_printf("%s: Calling vnode_rele(fuse_rootvp);\n", __FUNCTION__); #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_unlock(data->biglock); #endif vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount().
*/ #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_lock(data->biglock); #endif fuse_trace_printf("%s: Done.\n", __FUNCTION__); data->rootvp = NULLVP; fuse_trace_printf("%s: Calling vflush(mp, NULLVP, FORCECLOSE);\n", __FUNCTION__); #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_unlock(data->biglock); #endif (void)vflush(mp, NULLVP, FORCECLOSE); #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_lock(data->biglock); #endif fuse_trace_printf("%s: Done.\n", __FUNCTION__); fuse_lck_mtx_lock(fdev->mtx); vfs_setfsprivate(mp, NULL); data->dataflags &= ~FSESS_MOUNTED; OSAddAtomic(-1, (SInt32 *)&fuse_mount_count); #if M_FUSE4X_ENABLE_BIGLOCK fuse_biglock_unlock(data->biglock); #endif if (!(data->dataflags & FSESS_OPENED)) { /* fdev->data was left for us to clean up */ fuse_device_close_final(fdev); /* fdev->data is gone now */ } fuse_lck_mtx_unlock(fdev->mtx); return 0; }
/* struct vnop_getattr_args { struct vnode *a_vp; struct vattr *a_vap; struct ucred *a_cred; struct thread *a_td; }; */ static int fuse_vnop_getattr(struct vop_getattr_args *ap) { struct vnode *vp = ap->a_vp; struct vattr *vap = ap->a_vap; struct ucred *cred = ap->a_cred; struct thread *td = curthread; struct fuse_vnode_data *fvdat = VTOFUD(vp); int err = 0; int dataflags; struct fuse_dispatcher fdi; FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp)); dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags; /* Note that we are not bailing out on a dead file system just yet. */ if (!(dataflags & FSESS_INITED)) { if (!vnode_isvroot(vp)) { fdata_set_dead(fuse_get_mpdata(vnode_mount(vp))); err = ENOTCONN; debug_printf("fuse_getattr b: returning ENOTCONN\n"); return err; } else { goto fake; } } fdisp_init(&fdi, 0); if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) { if ((err == ENOTCONN) && vnode_isvroot(vp)) { /* see comment at similar place in fuse_statfs() */ fdisp_destroy(&fdi); goto fake; } if (err == ENOENT) { fuse_internal_vnode_disappear(vp); } goto out; } cache_attrs(vp, (struct fuse_attr_out *)fdi.answ); if (vap != VTOVA(vp)) { memcpy(vap, VTOVA(vp), sizeof(*vap)); } if (vap->va_type != vnode_vtype(vp)) { fuse_internal_vnode_disappear(vp); err = ENOENT; goto out; } if ((fvdat->flag & FN_SIZECHANGE) != 0) vap->va_size = fvdat->filesize; if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) { /* * This is for those cases when the file size changed without us * knowing, and we want to catch up. */ off_t new_filesize = ((struct fuse_attr_out *) fdi.answ)->attr.size; if (fvdat->filesize != new_filesize) { fuse_vnode_setsize(vp, cred, new_filesize); } } debug_printf("fuse_getattr e: returning 0\n"); out: fdisp_destroy(&fdi); return err; fake: bzero(vap, sizeof(*vap)); vap->va_type = vnode_vtype(vp); return 0; }
__private_extern__ int fuse_internal_remove(vnode_t dvp, vnode_t vp, struct componentname *cnp, enum fuse_opcode op, vfs_context_t context) { struct fuse_dispatcher fdi; struct vnode_attr *vap = VTOVA(vp); int need_invalidate = 0; uint64_t target_nlink = 0; mount_t mp = vnode_mount(vp); int err = 0; fdisp_init(&fdi, cnp->cn_namelen + 1); fdisp_make_vp(&fdi, op, dvp, context); memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen); ((char *)fdi.indata)[cnp->cn_namelen] = '\0'; if ((vap->va_nlink > 1) && vnode_isreg(vp)) { need_invalidate = 1; target_nlink = vap->va_nlink; } if (!(err = fdisp_wait_answ(&fdi))) { fuse_ticket_drop(fdi.tick); } fuse_invalidate_attr(dvp); fuse_invalidate_attr(vp); /* * XXX: M_MACFUSE_INVALIDATE_CACHED_VATTRS_UPON_UNLINK * * Consider the case where vap->va_nlink > 1 for the entity being * removed. In our world, other in-memory vnodes that share a link * count each with this one may not know right way that this one just * got deleted. We should let them know, say, through a vnode_iterate() * here and a callback that does fuse_invalidate_attr(vp) on each * relevant vnode. */ if (need_invalidate && !err) { if (!vfs_busy(mp, LK_NOWAIT)) { #if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK struct fuse_data *data = fuse_get_mpdata(mp); fuse_biglock_unlock(data->biglock); #endif vnode_iterate(mp, 0, fuse_internal_remove_callback, (void *)&target_nlink); #if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK fuse_biglock_lock(data->biglock); #endif vfs_unbusy(mp); } else { IOLog("MacFUSE: skipping link count fixup upon remove\n"); } } return err; }
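/*
 * Illustrative sketch, not from the original sources: the shape of a
 * vnode-iteration callback like the fuse_internal_remove_callback
 * referenced above. It visits every vnode of the mount and drops cached
 * attributes on regular files whose link count matches the node that was
 * just unlinked, forcing a fresh FUSE_GETATTR later. The stub types are
 * hypothetical simplifications.
 */
#include <stdint.h>

#define VNODE_RETURNED 0          /* keep iterating (simplified) */

struct vnode_stub {
    int      is_reg;              /* regular file? */
    uint64_t nlink;               /* cached link count */
    int      attrs_valid;         /* cached attributes usable? */
};

static int remove_callback(struct vnode_stub *vp, void *arg)
{
    uint64_t target_nlink = *(uint64_t *)arg;

    if (vp->is_reg && vp->nlink == target_nlink)
        vp->attrs_valid = 0;      /* invalidate, as the XXX note suggests */
    return VNODE_RETURNED;
}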
/* * Because of the vagaries of how a filehandle can be used, we try not to * be too smart in here (we try to be smart elsewhere). It is required that * you come in here only if you really do not have the said filehandle--else * we panic. */ int fuse_filehandle_get(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type, int mode) { struct fuse_dispatcher fdi; struct fuse_abi_data foi; struct fuse_abi_data foo; struct fuse_filehandle *fufh; struct fuse_vnode_data *fvdat = VTOFUD(vp); struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp)); int err = 0; int oflags = 0; int op = FUSE_OPEN; fuse_trace_printf("fuse_filehandle_get(vp=%p, fufh_type=%d, mode=%x)\n", vp, fufh_type, mode); fufh = &(fvdat->fufh[fufh_type]); if (FUFH_IS_VALID(fufh)) { panic("osxfuse: filehandle_get called despite valid fufh (type=%d)", fufh_type); /* NOTREACHED */ } /* * Note that this means we are effectively FILTERING OUT open() flags. */ (void)mode; oflags = fuse_filehandle_xlate_to_oflags(fufh_type); if (vnode_isdir(vp)) { op = FUSE_OPENDIR; if (fufh_type != FUFH_RDONLY) { IOLog("osxfuse: non-rdonly fufh requested for directory\n"); fufh_type = FUFH_RDONLY; } } if (vnode_islnk(vp) && (mode & O_SYMLINK)) { oflags |= O_SYMLINK; } if ((mode & O_TRUNC) && (data->dataflags & FSESS_ATOMIC_O_TRUNC)) { oflags |= O_TRUNC; } fdisp_init_abi(&fdi, fuse_open_in, data); fdisp_make_vp(&fdi, op, vp, context); fuse_abi_data_init(&foi, DATOI(data), fdi.indata); fuse_open_in_set_flags(&foi, oflags); FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_upcall_count); err = fdisp_wait_answ(&fdi); if (err) { #if M_OSXFUSE_ENABLE_UNSUPPORTED const char *vname = vnode_getname(vp); #endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */ if (err == ENOENT) { /* * See comment in fuse_vnop_reclaim(). */ cache_purge(vp); } #if M_OSXFUSE_ENABLE_UNSUPPORTED IOLog("osxfuse: filehandle_get: failed for %s " "(type=%d, err=%d, caller=%p)\n", (vname) ? vname : "?", fufh_type, err, __builtin_return_address(0)); if (vname) { vnode_putname(vname); } #endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */ if (err == ENOENT) { #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_unlock(data->biglock); #endif fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT); #if M_OSXFUSE_ENABLE_BIG_LOCK fuse_biglock_lock(data->biglock); #endif } return err; } FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_current); fuse_abi_data_init(&foo, DATOI(data), fdi.answ); fufh->fh_id = fuse_open_out_get_fh(&foo); fufh->open_count = 1; fufh->open_flags = oflags; fufh->fuse_open_flags = fuse_open_out_get_open_flags(&foo); fufh->aux_count = 0; fuse_ticket_release(fdi.tick); return 0; }
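/*
 * Illustrative sketch, not from the original sources: the kind of mapping
 * fuse_filehandle_xlate_to_oflags() performs, turning the cached-handle
 * category into open(2) access bits for the FUSE_OPEN request. The exact
 * table in the real function may differ; this is the obvious one.
 */
#include <fcntl.h>

typedef enum {
    FUFH_RDONLY = 0,
    FUFH_WRONLY,
    FUFH_RDWR,
    FUFH_MAXTYPE
} fufh_type_t;

static int fufh_type_to_oflags(fufh_type_t type)
{
    switch (type) {
    case FUFH_RDONLY: return O_RDONLY;
    case FUFH_WRONLY: return O_WRONLY;
    case FUFH_RDWR:   return O_RDWR;
    default:          return -1;   /* invalid handle type */
    }
}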
static errno_t fuse_vfsop_getattr(mount_t mp, struct vfs_attr *attr, vfs_context_t context) { int err = 0; bool deading = false, faking = false; struct fuse_dispatcher fdi; struct fuse_statfs_out *fsfo; struct fuse_statfs_out faked; struct fuse_data *data; fuse_trace_printf_vfsop(); data = fuse_get_mpdata(mp); if (!data) { panic("fuse4x: no private data for mount point?"); } if (!(data->dataflags & FSESS_INITED)) { // coreservices process requests ATTR_VOL_CAPABILITIES on the mountpoint right before // returning from mount() syscall. We need to fake the output because the daemon might // not be ready to respond yet (and a deadlock would happen). faking = true; goto dostatfs; } fdisp_init(&fdi, 0); fdisp_make(&fdi, FUSE_STATFS, mp, FUSE_ROOT_ID, context); if ((err = fdisp_wait_answ(&fdi))) { // If we cannot communicate with the daemon (most likely because // it's dead), we still want to portray that we are a bona fide // file system so that we can be gracefully unmounted. if (err == ENOTCONN) { deading = faking = true; goto dostatfs; } return err; } dostatfs: if (faking) { bzero(&faked, sizeof(faked)); fsfo = &faked; } else { fsfo = fdi.answ; } if (fsfo->st.bsize == 0) { fsfo->st.bsize = FUSE_DEFAULT_IOSIZE; } if (fsfo->st.frsize == 0) { fsfo->st.frsize = FUSE_DEFAULT_BLOCKSIZE; } /* optimal transfer block size; will go into f_iosize in the kernel */ fsfo->st.bsize = fuse_round_size(fsfo->st.bsize, FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE); /* file system fragment size; will go into f_bsize in the kernel */ fsfo->st.frsize = fuse_round_size(fsfo->st.frsize, FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE); /* We must have: f_iosize >= f_bsize (fsfo->st.bsize >= fsfo->st.frsize) */ if (fsfo->st.bsize < fsfo->st.frsize) { fsfo->st.bsize = fsfo->st.frsize; } /* * TBD: Possibility: * * For actual I/O to fuse4x's "virtual" storage device, we use * data->blocksize and data->iosize. These are really meant to be * constant across the lifetime of a single mount. If necessary, we * can experiment by updating the mount point's stat with the frsize * and bsize values we come across here. */ /* * FUSE user daemon will (might) give us this: * * __u64 blocks; // total data blocks in the file system * __u64 bfree; // free blocks in the file system * __u64 bavail; // free blocks available to non-superuser * __u64 files; // total file nodes in the file system * __u64 ffree; // free file nodes in the file system * __u32 bsize; // preferred/optimal file system block size * __u32 namelen; // maximum length of filenames * __u32 frsize; // fundamental file system block size * * On Mac OS X, we will map this data to struct vfs_attr as follows: * * Mac OS X FUSE * -------- ---- * uint64_t f_supported <- // handled here * uint64_t f_active <- // handled here * uint64_t f_objcount <- - * uint64_t f_filecount <- files * uint64_t f_dircount <- - * uint32_t f_bsize <- frsize * size_t f_iosize <- bsize * uint64_t f_blocks <- blocks * uint64_t f_bfree <- bfree * uint64_t f_bavail <- bavail * uint64_t f_bused <- blocks - bfree * uint64_t f_files <- files * uint64_t f_ffree <- ffree * fsid_t f_fsid <- // handled elsewhere * uid_t f_owner <- // handled elsewhere * ... capabilities <- // handled here * ...
attributes <- // handled here * f_create_time <- - * f_modify_time <- - * f_access_time <- - * f_backup_time <- - * uint32_t f_fssubtype <- // daemon provides * char *f_vol_name <- // handled here * uint16_t f_signature <- // handled here * uint16_t f_carbon_fsid <- // handled here */ VFSATTR_RETURN(attr, f_filecount, fsfo->st.files); VFSATTR_RETURN(attr, f_bsize, fsfo->st.frsize); VFSATTR_RETURN(attr, f_iosize, fsfo->st.bsize); VFSATTR_RETURN(attr, f_blocks, fsfo->st.blocks); VFSATTR_RETURN(attr, f_bfree, fsfo->st.bfree); VFSATTR_RETURN(attr, f_bavail, fsfo->st.bavail); VFSATTR_RETURN(attr, f_bused, (fsfo->st.blocks - fsfo->st.bfree)); VFSATTR_RETURN(attr, f_files, fsfo->st.files); VFSATTR_RETURN(attr, f_ffree, fsfo->st.ffree); /* f_fsid and f_owner handled elsewhere. */ /* Handle capabilities and attributes. */ handle_capabilities_and_attributes(mp, attr); VFSATTR_RETURN(attr, f_create_time, kZeroTime); VFSATTR_RETURN(attr, f_modify_time, kZeroTime); VFSATTR_RETURN(attr, f_access_time, kZeroTime); VFSATTR_RETURN(attr, f_backup_time, kZeroTime); if (deading) { VFSATTR_RETURN(attr, f_fssubtype, (uint32_t)FUSE_FSSUBTYPE_INVALID); } else { VFSATTR_RETURN(attr, f_fssubtype, data->fssubtype); } /* Daemon needs to pass this. */ if (VFSATTR_IS_ACTIVE(attr, f_vol_name)) { if (data->volname[0] != 0) { strncpy(attr->f_vol_name, data->volname, MAXPATHLEN); attr->f_vol_name[MAXPATHLEN - 1] = 0; VFSATTR_SET_SUPPORTED(attr, f_vol_name); } } VFSATTR_RETURN(attr, f_signature, OSSwapBigToHostInt16(FUSEFS_SIGNATURE)); VFSATTR_RETURN(attr, f_carbon_fsid, 0); if (!faking) fuse_ticket_drop(fdi.tick); return 0; }
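/*
 * Illustrative sketch, not from the original sources: a plausible
 * fuse_round_size(), assuming it rounds the daemon-supplied block size up
 * to a power of two and clamps it into [minsz, maxsz]; the real routine
 * may round differently. Together with the "bsize >= frsize" fixup above,
 * this keeps f_iosize and f_bsize sane no matter what the daemon reports.
 */
#include <stdint.h>

static uint32_t round_up_pow2(uint32_t v)
{
    uint32_t p = 1;

    while (p < v && p < (1u << 31))
        p <<= 1;
    return p;
}

static uint32_t round_size(uint32_t size, uint32_t minsz, uint32_t maxsz)
{
    uint32_t r = round_up_pow2(size);

    if (r < minsz)
        r = minsz;
    if (r > maxsz)
        r = maxsz;
    return r;
}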
static int fuse_vfsop_unmount(struct mount *mp, int mntflags) { int err = 0; int flags = 0; struct cdev *fdev; struct fuse_data *data; struct fuse_dispatcher fdi; struct thread *td = curthread; fuse_trace_printf_vfsop(); if (mntflags & MNT_FORCE) { flags |= FORCECLOSE; } data = fuse_get_mpdata(mp); if (!data) { panic("no private data for mount point?"); } /* There is 1 extra root vnode reference (mp->mnt_data). */ FUSE_LOCK(); if (data->vroot != NULL) { struct vnode *vroot = data->vroot; data->vroot = NULL; FUSE_UNLOCK(); vrele(vroot); } else FUSE_UNLOCK(); err = vflush(mp, 0, flags, td); if (err) { debug_printf("vflush failed"); return err; } if (fdata_get_dead(data)) { goto alreadydead; } fdisp_init(&fdi, 0); fdisp_make(&fdi, FUSE_DESTROY, mp, 0, td, NULL); err = fdisp_wait_answ(&fdi); fdisp_destroy(&fdi); fdata_set_dead(data); alreadydead: FUSE_LOCK(); data->mp = NULL; fdev = data->fdev; fdata_trydestroy(data); FUSE_UNLOCK(); MNT_ILOCK(mp); mp->mnt_data = NULL; mp->mnt_flag &= ~MNT_LOCAL; MNT_IUNLOCK(mp); dev_rel(fdev); return 0; }