/*
 * Release a FUSE filehandle: send FUSE_RELEASE (or FUSE_RELEASEDIR for
 * directories) to the user-space daemon for the slot fufh_type of vp.
 *
 * Invariant: the caller must already have invalidated the fufh slot --
 * we panic if it still looks valid.  The slot's fh_id/open_flags remain
 * readable and are what we forward to the daemon.
 *
 * waitfor selects a synchronous round trip (FUSE_OP_FOREGROUNDED) versus
 * a fire-and-forget enqueue of the ticket (backgrounded).
 *
 * Returns 0, or the error from the foregrounded wait.  On every path
 * (including dead-fs and error) we decrement the global open-handle
 * counter and invalidate the vnode's cached attributes.
 */
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type,
                    fuse_op_waitfor_t waitfor)
{
    struct fuse_data *data;
    struct fuse_dispatcher fdi;
    struct fuse_abi_data fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;

    int err = 0;
    int op = FUSE_RELEASE;

    fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
                      vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("osxfuse: filehandle_put called on a valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    /* Daemon is gone: skip the upcall but still do the bookkeeping below. */
    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    data = fuse_get_mpdata(vnode_mount(vp));

    if (vnode_isdir(vp)) {
        op = FUSE_RELEASEDIR;
    }

    fdisp_init_abi(&fdi, fuse_release_in, data);
    fdisp_make_vp(&fdi, op, vp, context);

    fuse_abi_data_init(&fri, DATOI(data), fdi.indata);

    fuse_release_in_set_fh(&fri, fufh->fh_id);
    fuse_release_in_set_flags(&fri, fufh->open_flags);

    if (waitfor == FUSE_OP_FOREGROUNDED) {
        err = fdisp_wait_answ(&fdi);
        if (err) {
            /*
             * NOTE(review): the error path intentionally skips the
             * fuse_ticket_release() below -- presumably the failed wait
             * disposes of the ticket itself; confirm against
             * fdisp_wait_answ() before changing this flow.
             */
            goto out;
        }
    } else {
        /* Backgrounded: queue the request; nobody waits for the answer. */
        fuse_insert_message(fdi.tick);
    }

    fuse_ticket_release(fdi.tick);

out:
    FUSE_OSAddAtomic(-1, (SInt32 *)&fuse_fh_current);
    fuse_invalidate_attr(vp);

    return err;
}
/*
 * Because of the vagaries of how a filehandle can be used, we try not to
 * be too smart in here (we try to be smart elsewhere). It is required that
 * you come in here only if you really do not have the said filehandle--else
 * we panic.
 *
 * Obtain a filehandle for the slot fufh_type of vp by sending FUSE_OPEN
 * (or FUSE_OPENDIR) to the daemon.  The open flags sent are derived from
 * fufh_type, optionally augmented with O_SYMLINK / O_TRUNC from mode.
 * On success, fills in the fufh slot (fh_id, counts, flags) and returns 0;
 * on failure returns the daemon's error.  ENOENT additionally purges the
 * name cache and revokes the vnode.
 */
int
fuse_filehandle_get(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type,
                    int mode)
{
    struct fuse_dispatcher fdi;
    struct fuse_abi_data foi;
    struct fuse_abi_data foo;
    struct fuse_filehandle *fufh;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));

    int err = 0;
    int oflags = 0;
    int op = FUSE_OPEN;

    fuse_trace_printf("fuse_filehandle_get(vp=%p, fufh_type=%d, mode=%x)\n",
                      vp, fufh_type, mode);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("osxfuse: filehandle_get called despite valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    /*
     * Note that this means we are effectively FILTERING OUT open() flags:
     * the flags sent to the daemon come from the fufh_type translation,
     * not from mode (except for the O_SYMLINK / O_TRUNC special cases
     * handled explicitly below).
     */
    (void)mode;
    oflags = fuse_filehandle_xlate_to_oflags(fufh_type);

    if (vnode_isdir(vp)) {
        op = FUSE_OPENDIR;
        /* Directories only ever get read-only handles; demote and log. */
        if (fufh_type != FUFH_RDONLY) {
            IOLog("osxfuse: non-rdonly fufh requested for directory\n");
            fufh_type = FUFH_RDONLY;
        }
    }

    if (vnode_islnk(vp) && (mode & O_SYMLINK)) {
        oflags |= O_SYMLINK;
    }

    /* Pass O_TRUNC through only if the daemon advertised atomic O_TRUNC. */
    if ((mode & O_TRUNC) && (data->dataflags & FSESS_ATOMIC_O_TRUNC)) {
        oflags |= O_TRUNC;
    }

    fdisp_init_abi(&fdi, fuse_open_in, data);
    fdisp_make_vp(&fdi, op, vp, context);

    fuse_abi_data_init(&foi, DATOI(data), fdi.indata);

    fuse_open_in_set_flags(&foi, oflags);

    FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_upcall_count);
    err = fdisp_wait_answ(&fdi);
    if (err) {
#if M_OSXFUSE_ENABLE_UNSUPPORTED
        const char *vname = vnode_getname(vp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */
        if (err == ENOENT) {
            /*
             * See comment in fuse_vnop_reclaim().
             */
            cache_purge(vp);
        }

#if M_OSXFUSE_ENABLE_UNSUPPORTED
        IOLog("osxfuse: filehandle_get: failed for %s "
              "(type=%d, err=%d, caller=%p)\n",
              (vname) ? vname : "?", fufh_type, err,
              __builtin_return_address(0));
        if (vname) {
            vnode_putname(vname);
        }
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */

        if (err == ENOENT) {
            /*
             * The node vanished under us; drop the big lock around the
             * revoke, which may call back into the VFS.
             */
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(data->biglock);
#endif
            fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(data->biglock);
#endif
        }

        return err;
    }

    FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_current);

    /* Decode the daemon's fuse_open_out answer into the fufh slot. */
    fuse_abi_data_init(&foo, DATOI(data), fdi.answ);

    fufh->fh_id = fuse_open_out_get_fh(&foo);
    fufh->open_count = 1;
    fufh->open_flags = oflags;
    fufh->fuse_open_flags = fuse_open_out_get_open_flags(&foo);
    fufh->aux_count = 0;

    fuse_ticket_release(fdi.tick);

    return 0;
}
/*
 * VFS setattr for the mount: currently only supports setting the volume
 * name (f_vol_name) by sending FUSE_SETVOLNAME to the daemon via the root
 * vnode.  Only the superuser or the mount's owner may do this.
 *
 * Returns 0 on success; EACCES, EINVAL, ENAMETOOLONG, ENOTSUP, or the
 * dispatch error otherwise.  ENOSYS from the daemon is downgraded to
 * ENOTSUP and the SETVOLNAME capability is cleared so we don't retry.
 */
static errno_t
fuse_vfsop_setattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t context)
{
    int error = 0;

    fuse_trace_printf_vfsop();

    kauth_cred_t cred = vfs_context_ucred(context);

    /* Only root or the mount owner may change volume attributes. */
    if (!fuse_vfs_context_issuser(context) &&
        (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner)) {
        return EACCES;
    }

    struct fuse_data *data = fuse_get_mpdata(mp);

    if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
        if (!fuse_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME))) {
            error = ENOTSUP;
            goto out;
        }

        if (fsap->f_vol_name[0] == 0) {
            error = EINVAL;
            goto out;
        }

        size_t namelen = strlen(fsap->f_vol_name);
        if (namelen >= MAXPATHLEN) {
            error = ENAMETOOLONG;
            goto out;
        }

        /* SETVOLNAME is addressed to the root vnode; take a reference. */
        vnode_t root_vp;
        error = fuse_vfsop_root(mp, &root_vp, context);
        if (error) {
            goto out;
        }

        /* Request payload is the new name plus its NUL terminator. */
        struct fuse_dispatcher fdi;
        fdisp_init(&fdi, namelen + 1);
        fdisp_make_vp(&fdi, FUSE_SETVOLNAME, root_vp, context);
        memcpy((char *)fdi.indata, fsap->f_vol_name, namelen);
        ((char *)fdi.indata)[namelen] = '\0';

        error = fdisp_wait_answ(&fdi);
        if (!error) {
            /*
             * NOTE(review): ticket released only on success -- presumably
             * the failed wait disposes of it; confirm in fdisp_wait_answ().
             */
            fuse_ticket_release(fdi.tick);
        }

        /* Drop the big lock around vnode_put, which can re-enter the VFS. */
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(data->biglock);
#endif
        (void)vnode_put(root_vp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_lock(data->biglock);
#endif

        if (error) {
            if (error == ENOSYS) {
                /* Daemon doesn't implement SETVOLNAME; remember that. */
                error = ENOTSUP;
                fuse_clear_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME));
            }
            goto out;
        }

        /*
         * Mirror the new name into our cached copy; copystr updates
         * namelen to the copied length (including the NUL), and we zero
         * the remainder of the fixed-size buffer.
         */
        copystr(fsap->f_vol_name, data->volname, MAXPATHLEN - 1, &namelen);
        bzero(data->volname + namelen, MAXPATHLEN - namelen);

        VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
    }

out:
    return error;
}
/*
 * VFS unmount: flush all vnodes, notify the daemon with FUSE_DESTROY
 * (unless the session is already dead), and tear down the per-mount
 * state.  MNT_FORCE upgrades the vflush to FORCECLOSE.
 *
 * Lock ordering is delicate here: the big lock is dropped around every
 * call that can re-enter the VFS (vflush, vnode_rele), and the device
 * lock brackets the mount_state transitions.  Returns 0, or the vflush
 * error / EBUSY if the root vnode is still in use on a non-forced unmount.
 */
static errno_t
fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context)
{
    int err = 0;
    int flags = 0;

    fuse_device_t fdev;
    struct fuse_data *data;
    struct fuse_dispatcher fdi;

    vnode_t fuse_rootvp = NULLVP;

    fuse_trace_printf_vfsop();

    if (mntflags & MNT_FORCE) {
        flags |= FORCECLOSE;
    }

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("OSXFUSE: no mount private data in vfs_unmount");
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif

    fdev = data->fdev;

    if (fdata_dead_get(data)) {
        /*
         * If the file system daemon is dead, it's pointless to try to do
         * any unmount-time operations that go out to user space. Therefore,
         * we pretend that this is a force unmount. However, this isn't of much
         * use. That's because if any non-root vnode is in use, the vflush()
         * that the kernel does before calling our VFS_UNMOUNT will fail
         * if the original unmount wasn't forcible already. That earlier
         * vflush is called with SKIPROOT though, so it wouldn't bail out
         * on the root vnode being in use.
         *
         * If we want, we could set FORCECLOSE here so that a non-forced
         * unmount will be "upgraded" to a forced unmount if the root vnode
         * is busy (you are cd'd to the mount point, for example). It's not
         * quite pure to do that though.
         *
         *    flags |= FORCECLOSE;
         *    IOLog("OSXFUSE: forcing unmount on a dead file system\n");
         */
    } else if (!(data->dataflags & FSESS_INITED)) {
        /* Session never came up; force the unmount and mark it dead. */
        flags |= FORCECLOSE;
        IOLog("OSXFUSE: forcing unmount on not-yet-alive file system\n");
        fdata_set_dead(data, false);
    }

    fuse_rootvp = data->rootvp;

    /* vflush can re-enter the VFS; drop the big lock around it. */
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif
    err = vflush(mp, fuse_rootvp, flags);
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif
    if (err) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return err;
    }

    /* Root vnode still busy and this isn't a forced unmount: bail. */
    if (vnode_isinuse(fuse_rootvp, 1) && !(flags & FORCECLOSE)) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return EBUSY;
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif
    vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount(). */
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif

    data->rootvp = NULLVP;

    /* Final forced flush of everything (including the root) -- best effort. */
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif
    (void)vflush(mp, NULLVP, FORCECLOSE);
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif

    /*
     * Set mount state to FM_UNMOUNTING.
     *
     * Note: fdata_set_dead will signal VQ_DEAD if it is called for a volume,
     * that is still mounted.
     */
    fuse_device_lock(fdev);
    data->mount_state = FM_UNMOUNTING;
    fuse_device_unlock(fdev);

    if (!fdata_dead_get(data)) {
        /* Tell the daemon the session is going away. */
        fdisp_init(&fdi, 0 /* no data to send along */);
        fdisp_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context);
        err = fdisp_wait_answ(&fdi);
        if (!err) {
            fuse_ticket_release(fdi.tick);
        }

        /* Note that dounmount() signals a VQ_UNMOUNT VFS event */
        fdata_set_dead(data, false);
    }

    fuse_device_lock(fdev);
    vfs_setfsprivate(mp, NULL);
    data->mount_state = FM_NOTMOUNTED;

    OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif

    if (!(data->dataflags & FSESS_OPENED)) {
        /* fdev->data was left for us to clean up */
        fuse_device_close_final(fdev);
        /* fdev->data is gone now */
        /*
         * NOTE(review): close_final appears to free the per-mount data;
         * nothing dereferences `data` after this point in this function,
         * and the device lock is still held -- verify that remains true
         * on any future change.
         */
    }
    fuse_device_unlock(fdev);

    return 0;
}
/*
 * VFS getattr for the mount: sends FUSE_STATFS to the daemon and maps the
 * answer onto struct vfs_attr (see the mapping table below).
 *
 * Two degraded modes share the "faking" path, which reports all-zero
 * statistics instead of asking the daemon:
 *  - the session is not yet FSESS_INITED (coreservices probes the mount
 *    point before the daemon is ready), and
 *  - the daemon is unreachable (ENOTCONN), in which case "deading" also
 *    makes f_fssubtype report FUSE_FSSUBTYPE_INVALID.
 * Note: on the faking path `fdi` is never initialized -- every use of
 * fdi.answ / fdi.tick below is guarded by the faking flag.
 */
static errno_t
fuse_vfsop_getattr(mount_t mp, struct vfs_attr *attr, vfs_context_t context)
{
    int err = 0;
    bool deading = false, faking = false;

    struct fuse_dispatcher fdi;
    struct fuse_statfs_out fsfo;
    struct fuse_data *data;

    fuse_trace_printf_vfsop();

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("OSXFUSE: no private data for mount point?");
    }

    if (!(data->dataflags & FSESS_INITED)) {
        /*
         * coreservices process requests ATTR_VOL_CAPABILITIES on the mount
         * point right before returning from syscall mount. We need to fake
         * the output because the FUSE server might not be ready to respond yet.
         */
        faking = true;
        goto dostatfs;
    }

    fdisp_init(&fdi, 0);
    fdisp_make(&fdi, FUSE_STATFS, mp, FUSE_ROOT_ID, context);
    err = fdisp_wait_answ(&fdi);
    if (err) {
        /*
         * If we cannot communicate with the daemon (most likely because
         * it's dead), we still want to portray that we are a bonafide
         * file system so that we can be gracefully unmounted.
         */
        if (err == ENOTCONN) {
            deading = faking = true;
            goto dostatfs;
        }

        return err;
    }

dostatfs:
    if (faking) {
        bzero(&fsfo, sizeof(fsfo));
    } else {
        fuse_abi_out(fuse_statfs_out, DTOABI(data), fdi.answ, &fsfo);
    }

    /* Substitute sane defaults for sizes the daemon left at zero. */
    if (fsfo.st.bsize == 0) {
        fsfo.st.bsize = FUSE_DEFAULT_IOSIZE;
    }

    if (fsfo.st.frsize == 0) {
        fsfo.st.frsize = FUSE_DEFAULT_BLOCKSIZE;
    }

    /* optimal transfer block size; will go into f_iosize in the kernel */
    fsfo.st.bsize = fuse_round_size(fsfo.st.bsize, FUSE_MIN_IOSIZE,
                                    FUSE_MAX_IOSIZE);

    /* file system fragment size; will go into f_bsize in the kernel */
    fsfo.st.frsize = fuse_round_size(fsfo.st.frsize, FUSE_MIN_BLOCKSIZE,
                                     FUSE_MAX_BLOCKSIZE);

    /* We must have: f_iosize >= f_bsize (fsfo.st.bsize >= fsfo.st_frsize) */
    if (fsfo.st.bsize < fsfo.st.frsize) {
        fsfo.st.bsize = fsfo.st.frsize;
    }

    /*
     * TBD: Possibility:
     *
     * For actual I/O to OSXFUSE's "virtual" storage device, we use
     * data->blocksize and data->iosize. These are really meant to be
     * constant across the lifetime of a single mount. If necessary, we
     * can experiment by updating the mount point's stat with the frsize
     * and bsize values we come across here.
     */

    /*
     * FUSE server will (might) give us this:
     *
     * __u64   blocks;  // total data blocks in the file system
     * __u64   bfree;   // free blocks in the file system
     * __u64   bavail;  // free blocks available to non-superuser
     * __u64   files;   // total file nodes in the file system
     * __u64   ffree;   // free file nodes in the file system
     * __u32   bsize;   // preferred/optimal file system block size
     * __u32   namelen; // maximum length of filenames
     * __u32   frsize;  // fundamental file system block size
     *
     * On Mac OS X, we will map this data to struct vfs_attr as follows:
     *
     *  Mac OS X                     FUSE
     *  --------                     ----
     *  uint64_t f_supported  <-     // handled here
     *  uint64_t f_active     <-     // handled here
     *  uint64_t f_objcount   <-     -
     *  uint64_t f_filecount  <-     files
     *  uint64_t f_dircount   <-     -
     *  uint32_t f_bsize      <-     frsize
     *  size_t   f_iosize     <-     bsize
     *  uint64_t f_blocks     <-     blocks
     *  uint64_t f_bfree      <-     bfree
     *  uint64_t f_bavail     <-     bavail
     *  uint64_t f_bused      <-     blocks - bfree
     *  uint64_t f_files      <-     files
     *  uint64_t f_ffree      <-     ffree
     *  fsid_t   f_fsid       <-     // handled elsewhere
     *  uid_t    f_owner      <-     // handled elsewhere
     *  ... capabilities      <-     // handled here
     *  ... attributes        <-     // handled here
     *  f_create_time         <-     -
     *  f_modify_time         <-     -
     *  f_access_time         <-     -
     *  f_backup_time         <-     -
     *  uint32_t f_fssubtype  <-     // daemon provides
     *  char *f_vol_name      <-     // handled here
     *  uint16_t f_signature  <-     // handled here
     *  uint16_t f_carbon_fsid <-    // handled here
     */

    VFSATTR_RETURN(attr, f_filecount, fsfo.st.files);
    VFSATTR_RETURN(attr, f_bsize, fsfo.st.frsize);
    VFSATTR_RETURN(attr, f_iosize, fsfo.st.bsize);
    VFSATTR_RETURN(attr, f_blocks, fsfo.st.blocks);
    VFSATTR_RETURN(attr, f_bfree, fsfo.st.bfree);
    VFSATTR_RETURN(attr, f_bavail, fsfo.st.bavail);
    VFSATTR_RETURN(attr, f_bused, (fsfo.st.blocks - fsfo.st.bfree));
    VFSATTR_RETURN(attr, f_files, fsfo.st.files);
    VFSATTR_RETURN(attr, f_ffree, fsfo.st.ffree);

    /* f_fsid and f_owner handled elsewhere. */

    /* Handle capabilities and attributes. */
    if (VFSATTR_IS_ACTIVE(attr, f_capabilities) ||
        VFSATTR_IS_ACTIVE(attr, f_attributes)) {
        handle_capabilities_and_attributes(mp, attr);
    }

    VFSATTR_RETURN(attr, f_create_time, kZeroTime);
    VFSATTR_RETURN(attr, f_modify_time, kZeroTime);
    VFSATTR_RETURN(attr, f_access_time, kZeroTime);
    VFSATTR_RETURN(attr, f_backup_time, kZeroTime);

    if (deading) {
        VFSATTR_RETURN(attr, f_fssubtype, (uint32_t)FUSE_FSSUBTYPE_INVALID);
    } else {
        VFSATTR_RETURN(attr, f_fssubtype, data->fssubtype);
    }

    /* Daemon needs to pass this. */
    if (VFSATTR_IS_ACTIVE(attr, f_vol_name)) {
        if (data->volname[0] != 0) {
            /* Copy the cached name; force NUL termination ourselves. */
            strncpy(attr->f_vol_name, data->volname, MAXPATHLEN);
            attr->f_vol_name[MAXPATHLEN - 1] = 0;
            VFSATTR_SET_SUPPORTED(attr, f_vol_name);
        }
    }

    VFSATTR_RETURN(attr, f_signature, OSSwapBigToHostInt16(FUSEFS_SIGNATURE));
    VFSATTR_RETURN(attr, f_carbon_fsid, 0);

    /* Only a real (non-faked) STATFS round trip holds a ticket. */
    if (!faking) {
        fuse_ticket_release(fdi.tick);
    }

    return 0;
}