/*
    struct vnop_fsync_args {
        struct vnodeop_desc *a_desc;
        struct vnode *a_vp;
        struct ucred *a_cred;
        int a_waitfor;
        struct thread *a_td;
    };
*/
static int
fuse_vnop_fsync(struct vop_fsync_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct thread *td = ap->a_td;
    struct fuse_filehandle *fufh;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    int type, err = 0;

    fuse_trace_printf_vnop();

    if (fuse_isdeadfs(vp)) {
        return 0;
    }
    if ((err = vop_stdfsync(ap)))
        return err;

    if (!fsess_isimpl(vnode_mount(vp),
        (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
        goto out;
    }
    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            fuse_internal_fsync(vp, td, NULL, fufh);
        }
    }

out:
    return 0;
}
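fuse_internal_fsync() itself is not shown in this excerpt. Below is a minimal sketch of the request it is expected to build, reusing the fdisp helpers seen in this file; the helper usage and the datasync flag value are assumptions, while struct fuse_fsync_in (fh, fsync_flags, padding) is the standard FUSE wire struct.

static int
fuse_internal_fsync_sketch(struct vnode *vp, struct thread *td,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
    struct fuse_dispatcher fdi;
    struct fuse_fsync_in *ffsi;
    int err;

    /* FUSE_FSYNCDIR for directories, FUSE_FSYNC otherwise */
    enum fuse_opcode op =
        (vnode_vtype(vp) == VDIR) ? FUSE_FSYNCDIR : FUSE_FSYNC;

    fdisp_init(&fdi, sizeof(*ffsi));
    fdisp_make_vp(&fdi, op, vp, td, cred);
    ffsi = fdi.indata;
    ffsi->fh = fufh->fh_id;
    ffsi->fsync_flags = 1;  /* assumption: 1 == datasync, 0 == full sync */

    err = fdisp_wait_answ(&fdi);
    fdisp_destroy(&fdi);
    return err;
}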
void
fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
{
    /*
     * This function is called for every vnode open.
     * Merge fuse_open_flags; it may be 0.
     *
     * XXXIP: Handle FOPEN_KEEP_CACHE
     */
    /*
     * Ideally speaking, direct I/O should be enabled on a per-fd basis,
     * but there is no way of providing that in this implementation.
     * It is also hard to think of a reason why two different fd's on the
     * same vnode would want DIRECT_IO turned on and off independently.
     * The Linux-based implementation, however, works on an fd rather
     * than an inode and provides such a feature.
     *
     * XXXIP: Handle fd-based DIRECT_IO
     */
    if (fuse_open_flags & FOPEN_DIRECT_IO) {
        VTOFUD(vp)->flag |= FN_DIRECTIO;
    } else {
        VTOFUD(vp)->flag &= ~FN_DIRECTIO;
    }

    if (vnode_vtype(vp) == VREG) {
        /* XXXIP: prevent getattr by using the cached node size */
        vnode_create_vobject(vp, 0, td);
    }
}
int
fuse_vnode_savesize(struct vnode *vp, struct ucred *cred)
{
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct thread *td = curthread;
    struct fuse_filehandle *fufh = NULL;
    struct fuse_dispatcher fdi;
    struct fuse_setattr_in *fsai;
    int err = 0;

    DEBUG("inode=%jd size=%jd\n", VTOI(vp), fvdat->filesize);
    ASSERT_VOP_ELOCKED(vp, "fuse_io_extend");

    if (fuse_isdeadfs(vp)) {
        return EBADF;
    }
    if (vnode_vtype(vp) == VDIR) {
        return EISDIR;
    }
    if (vfs_isrdonly(vnode_mount(vp))) {
        return EROFS;
    }
    if (cred == NULL) {
        cred = td->td_ucred;
    }

    fdisp_init(&fdi, sizeof(*fsai));
    fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
    fsai = fdi.indata;
    fsai->valid = 0;

    /* Truncate to a new value. */
    fsai->size = fvdat->filesize;
    fsai->valid |= FATTR_SIZE;

    fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
    if (fufh) {
        fsai->fh = fufh->fh_id;
        fsai->valid |= FATTR_FH;
    }
    err = fdisp_wait_answ(&fdi);
    fdisp_destroy(&fdi);
    if (err == 0)
        fvdat->flag &= ~FN_SIZECHANGE;

    fuse_invalidate_attr(vp);
    return err;
}
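A hedged sketch of the intended call pattern: callers push a deferred size change (flagged with FN_SIZECHANGE, cleared above on success) to the daemon before depending on its metadata. The wrapper name is illustrative, not from the original source.

static int
fuse_flush_size_if_dirty(struct vnode *vp, struct ucred *cred)
{
    struct fuse_vnode_data *fvdat = VTOFUD(vp);

    /* FN_SIZECHANGE is set by the write path and cleared by savesize */
    if (fvdat->flag & FN_SIZECHANGE)
        return fuse_vnode_savesize(vp, cred);
    return 0;
}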
void
fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
{
    /*
     * This function is called for every vnode open.
     * Merge fuse_open_flags; it may be 0.
     *
     * XXXIP: Handle FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE
     */
    if (vnode_vtype(vp) == VREG) {
        /* XXXIP: prevent getattr by using the cached node size */
        vnode_create_vobject(vp, 0, td);
    }
}
/*
 * getf(int fd) - hold a lock on a file descriptor, to be released by
 * calling releasef(). On OS X we will also look up the vnode of the fd
 * for calls to spl_vn_rdwr().
 */
void *
getf(int fd)
{
    struct fileproc *fp = NULL;
    struct spl_fileproc *sfp = NULL;
    struct vnode *vp;
    uint32_t vid;

    /*
     * We keep the "fp" pointer as well, both for unlocking in releasef()
     * and for use in vn_rdwr().
     */
    sfp = kmem_alloc(sizeof(*sfp), KM_SLEEP);
    if (!sfp)
        return NULL;

    if (fp_lookup(current_proc(), fd, &fp, 0 /* !locked */)) {
        kmem_free(sfp, sizeof(*sfp));
        return (NULL);
    }

    /*
     * The f_vnode ptr is used to point back to the "sfp" node itself,
     * as it is the only information passed to vn_rdwr.
     */
    sfp->f_vnode = sfp;
    sfp->f_fd = fd;
    sfp->f_offset = 0;
    sfp->f_proc = current_proc();
    sfp->f_fp = fp;

    /* Also grab the vnode, so we can fish out the minor, for onexit. */
    if (!file_vnode_withvid(fd, &vp, &vid)) {
        if (vnode_vtype(vp) != VDIR) {
            sfp->f_file = minor(vnode_specrdev(vp));
        }
        file_drop(fd);
    }

    mutex_enter(&spl_getf_lock);
    list_insert_tail(&spl_getf_list, sfp);
    mutex_exit(&spl_getf_lock);

    //printf("SPL: new getf(%d) ret %p fp is %p so vnode set to %p\n",
    //    fd, sfp, fp, sfp->f_vnode);

    return sfp;
}
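The comment above references releasef(); here is a minimal sketch of that counterpart under the same assumptions: the illumos-style list API used here, and xnu's fp_drop() to release the reference taken by fp_lookup().

void
releasef(int fd)
{
    struct spl_fileproc *sfp;

    /* find the wrapper that getf() queued for this fd in this proc */
    mutex_enter(&spl_getf_lock);
    for (sfp = list_head(&spl_getf_list); sfp != NULL;
        sfp = list_next(&spl_getf_list, sfp)) {
        if (sfp->f_fd == fd && sfp->f_proc == current_proc())
            break;
    }
    if (sfp != NULL)
        list_remove(&spl_getf_list, sfp);
    mutex_exit(&spl_getf_lock);

    if (sfp == NULL)
        return;

    /* drop the fileproc reference taken by fp_lookup() in getf() */
    fp_drop(current_proc(), fd, sfp->f_fp, 0 /* !locked */);
    kmem_free(sfp, sizeof(*sfp));
}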
/*
    struct vnop_link_args {
        struct vnode *a_tdvp;
        struct vnode *a_vp;
        struct componentname *a_cnp;
    };
*/
static int
fuse_vnop_link(struct vop_link_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct vnode *tdvp = ap->a_tdvp;
    struct componentname *cnp = ap->a_cnp;
    struct vattr *vap = VTOVA(vp);

    struct fuse_dispatcher fdi;
    struct fuse_entry_out *feo;
    struct fuse_link_in fli;

    int err;

    fuse_trace_printf_vnop();

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    if (vnode_mount(tdvp) != vnode_mount(vp)) {
        return EXDEV;
    }
    if (vap->va_nlink >= FUSE_LINK_MAX) {
        return EMLINK;
    }
    fli.oldnodeid = VTOI(vp);

    fdisp_init(&fdi, 0);
    fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp,
        FUSE_LINK, &fli, sizeof(fli), &fdi);
    if ((err = fdisp_wait_answ(&fdi))) {
        goto out;
    }
    feo = fdi.answ;

    err = fuse_internal_checkentry(feo, vnode_vtype(vp));
    fuse_invalidate_attr(tdvp);
    fuse_invalidate_attr(vp);

out:
    fdisp_destroy(&fdi);
    return err;
}
/* Assumption is that the vnode has an iocount on it after vnode create. */
int
null_getnewvnode(struct mount *mp, struct vnode *lowervp, struct vnode *dvp,
    struct vnode **vpp, struct componentname *cnp, int root)
{
    struct vnode_fsparam vnfs_param;
    int error = 0;
    enum vtype type = VDIR;
    struct null_node *xp = null_nodecreate(lowervp);

    if (xp == NULL) {
        return ENOMEM;
    }
    if (lowervp) {
        type = vnode_vtype(lowervp);
    }

    vnfs_param.vnfs_mp = mp;
    vnfs_param.vnfs_vtype = type;
    vnfs_param.vnfs_str = "nullfs";
    vnfs_param.vnfs_dvp = dvp;
    vnfs_param.vnfs_fsnode = (void *)xp;
    vnfs_param.vnfs_vops = nullfs_vnodeop_p;
    vnfs_param.vnfs_markroot = root;
    vnfs_param.vnfs_marksystem = 0;
    vnfs_param.vnfs_rdev = 0;
    vnfs_param.vnfs_filesize = 0; /* set to 0 since we should only be shadowing non-regular files */
    vnfs_param.vnfs_cnp = cnp;
    vnfs_param.vnfs_flags = VNFS_ADDFSREF;

    error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vnfs_param, vpp);
    if (error == 0) {
        xp->null_vnode = *vpp;
        xp->null_myvid = vnode_vid(*vpp);
        vnode_settag(*vpp, VT_NULL);
    } else {
        FREE(xp, M_TEMP);
    }
    return error;
}
#ifdef _USE_KAUTH
static int
new_proc_listener(kauth_cred_t cred, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
#else
static int
new_proc_listener(kauth_cred_t cred,
    struct vnode *vp,
    struct vnode *scriptvp,
    struct label *vnodelabel,
    struct label *scriptlabel,
    struct label *execlabel,
    struct componentname *cnp,
    u_int *csflags,
    void *macpolicyattr,
    size_t macpolicyattrlen)
#endif
{
#ifdef _USE_KAUTH
    vnode_t prog = (vnode_t)arg0;
    const char *file_path = (const char *)arg1;
#else
    int pathLen = sizeof(g_processes[0].path);
#endif
    pid_t pid = 0;
    pid_t ppid = 0;
    uid_t uid = 0;

#ifdef _USE_KAUTH
    if (KAUTH_FILEOP_EXEC != action ||
        (NULL != prog && VREG != vnode_vtype(prog))) {
        return KAUTH_RESULT_DEFER;
    }
#endif

    uid = kauth_getuid();
    pid = proc_selfpid();
    ppid = proc_selfppid();

    /* We skip a known false positive. */
    if (0 == ppid && 1 == pid) {
#ifdef _USE_KAUTH
        return KAUTH_RESULT_DEFER;
#else
        return 0; /* Always allow */
#endif
    }

#ifdef _USE_KAUTH
    if (NULL != file_path) {
        // rpal_debug_info("!!!!!! process start: %d/%d/%d %s",
        //     ppid, pid, uid, file_path);
    }
#endif

    rpal_mutex_lock(g_collector_1_mutex);

#ifdef _USE_KAUTH
    if (NULL != file_path) {
        strncpy(g_processes[g_nextProcess].path, file_path,
            sizeof(g_processes[g_nextProcess].path) - 1);
    }
#else
    vn_getpath(vp, g_processes[g_nextProcess].path, &pathLen);
#endif
    g_processes[g_nextProcess].pid = pid;
    g_processes[g_nextProcess].ppid = ppid;
    g_processes[g_nextProcess].uid = uid;
    g_processes[g_nextProcess].ts = rpal_time_getLocal();

    g_nextProcess++;
    if (g_nextProcess == _NUM_BUFFERED_PROCESSES) {
        g_nextProcess = 0;
        rpal_debug_warning("overflow of the execution buffer");
    }
    // rpal_debug_info("now %d processes in buffer", g_nextProcess);

    rpal_mutex_unlock(g_collector_1_mutex);

#ifdef _USE_KAUTH
    return KAUTH_RESULT_DEFER;
#else
    return 0; /* Always allow */
#endif
}
errno_t
FSNodeGetOrCreateFileVNodeByID(vnode_t *vnPtr, uint32_t flags,
    struct fuse_abi_data *feo, mount_t mp, vnode_t dvp, vfs_context_t context,
    uint32_t *oflags)
{
    int err;

    vnode_t vn = NULLVP;
    HNodeRef hn = NULL;

    struct fuse_vnode_data *fvdat = NULL;
    struct fuse_data *mntdata = NULL;
    fuse_device_t dummy_device;

    struct fuse_abi_data fa;

    enum vtype vtyp;

    fuse_abi_data_init(&fa, feo->fad_version, fuse_entry_out_get_attr(feo));

    vtyp = IFTOVT(fuse_attr_get_mode(&fa));
    if ((vtyp >= VBAD) || (vtyp == VNON)) {
        return EINVAL;
    }

    int markroot = (flags & FN_IS_ROOT) ? 1 : 0;
    uint64_t size = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_size(&fa);
    uint32_t rdev = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_rdev(&fa);
    uint64_t generation = fuse_entry_out_get_generation(feo);

    mntdata = fuse_get_mpdata(mp);
    dummy_device = mntdata->fdev;

    err = HNodeLookupCreatingIfNecessary(dummy_device,
        fuse_entry_out_get_nodeid(feo), 0 /* fork index */, &hn, &vn);
    if ((err == 0) && (vn == NULL)) {

        struct vnode_fsparam params;

        fvdat = (struct fuse_vnode_data *)FSNodeGenericFromHNode(hn);

        if (!fvdat->fInitialised) {

            fvdat->fInitialised = true;

            /* self */
            fvdat->vp = NULLVP; /* hold on */
            fvdat->nodeid = fuse_entry_out_get_nodeid(feo);
            fvdat->generation = generation;

            /* parent */
            fvdat->parentvp = dvp;
            if (dvp) {
                fvdat->parent_nodeid = VTOI(dvp);
            } else {
                fvdat->parent_nodeid = 0;
            }

            /* I/O */
            {
                int k;
                for (k = 0; k < FUFH_MAXTYPE; k++) {
                    FUFH_USE_RESET(&(fvdat->fufh[k]));
                }
            }

            /* flags */
            fvdat->flag = flags;
            fvdat->c_flag = 0;

            /* meta */

            /* XXX: truncation */
            fvdat->entry_valid.tv_sec =
                (time_t)fuse_entry_out_get_entry_valid(feo);
            fvdat->entry_valid.tv_nsec =
                fuse_entry_out_get_entry_valid_nsec(feo);

            /* XXX: truncation */
            fvdat->attr_valid.tv_sec = 0;
            fvdat->attr_valid.tv_nsec = 0;

            /* XXX: truncation */
            fvdat->modify_time.tv_sec = (time_t)fuse_attr_get_mtime(&fa);
            fvdat->modify_time.tv_nsec = fuse_attr_get_mtimensec(&fa);

            fvdat->filesize = size;
            fvdat->nlookup = 0;
            fvdat->vtype = vtyp;

            /* locking */
            fvdat->createlock =
                lck_mtx_alloc_init(fuse_lock_group, fuse_lock_attr);
            fvdat->creator = current_thread();
#if M_OSXFUSE_ENABLE_TSLOCKING
            fvdat->nodelock =
                lck_rw_alloc_init(fuse_lock_group, fuse_lock_attr);
            fvdat->nodelockowner = NULL;
            fvdat->truncatelock =
                lck_rw_alloc_init(fuse_lock_group, fuse_lock_attr);
#endif
        }

        if (err == 0) {
            params.vnfs_mp = mp;
            params.vnfs_vtype = vtyp;
            params.vnfs_str = NULL;
            params.vnfs_dvp = dvp; /* NULLVP for the root vnode */
            params.vnfs_fsnode = hn;

#if M_OSXFUSE_ENABLE_SPECFS
            if ((vtyp == VBLK) || (vtyp == VCHR)) {
                params.vnfs_vops = fuse_spec_operations;
                params.vnfs_rdev = (dev_t)rdev;
#else
            if (0) {
#endif
#if M_OSXFUSE_ENABLE_FIFOFS
            } else if (vtyp == VFIFO) {
                params.vnfs_vops = fuse_fifo_operations;
                params.vnfs_rdev = 0;
                (void)rdev;
#else
            } else if (0) {
#endif
            } else {
                params.vnfs_vops = fuse_vnode_operations;
                params.vnfs_rdev = 0;
                (void)rdev;
            }

            params.vnfs_marksystem = 0;
            params.vnfs_cnp = NULL;
            params.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
            params.vnfs_filesize = size;
            params.vnfs_markroot = markroot;

#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            err = vnode_create(VNCREATE_FLAVOR, (uint32_t)sizeof(params),
                &params, &vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
        }

        if (err == 0) {
            if (markroot) {
                fvdat->parentvp = vn;
            } else {
                fvdat->parentvp = dvp;
            }
            if (oflags) {
                *oflags |= MAKEENTRY;
            }

            /* Need VT_OSXFUSE from xnu */
            vnode_settag(vn, VT_OTHER);

            cache_attrs(vn, fuse_entry_out, feo);

            HNodeAttachVNodeSucceeded(hn, 0 /* forkIndex */, vn);
            FUSE_OSAddAtomic(1, (SInt32 *)&fuse_vnodes_current);
        } else {
            if (HNodeAttachVNodeFailed(hn, 0 /* forkIndex */)) {
                FSNodeScrub(fvdat);
                HNodeScrubDone(hn);
            }
        }
    }

    if (err == 0) {
        if (vnode_vtype(vn) != vtyp) {
            IOLog("osxfuse: vnode changed type behind us (old=%d, new=%d)\n",
                vnode_vtype(vn), vtyp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT);
            vnode_put(vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
            err = EIO;
        } else if (VTOFUD(vn)->generation != generation) {
            IOLog("osxfuse: vnode changed generation\n");
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT);
            vnode_put(vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
            err = ESTALE;
        }
    }

    if (err == 0) {
        *vnPtr = vn;
    }

    /* assert((err == 0) == (*vnPtr != NULL)); */

    return err;
}

int
fuse_vget_i(vnode_t *vpp, uint32_t flags, struct fuse_abi_data *feo,
    struct componentname *cnp, vnode_t dvp, mount_t mp, vfs_context_t context)
{
    int err = 0;

    if (!feo) {
        return EINVAL;
    }

    err = FSNodeGetOrCreateFileVNodeByID(vpp, flags, feo, mp, dvp,
        context, NULL);
    if (err) {
        return err;
    }

    if (!fuse_isnovncache_mp(mp) && (cnp->cn_flags & MAKEENTRY)) {
        fuse_vncache_enter(dvp, *vpp, cnp);
    }

    /* found: */

    VTOFUD(*vpp)->nlookup++;

    return 0;
}
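For readers new to the OSXFUSE HNode layer, the create-or-attach dance above reduces to the following skeleton; this is a sketch using the names from this function, not a drop-in implementation.

/*
 * Two-phase attach protocol (names as above):
 *
 *   err = HNodeLookupCreatingIfNecessary(dev, nodeid, fork, &hn, &vn);
 *   if (err == 0 && vn == NULL) {
 *       // no live vnode yet: we must create and attach one
 *       err = vnode_create(VNCREATE_FLAVOR, sizeof(params), &params, &vn);
 *       if (err == 0)
 *           HNodeAttachVNodeSucceeded(hn, fork, vn);
 *       else if (HNodeAttachVNodeFailed(hn, fork)) {
 *           // ours was the last attach attempt: scrub both layers
 *           FSNodeScrub(fvdat);
 *           HNodeScrubDone(hn);
 *       }
 *   }
 */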
__private_extern__ int
fuse_internal_access(vnode_t vp, int action, vfs_context_t context,
    struct fuse_access_param *facp)
{
    int err = 0;
    int default_error = 0;
    uint32_t mask = 0;
    int dataflags;
    mount_t mp;
    struct fuse_dispatcher fdi;
    struct fuse_access_in *fai;
    struct fuse_data *data;

    fuse_trace_printf_func();

    mp = vnode_mount(vp);

    data = fuse_get_mpdata(mp);
    dataflags = data->dataflags;

    /* Allow for now; let checks be handled inline later. */
    if (fuse_isdeferpermissions_mp(mp)) {
        return 0;
    }

    if (facp->facc_flags & FACCESS_FROM_VNOP) {
        default_error = ENOTSUP;
    }

    /*
     * (action & KAUTH_VNODE_GENERIC_WRITE_BITS) on a read-only file system
     * would have been handled by higher layers.
     */
    if (!fuse_implemented(data, FSESS_NOIMPLBIT(ACCESS))) {
        return default_error;
    }

    /* Unless explicitly permitted, deny everyone except the fs owner. */
    if (!vnode_isvroot(vp) && !(facp->facc_flags & FACCESS_NOCHECKSPY)) {
        if (!(dataflags & FSESS_ALLOW_OTHER)) {
            int denied = fuse_match_cred(data->daemoncred,
                vfs_context_ucred(context));
            if (denied) {
                return EPERM;
            }
        }
        facp->facc_flags |= FACCESS_NOCHECKSPY;
    }

    if (!(facp->facc_flags & FACCESS_DO_ACCESS)) {
        return default_error;
    }

    if (vnode_isdir(vp)) {
        if (action &
            (KAUTH_VNODE_LIST_DIRECTORY | KAUTH_VNODE_READ_EXTATTRIBUTES)) {
            mask |= R_OK;
        }
        if (action & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY |
            KAUTH_VNODE_DELETE_CHILD)) {
            mask |= W_OK;
        }
        if (action & KAUTH_VNODE_SEARCH) {
            mask |= X_OK;
        }
    } else {
        if (action &
            (KAUTH_VNODE_READ_DATA | KAUTH_VNODE_READ_EXTATTRIBUTES)) {
            mask |= R_OK;
        }
        if (action & (KAUTH_VNODE_WRITE_DATA | KAUTH_VNODE_APPEND_DATA)) {
            mask |= W_OK;
        }
        if (action & KAUTH_VNODE_EXECUTE) {
            mask |= X_OK;
        }
    }

    if (action & (KAUTH_VNODE_WRITE_ATTRIBUTES |
        KAUTH_VNODE_WRITE_EXTATTRIBUTES | KAUTH_VNODE_WRITE_SECURITY)) {
        mask |= W_OK;
    }

    bzero(&fdi, sizeof(fdi));

    fdisp_init(&fdi, sizeof(*fai));
    fdisp_make_vp(&fdi, FUSE_ACCESS, vp, context);

    fai = fdi.indata;
    fai->mask = F_OK;
    fai->mask |= mask;

    if (!(err = fdisp_wait_answ(&fdi))) {
        fuse_ticket_drop(fdi.tick);
    }

    if (err == ENOSYS) {
        /*
         * Make sure we don't come in here again.
         */
        vfs_clearauthopaque(mp);
        fuse_clear_implemented(data, FSESS_NOIMPLBIT(ACCESS));
        err = default_error;
    }

    if (err == ENOENT) {

        const char *vname = NULL;

#if M_MACFUSE_ENABLE_UNSUPPORTED
        vname = vnode_getname(vp);
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

        IOLog("MacFUSE: disappearing vnode %p (name=%s type=%d action=%x)\n",
            vp, (vname) ? vname : "?", vnode_vtype(vp), action);

#if M_MACFUSE_ENABLE_UNSUPPORTED
        if (vname) {
            vnode_putname(vname);
        }
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

        /*
         * On 10.4, I think I can get Finder to lock because of
         * /.Trashes/<uid> unless I use REVOKE_NONE here.
         */

#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
        fuse_biglock_unlock(data->biglock);
#endif
        fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT);
#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
        fuse_biglock_lock(data->biglock);
#endif
    }

    return err;
}
/*
    struct vnop_lookup_args {
        struct vnodeop_desc *a_desc;
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
    };
*/
int
fuse_vnop_lookup(struct vop_lookup_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vnode **vpp = ap->a_vpp;
    struct componentname *cnp = ap->a_cnp;
    struct thread *td = cnp->cn_thread;
    struct ucred *cred = cnp->cn_cred;

    int nameiop = cnp->cn_nameiop;
    int flags = cnp->cn_flags;
    int wantparent = flags & (LOCKPARENT | WANTPARENT);
    int islastcn = flags & ISLASTCN;
    struct mount *mp = vnode_mount(dvp);

    int err = 0;
    int lookup_err = 0;
    struct vnode *vp = NULL;

    struct fuse_dispatcher fdi;
    enum fuse_opcode op;

    uint64_t nid;
    struct fuse_access_param facp;

    FS_DEBUG2G("parent_inode=%ju - %*s\n",
        (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);

    if (fuse_isdeadfs(dvp)) {
        *vpp = NULL;
        return ENXIO;
    }
    if (!vnode_isdir(dvp)) {
        return ENOTDIR;
    }
    if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
        return EROFS;
    }
    /*
     * We do the access check prior to doing anything else only in the
     * case when we are at the fs root (we'd like to say, "we are at the
     * first component", but that's not exactly the same... nevermind).
     * See further comments at further access checks.
     */

    bzero(&facp, sizeof(facp));
    if (vnode_isvroot(dvp)) {   /* early permission check hack */
        if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
            return err;
        }
    }

    if (flags & ISDOTDOT) {
        nid = VTOFUD(dvp)->parent_nid;
        if (nid == 0) {
            return ENOENT;
        }
        fdisp_init(&fdi, 0);
        op = FUSE_GETATTR;
        goto calldaemon;
    } else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
        nid = VTOI(dvp);
        fdisp_init(&fdi, 0);
        op = FUSE_GETATTR;
        goto calldaemon;
    } else if (fuse_lookup_cache_enable) {
        err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
        switch (err) {

        case -1:        /* positive match */
            atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
            return 0;

        case 0:         /* no match in cache */
            atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
            break;

        case ENOENT:    /* negative match */
            /* fall through */
        default:
            return err;
        }
    }
    nid = VTOI(dvp);
    fdisp_init(&fdi, cnp->cn_namelen + 1);
    op = FUSE_LOOKUP;

calldaemon:
    fdisp_make(&fdi, op, mp, nid, td, cred);

    if (op == FUSE_LOOKUP) {
        memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
        ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
    }
    lookup_err = fdisp_wait_answ(&fdi);

    if ((op == FUSE_LOOKUP) && !lookup_err) {   /* lookup call succeeded */
        nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
        if (!nid) {
            /*
             * A zero nodeid is the same as "not found", but it's also
             * cacheable (which we don't do, as of this writing).
             */
            lookup_err = ENOENT;
        } else if (nid == FUSE_ROOT_ID) {
            lookup_err = EINVAL;
        }
    }
    if (lookup_err &&
        (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
        fdisp_destroy(&fdi);
        return lookup_err;
    }
    /* lookup_err, if non-zero, must be ENOENT at this point */

    if (lookup_err) {

        if ((nameiop == CREATE || nameiop == RENAME) && islastcn
            /* && directory dvp has not been removed */ ) {

            if (vfs_isrdonly(mp)) {
                err = EROFS;
                goto out;
            }
#if 0 /* THINK_ABOUT_THIS */
            if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
                goto out;
            }
#endif

            /*
             * Possibly record the position of a slot in the directory
             * large enough for the new component name. This can be
             * recorded in the vnode private data for dvp. Set the
             * SAVENAME flag to hold onto the pathname for use later in
             * VOP_CREATE or VOP_RENAME.
             */
            cnp->cn_flags |= SAVENAME;

            err = EJUSTRETURN;
            goto out;
        }
        /* Consider inserting name into cache. */

        /*
         * No, we can't use negative caching, as the fs changes are out
         * of our control. False positives' falseness turns out just as
         * things go by, but false negatives' falseness doesn't. (And
         * aiding the caching mechanism with extra control mechanisms
         * comes quite close to defeating the whole purpose of
         * caching...)
         */
#if 0
        if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) {
            FS_DEBUG("inserting NULL into cache\n");
            cache_enter(dvp, NULL, cnp);
        }
#endif
        err = ENOENT;
        goto out;

    } else {

        /* !lookup_err */

        struct fuse_entry_out *feo = NULL;
        struct fuse_attr *fattr = NULL;

        if (op == FUSE_GETATTR) {
            fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
        } else {
            feo = (struct fuse_entry_out *)fdi.answ;
            fattr = &(feo->attr);
        }

        /*
         * If deleting, and at end of pathname, return parameters which
         * can be used to remove the file. If the wantparent flag isn't
         * set, we return only the directory, otherwise we go on and
         * lock the inode, being careful with ".".
         */
        if (nameiop == DELETE && islastcn) {
            /*
             * Check for write access on directory.
             */
            facp.xuid = fattr->uid;
            facp.facc_flags |= FACCESS_STICKY;
            err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
            facp.facc_flags &= ~FACCESS_XQUERIES;

            if (err) {
                goto out;
            }
            if (nid == VTOI(dvp)) {
                vref(dvp);
                *vpp = dvp;
            } else {
                err = fuse_vnode_get(dvp->v_mount, nid, dvp, &vp, cnp,
                    IFTOVT(fattr->mode));
                if (err)
                    goto out;
                *vpp = vp;
            }

            /*
             * Save the name for use in VOP_RMDIR and VOP_REMOVE later.
             */
            cnp->cn_flags |= SAVENAME;
            goto out;
        }
        /*
         * If rewriting (RENAME), return the inode and the information
         * required to rewrite the present directory. Must get the inode
         * of the directory entry to verify it's a regular file, or
         * empty directory.
         */
        if (nameiop == RENAME && wantparent && islastcn) {

#if 0 /* THINK_ABOUT_THIS */
            if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
                goto out;
            }
#endif

            /*
             * Check for "."
             */
            if (nid == VTOI(dvp)) {
                err = EISDIR;
                goto out;
            }
            err = fuse_vnode_get(vnode_mount(dvp), nid, dvp, &vp, cnp,
                IFTOVT(fattr->mode));
            if (err) {
                goto out;
            }
            *vpp = vp;
            /*
             * Save the name for use in VOP_RENAME later.
             */
            cnp->cn_flags |= SAVENAME;

            goto out;
        }
        if (flags & ISDOTDOT) {
            struct mount *mp;
            int ltype;

            /*
             * Expanded copy of vn_vget_ino() so that fuse_vnode_get()
             * can be used.
             */
            mp = dvp->v_mount;
            ltype = VOP_ISLOCKED(dvp);
            err = vfs_busy(mp, MBF_NOWAIT);
            if (err != 0) {
                vfs_ref(mp);
                VOP_UNLOCK(dvp, 0);
                err = vfs_busy(mp, 0);
                vn_lock(dvp, ltype | LK_RETRY);
                vfs_rel(mp);
                if (err)
                    goto out;
                if ((dvp->v_iflag & VI_DOOMED) != 0) {
                    err = ENOENT;
                    vfs_unbusy(mp);
                    goto out;
                }
            }
            VOP_UNLOCK(dvp, 0);
            err = fuse_vnode_get(vnode_mount(dvp), nid, NULL, &vp, cnp,
                IFTOVT(fattr->mode));
            vfs_unbusy(mp);
            vn_lock(dvp, ltype | LK_RETRY);
            if ((dvp->v_iflag & VI_DOOMED) != 0) {
                if (err == 0)
                    vput(vp);
                err = ENOENT;
            }
            if (err)
                goto out;
            *vpp = vp;
        } else if (nid == VTOI(dvp)) {
            vref(dvp);
            *vpp = dvp;
        } else {
            err = fuse_vnode_get(vnode_mount(dvp), nid, dvp, &vp, cnp,
                IFTOVT(fattr->mode));
            if (err) {
                goto out;
            }
            fuse_vnode_setparent(vp, dvp);
            *vpp = vp;
        }

        if (op == FUSE_GETATTR) {
            cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
        } else {
            cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
        }

        /* Insert name into cache if appropriate. */

        /*
         * Nooo, caching is evil. With caching, we can't avoid stale
         * information taking over the playground (cached info is not
         * just positive/negative, it does have qualitative aspects,
         * too). And a (VOP/FUSE)_GETATTR is always thrown anyway when
         * walking down along cached path components, and that's not any
         * cheaper than FUSE_LOOKUP. This might change with kernel-side
         * attr caching implemented, but... In Linux, lookup results are
         * not cached, and the daemon is bombarded with FUSE_LOOKUPs on
         * and on. This shows that by design, the daemon is expected to
         * handle frequent lookup queries efficiently, do its caching in
         * userspace, and so on.
         *
         * So just leave the name cache alone.
         */

        /*
         * Well, now I know: Linux caches lookups, but with a timeout...
         * So it's the same thing as attribute caching: we can deal with
         * it when we implement timeouts.
         */
#if 0
        if (cnp->cn_flags & MAKEENTRY) {
            cache_enter(dvp, *vpp, cnp);
        }
#endif
    }
out:
    if (!lookup_err) {

        /* No lookup error; need to clean up. */

        if (err) {  /* Found inode; exit with no vnode. */
            if (op == FUSE_LOOKUP) {
                fuse_internal_forget_send(vnode_mount(dvp), td, cred,
                    nid, 1);
            }
            fdisp_destroy(&fdi);
            return err;
        } else {
#ifndef NO_EARLY_PERM_CHECK_HACK
            if (!islastcn) {
                /*
                 * We have the attributes of the next item *now*, and
                 * it's a fact, and we do not have to do extra work for
                 * it (ie, beg the daemon), and it neither depends on
                 * such accidental things like attr caching. So the big
                 * idea: check credentials *now*, not at the beginning
                 * of the next call to lookup.
                 *
                 * The first item of the lookup chain (fs root) won't be
                 * checked then here, of course, as it's never "the
                 * next". But go and see that the root is taken care of
                 * at the very beginning of this function.
                 *
                 * Now, given we want to do the access check this way,
                 * one might ask: so then why not do the access check
                 * just after fetching the inode and its attributes from
                 * the daemon? Why bother with producing the
                 * corresponding vnode at all if something is not OK? We
                 * know what's the deal as soon as we get those attrs...
                 * There is one bit of info though not given us by the
                 * daemon: whether his response is authoritative or
                 * not... His response should be ignored if something is
                 * mounted over the dir in question. But that can be
                 * known only by having the vnode...
                 */
                int tmpvtype = vnode_vtype(*vpp);

                bzero(&facp, sizeof(facp));
                /* the early perm check hack */
                facp.facc_flags |= FACCESS_VA_VALID;

                if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
                    err = ENOTDIR;
                }
                if (!err && !vnode_mountedhere(*vpp)) {
                    err = fuse_internal_access(*vpp, VEXEC, &facp, td,
                        cred);
                }
                if (err) {
                    if (tmpvtype == VLNK)
                        FS_DEBUG("weird, permission error with a symlink?\n");
                    vput(*vpp);
                    *vpp = NULL;
                }
            }
#endif
        }
    }
    fdisp_destroy(&fdi);

    return err;
}
static int
vnop_setattr_9p(struct vnop_setattr_args *ap)
{
    struct vnode_attr *vap;
    vnode_t vp;
    node_9p *np;
    dir_9p d;
    int e;

    TRACE();
    vp = ap->a_vp;
    vap = ap->a_vap;
    np = NTO9P(vp);

    if (vnode_vfsisrdonly(vp))
        return EROFS;

    if (vnode_isvroot(vp))
        return EACCES;

    nulldir(&d);
    if (VATTR_IS_ACTIVE(vap, va_data_size)) {
        if (vnode_isdir(vp))
            return EISDIR;
        d.length = vap->va_data_size;
    }
    VATTR_SET_SUPPORTED(vap, va_data_size);

    if (VATTR_IS_ACTIVE(vap, va_access_time))
        d.atime = vap->va_access_time.tv_sec;
    VATTR_SET_SUPPORTED(vap, va_access_time);

    if (VATTR_IS_ACTIVE(vap, va_modify_time))
        d.mtime = vap->va_modify_time.tv_sec;
    VATTR_SET_SUPPORTED(vap, va_modify_time);

    if (VATTR_IS_ACTIVE(vap, va_mode)) {
        d.mode = vap->va_mode & 0777;
        if (vnode_isdir(vp))
            SET(d.mode, DMDIR);
        if (ISSET(np->nmp->flags, F_DOTU)) {
            switch (vnode_vtype(vp)) {
            case VBLK:
            case VCHR:
                SET(d.mode, DMDEVICE);
                break;
            case VLNK:
                SET(d.mode, DMSYMLINK);
                break;
            case VSOCK:
                SET(d.mode, DMSOCKET);
                break;
            case VFIFO:
                SET(d.mode, DMNAMEDPIPE);
                break;
            default:
                break;
            }
        }
    }
    VATTR_SET_SUPPORTED(vap, va_mode);

    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    e = wstat_9p(np->nmp, np->fid, &d);
    np->dirtimer = 0;

    if (e == 0 && d.length != ~0)
        ubc_setsize(vp, d.length);

    nunlock_9p(np);
    return e;
}
__private_extern__ int
fuse_internal_strategy(vnode_t vp, buf_t bp)
{
    size_t biosize;
    size_t chunksize;
    ssize_t respsize;

    int mapped = FALSE;
    int mode;
    int op;
    int vtype = vnode_vtype(vp);

    int err = 0;

    caddr_t bufdat;
    off_t left;
    off_t offset;
    int32_t bflags = buf_flags(bp);

    fufh_type_t fufh_type;
    struct fuse_dispatcher fdi;
    struct fuse_data *data;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;
    mount_t mp = vnode_mount(vp);

    data = fuse_get_mpdata(mp);

    biosize = data->blocksize;

    if (!(vtype == VREG || vtype == VDIR)) {
        return ENOTSUP;
    }

    if (bflags & B_READ) {
        mode = FREAD;
        fufh_type = FUFH_RDONLY; /* FUFH_RDWR will also do */
    } else {
        mode = FWRITE;
        fufh_type = FUFH_WRONLY; /* FUFH_RDWR will also do */
    }

    if (fvdat->flag & FN_CREATING) {
        fuse_lck_mtx_lock(fvdat->createlock);
        if (fvdat->flag & FN_CREATING) {
            (void)fuse_msleep(fvdat->creator, fvdat->createlock,
                PDROP | PINOD | PCATCH, "fuse_internal_strategy", NULL);
        } else {
            fuse_lck_mtx_unlock(fvdat->createlock);
        }
    }

    fufh = &(fvdat->fufh[fufh_type]);

    if (!FUFH_IS_VALID(fufh)) {
        fufh_type = FUFH_RDWR;
        fufh = &(fvdat->fufh[fufh_type]);
        if (!FUFH_IS_VALID(fufh)) {
            fufh = NULL;
        } else {
            /* We've successfully fallen back to FUFH_RDWR. */
        }
    }

    if (!fufh) {

        if (mode == FREAD) {
            fufh_type = FUFH_RDONLY;
        } else {
            fufh_type = FUFH_RDWR;
        }

        /*
         * Let's NOT do the filehandle preflight check here.
         */

        err = fuse_filehandle_get(vp, NULL, fufh_type, 0 /* mode */);

        if (!err) {
            fufh = &(fvdat->fufh[fufh_type]);
            FUFH_AUX_INC(fufh);
            /* We've created a NEW fufh of type fufh_type. open_count is 1. */
        }

    } else { /* good fufh */

        FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_reuse_count);

        /* We're using an existing fufh of type fufh_type. */
    }

    if (err) {

        /* A more typical error case. */
        if ((err == ENOTCONN) || fuse_isdeadfs(vp)) {
            buf_seterror(bp, EIO);
            buf_biodone(bp);
            return EIO;
        }

        IOLog("MacFUSE: strategy failed to get fh "
            "(vtype=%d, fufh_type=%d, err=%d)\n", vtype, fufh_type, err);

        if (!vfs_issynchronous(mp)) {
            IOLog("MacFUSE: asynchronous write failed!\n");
        }

        buf_seterror(bp, EIO);
        buf_biodone(bp);
        return EIO;
    }

    if (!fufh) {
        panic("MacFUSE: tried everything but still no fufh");
        /* NOTREACHED */
    }

#define B_INVAL 0x00040000 /* Does not contain valid info. */
#define B_ERROR 0x00080000 /* I/O error occurred. */

    if (bflags & B_INVAL) {
        IOLog("MacFUSE: buffer does not contain valid information\n");
    }

    if (bflags & B_ERROR) {
        IOLog("MacFUSE: an I/O error has occurred\n");
    }

    if (buf_count(bp) == 0) {
        return 0;
    }

    fdisp_init(&fdi, 0);

    if (mode == FREAD) {

        struct fuse_read_in *fri;

        buf_setresid(bp, buf_count(bp));
        offset = (off_t)((off_t)buf_blkno(bp) * biosize);

        if (offset >= fvdat->filesize) {
            /* Trying to read at/after EOF? */
            if (offset != fvdat->filesize) {
                /* Trying to read after EOF? */
                buf_seterror(bp, EINVAL);
            }
            buf_biodone(bp);
            return 0;
        }

        /* Note that we just made sure that offset < fvdat->filesize. */
        if ((offset + buf_count(bp)) > fvdat->filesize) {
            /* Trimming read */
            buf_setcount(bp, (uint32_t)(fvdat->filesize - offset));
        }

        if (buf_map(bp, &bufdat)) {
            IOLog("MacFUSE: failed to map buffer in strategy\n");
            return EFAULT;
        } else {
            mapped = TRUE;
        }

        while (buf_resid(bp) > 0) {

            chunksize = min((size_t)buf_resid(bp), data->iosize);

            fdi.iosize = sizeof(*fri);

            op = FUSE_READ;
            if (vtype == VDIR) {
                op = FUSE_READDIR;
            }
            fdisp_make_vp(&fdi, op, vp, (vfs_context_t)0);

            fri = fdi.indata;
            fri->fh = fufh->fh_id;

            /*
             * Historical note:
             *
             * fri->offset = ((off_t)(buf_blkno(bp))) * biosize;
             *
             * This wasn't being incremented!?
             */
            fri->offset = offset;
            fri->size = (typeof(fri->size))chunksize;
            fdi.tick->tk_aw_type = FT_A_BUF;
            fdi.tick->tk_aw_bufdata = bufdat;

            if ((err = fdisp_wait_answ(&fdi))) {
                /* There was a problem with reading. */
                goto out;
            }

            respsize = fdi.tick->tk_aw_bufsize;

            if (respsize < 0) { /* Cannot really happen... */
                err = EIO;
                goto out;
            }

            buf_setresid(bp, (uint32_t)(buf_resid(bp) - respsize));
            bufdat += respsize;
            offset += respsize;

            /* Did we hit EOF before being done? */
            if ((respsize == 0) && (buf_resid(bp) > 0)) {
                /*
                 * Historical note:
                 * If we don't get enough data, just fill the rest with
                 * zeros. In NFS context, this would mean a hole in the
                 * file.
                 */

                /* Zero-pad the incomplete buffer. */
                bzero(bufdat, buf_resid(bp));
                buf_setresid(bp, 0);
                break;
            }

        } /* while (buf_resid(bp) > 0) */

    } else {
        /* write */

        struct fuse_write_in *fwi;
        struct fuse_write_out *fwo;
        int merr = 0;
        off_t diff;

        if (buf_map(bp, &bufdat)) {
            IOLog("MacFUSE: failed to map buffer in strategy\n");
            return EFAULT;
        } else {
            mapped = TRUE;
        }

        /* Write begin */

        buf_setresid(bp, buf_count(bp));
        offset = (off_t)((off_t)buf_blkno(bp) * biosize);

        /* XXX: TBD -- Check here for extension (writing past end) */

        left = buf_count(bp);

        while (left) {

            fdi.iosize = sizeof(*fwi);
            op = FUSE_WRITE;

            fdisp_make_vp(&fdi, op, vp, (vfs_context_t)0);
            chunksize = min((size_t)left, data->iosize);

            fwi = fdi.indata;
            fwi->fh = fufh->fh_id;
            fwi->offset = offset;
            fwi->size = (typeof(fwi->size))chunksize;

            fdi.tick->tk_ms_type = FT_M_BUF;
            fdi.tick->tk_ms_bufdata = bufdat;
            fdi.tick->tk_ms_bufsize = chunksize;

            /* About to write <chunksize> at <offset> */

            if ((err = fdisp_wait_answ(&fdi))) {
                merr = 1;
                break;
            }

            fwo = fdi.answ;
            diff = chunksize - fwo->size;
            if (diff < 0) {
                err = EINVAL;
                break;
            }

            left -= fwo->size;
            bufdat += fwo->size;
            offset += fwo->size;
            buf_setresid(bp, buf_resid(bp) - fwo->size);
        }

        if (merr) {
            goto out;
        }
    }

    if (fdi.tick) {
        fuse_ticket_drop(fdi.tick);
    } else {
        /* No ticket upon leaving */
    }

out:

    if (err) {
        buf_seterror(bp, err);
    }

    if (mapped == TRUE) {
        buf_unmap(bp);
    }

    buf_biodone(bp);

    return err;
}
/*
    struct vnop_getattr_args {
        struct vnode *a_vp;
        struct vattr *a_vap;
        struct ucred *a_cred;
        struct thread *a_td;
    };
*/
static int
fuse_vnop_getattr(struct vop_getattr_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct vattr *vap = ap->a_vap;
    struct ucred *cred = ap->a_cred;
    struct thread *td = curthread;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);

    int err = 0;
    int dataflags;
    struct fuse_dispatcher fdi;

    FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

    dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;

    /* Note that we are not bailing out on a dead file system just yet. */

    if (!(dataflags & FSESS_INITED)) {
        if (!vnode_isvroot(vp)) {
            fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
            err = ENOTCONN;
            debug_printf("fuse_getattr b: returning ENOTCONN\n");
            return err;
        } else {
            goto fake;
        }
    }
    fdisp_init(&fdi, 0);
    if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
        if ((err == ENOTCONN) && vnode_isvroot(vp)) {
            /* see comment at similar place in fuse_statfs() */
            fdisp_destroy(&fdi);
            goto fake;
        }
        if (err == ENOENT) {
            fuse_internal_vnode_disappear(vp);
        }
        goto out;
    }
    cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
    if (vap != VTOVA(vp)) {
        memcpy(vap, VTOVA(vp), sizeof(*vap));
    }
    if (vap->va_type != vnode_vtype(vp)) {
        fuse_internal_vnode_disappear(vp);
        err = ENOENT;
        goto out;
    }
    if ((fvdat->flag & FN_SIZECHANGE) != 0)
        vap->va_size = fvdat->filesize;

    if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
        /*
         * This is for those cases when the file size changed without us
         * knowing, and we want to catch up.
         */
        off_t new_filesize = ((struct fuse_attr_out *)fdi.answ)->attr.size;

        if (fvdat->filesize != new_filesize) {
            fuse_vnode_setsize(vp, cred, new_filesize);
        }
    }
    debug_printf("fuse_getattr e: returning 0\n");

out:
    fdisp_destroy(&fdi);
    return err;

fake:
    bzero(vap, sizeof(*vap));
    vap->va_type = vnode_vtype(vp);
    return 0;
}
/*
 * kauth file scope listener
 * This lets us detect files written to the filesystem.
 * arg2 contains a flag, KAUTH_FILEOP_CLOSE_MODIFIED, which is set if a
 * modified file is being closed; this way we don't need to trace every
 * close(), only the ones writing to the filesystem.
 */
static int
fileop_scope_listener(kauth_cred_t credential,
    void *idata,
    kauth_action_t action,
    uintptr_t arg0,     /* vnode reference */
    uintptr_t arg1,     /* full path to file being closed */
    uintptr_t arg2,     /* flags */
    uintptr_t arg3)
{
    /* ignore all actions except FILE_CLOSE */
    if (action != KAUTH_FILEOP_CLOSE) {
        return KAUTH_RESULT_DEFER;
    }

    /* ignore operations with bad data */
    if (credential == NULL || (vnode_t)arg0 == NULL || (char *)arg1 == NULL) {
        ERROR_MSG("Arguments contain null pointers!");
        return KAUTH_RESULT_DEFER;
    }

    /* ignore closes on folders, character and block devices */
    switch (vnode_vtype((vnode_t)arg0)) {
    case VDIR:
    case VCHR:
    case VBLK:
        return KAUTH_RESULT_DEFER;
    default:
        break;
    }

    /* we are only interested when a modified file is being closed */
    if ((int)arg2 != KAUTH_FILEOP_CLOSE_MODIFIED) {
        return KAUTH_RESULT_DEFER;
    }

    char *file_path = (char *)arg1;

    /* get information from the current proc trying to write to the vnode */
    proc_t proc = current_proc();
    pid_t mypid = proc_pid(proc);
    char myprocname[MAXCOMLEN + 1] = {0};
    proc_name(mypid, myprocname, sizeof(myprocname));

    /* retrieve the vnode attributes; we can get a lot of vnode information here */
    struct vnode_attr vap = {0};
    vfs_context_t context = vfs_context_create(NULL);

    /*
     * initialize the structure fields we are interested in
     * reference vn_stat_noauth() in xnu/bsd/vfs/vfs_vnops.c
     */
    VATTR_INIT(&vap);
    VATTR_WANTED(&vap, va_mode);
    VATTR_WANTED(&vap, va_type);
    VATTR_WANTED(&vap, va_uid);
    VATTR_WANTED(&vap, va_gid);
    VATTR_WANTED(&vap, va_data_size);
    VATTR_WANTED(&vap, va_flags);

    int attr_ok = 1;
    if (vnode_getattr((vnode_t)arg0, &vap, context) != 0) {
        /* in case of error, permissions and filesize will be bogus */
        ERROR_MSG("failed to vnode_getattr");
        attr_ok = 0;
    }
    /* release the context we created, else kab00m! */
    vfs_context_rele(context);

    int error = 0;
    /*
     * make sure we:
     * - were able to read the attributes
     * - file size is at least uint32_t
     * - path starts with /Users
     */
    if (attr_ok == 1 &&
        vap.va_data_size >= sizeof(uint32_t) &&
        strprefix(file_path, "/Users/")) {
        uint32_t magic = 0;
        /* read the target vnode, starting at offset 0 */
        uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
        if (uio == NULL) {
            ERROR_MSG("uio_create returned null!");
            return KAUTH_RESULT_DEFER;
        }
        /* we just want to read 4 bytes to match the header */
        if ((error = uio_addiov(uio, CAST_USER_ADDR_T(&magic),
            sizeof(uint32_t)))) {
            ERROR_MSG("uio_addiov returned error %d!", error);
            uio_free(uio);
            return KAUTH_RESULT_DEFER;
        }
        if ((error = VNOP_READ((vnode_t)arg0, uio, 0, NULL))) {
            ERROR_MSG("VNOP_READ failed %d!", error);
            uio_free(uio);
            return KAUTH_RESULT_DEFER;
        } else if (uio_resid(uio)) {
            ERROR_MSG("uio_resid!");
            uio_free(uio);
            return KAUTH_RESULT_DEFER;
        }
        uio_free(uio);

        /* verify if it's a Mach-O file */
        if (magic == MH_MAGIC || magic == MH_MAGIC_64 || magic == FAT_CIGAM) {
            char *token = NULL;
            char *string = NULL;
            char *tofree = NULL;
            int library = 0;
            int preferences = 0;

            tofree = string = STRDUP(file_path, M_TEMP);
            while ((token = strsep(&string, "/")) != NULL) {
                if (strcmp(token, "Library") == 0) {
                    library = 1;
                } else if (library == 1 &&
                    strcmp(token, "Preferences") == 0) {
                    preferences = 1;
                }
            }
            _FREE(tofree, M_TEMP);

            /* we got a match in /Users/username/Library/Preferences, warn the user */
            if (library == 1 && preferences == 1) {
                DEBUG_MSG("Found Mach-O written to %s by %s.",
                    file_path, myprocname);
                char alert_msg[1025] = {0};
                snprintf(alert_msg, sizeof(alert_msg),
                    "Process \"%s\" wrote Mach-O binary %s.\n"
                    " This could be Hacking Team's malware!",
                    myprocname, file_path);
                alert_msg[sizeof(alert_msg) - 1] = '\0';
                /* log to syslog */
                printf("[WARNING] Process \"%s\" wrote Mach-O binary %s.\n"
                    " This could be Hacking Team's malware!",
                    myprocname, file_path);
                /* deprecated but still usable to display the alert */
                KUNCUserNotificationDisplayNotice(
                    10,               /* timeout */
                    0,                /* flags - default is Stop alert level */
                    NULL,             /* iconpath */
                    NULL,             /* soundpath */
                    NULL,             /* localization path */
                    "Security Alert", /* alert header */
                    alert_msg,        /* alert message */
                    "OK");            /* button title */
            }
        }
    }
    /* don't deny access; we are just here to observe */
    return KAUTH_RESULT_DEFER;
}
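A minimal sketch of how such a listener is attached and detached, using the stock xnu kauth API (KAUTH_SCOPE_FILEOP is "com.apple.kauth.fileop"). The g_fileop_listener name is illustrative, not from the original source.

static kauth_listener_t g_fileop_listener = NULL;

static int
start_fileop_listener(void)
{
    /* register for file operation events; idata is unused here */
    g_fileop_listener = kauth_listen_scope(KAUTH_SCOPE_FILEOP,
        fileop_scope_listener, NULL /* idata */);
    return (g_fileop_listener != NULL) ? 0 : ENOMEM;
}

static void
stop_fileop_listener(void)
{
    if (g_fileop_listener != NULL) {
        kauth_unlisten_scope(g_fileop_listener);
        g_fileop_listener = NULL;
    }
}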
/*
    struct vnop_setattr_args {
        struct vnode *a_vp;
        struct vattr *a_vap;
        struct ucred *a_cred;
        struct thread *a_td;
    };
*/
static int
fuse_vnop_setattr(struct vop_setattr_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct vattr *vap = ap->a_vap;
    struct ucred *cred = ap->a_cred;
    struct thread *td = curthread;

    struct fuse_dispatcher fdi;
    struct fuse_setattr_in *fsai;
    struct fuse_access_param facp;

    int err = 0;
    enum vtype vtyp;
    int sizechanged = 0;
    uint64_t newsize = 0;

    FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    fdisp_init(&fdi, sizeof(*fsai));
    fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
    fsai = fdi.indata;
    fsai->valid = 0;

    bzero(&facp, sizeof(facp));

    facp.xuid = vap->va_uid;
    facp.xgid = vap->va_gid;

    if (vap->va_uid != (uid_t)VNOVAL) {
        facp.facc_flags |= FACCESS_CHOWN;
        fsai->uid = vap->va_uid;
        fsai->valid |= FATTR_UID;
    }
    if (vap->va_gid != (gid_t)VNOVAL) {
        facp.facc_flags |= FACCESS_CHOWN;
        fsai->gid = vap->va_gid;
        fsai->valid |= FATTR_GID;
    }
    if (vap->va_size != VNOVAL) {

        struct fuse_filehandle *fufh = NULL;

        /* Truncate to a new value. */
        fsai->size = vap->va_size;
        sizechanged = 1;
        newsize = vap->va_size;
        fsai->valid |= FATTR_SIZE;

        fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
        if (fufh) {
            fsai->fh = fufh->fh_id;
            fsai->valid |= FATTR_FH;
        }
    }
    if (vap->va_atime.tv_sec != VNOVAL) {
        fsai->atime = vap->va_atime.tv_sec;
        fsai->atimensec = vap->va_atime.tv_nsec;
        fsai->valid |= FATTR_ATIME;
    }
    if (vap->va_mtime.tv_sec != VNOVAL) {
        fsai->mtime = vap->va_mtime.tv_sec;
        fsai->mtimensec = vap->va_mtime.tv_nsec;
        fsai->valid |= FATTR_MTIME;
    }
    if (vap->va_mode != (mode_t)VNOVAL) {
        fsai->mode = vap->va_mode & ALLPERMS;
        fsai->valid |= FATTR_MODE;
    }
    if (!fsai->valid) {
        goto out;
    }
    vtyp = vnode_vtype(vp);

    if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
        err = EISDIR;
        goto out;
    }
    if (vfs_isrdonly(vnode_mount(vp)) &&
        (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
        err = EROFS;
        goto out;
    }
    if (fsai->valid & ~FATTR_SIZE) {
        /* err = fuse_internal_access(vp, VADMIN, context, &facp); */
        /* XXX */
        err = 0;
    }
    facp.facc_flags &= ~FACCESS_XQUERIES;

    if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
        vap->va_vaflags & VA_UTIMES_NULL) {
        err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
    }
    if (err)
        goto out;
    if ((err = fdisp_wait_answ(&fdi)))
        goto out;
    vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);

    if (vnode_vtype(vp) != vtyp) {
        if (vnode_vtype(vp) == VNON && vtyp != VNON) {
            debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
        } else {
            /*
             * STALE vnode, ditch
             *
             * The vnode has changed its type "behind our back". There's
             * nothing really we can do, so let us just force an internal
             * revocation and tell the caller to try again, if interested.
             */
            fuse_internal_vnode_disappear(vp);
            err = EAGAIN;
        }
    }
    if (!err && !sizechanged) {
        cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
    }
out:
    fdisp_destroy(&fdi);
    if (!err && sizechanged) {
        fuse_vnode_setsize(vp, cred, newsize);
        VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
    }
    return err;
}
int
zfs_getattr_znode_unlocked(struct vnode *vp, vattr_t *vap)
{
    znode_t *zp = VTOZ(vp);
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    int error = 0;
    uint64_t parent;

    //printf("getattr_osx\n");

    ZFS_ENTER(zfsvfs);

    /*
     * On Mac OS X we always export the root directory id as 2
     */
    vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_id;
    //vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_vid;
    vap->va_nlink = zp->z_links;
    vap->va_data_size = zp->z_size;
    vap->va_total_size = zp->z_size;
    vap->va_gen = zp->z_gen;

    /*
     * For Carbon compatibility, pretend to support this legacy/unused
     * attribute
     */
    if (VATTR_IS_ACTIVE(vap, va_backup_time)) {
        vap->va_backup_time.tv_sec = 0;
        vap->va_backup_time.tv_nsec = 0;
        VATTR_SET_SUPPORTED(vap, va_backup_time);
    }
    vap->va_flags = zfs_getbsdflags(zp);

    /*
     * On Mac OS X we always export the root directory id as 2
     * and its parent as 1
     */
    error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
        &parent, sizeof (parent));
    if (!error) {
        if (zp->z_id == zfsvfs->z_root)
            vap->va_parentid = 1;
        else if (parent == zfsvfs->z_root)
            vap->va_parentid = 2;
        else
            vap->va_parentid = parent;
    }

    vap->va_iosize = zp->z_blksz ? zp->z_blksz : zfsvfs->z_max_blksz;
    //vap->va_iosize = 512;
    VATTR_SET_SUPPORTED(vap, va_iosize);

    /* Don't include '.' and '..' in the number of entries */
    if (VATTR_IS_ACTIVE(vap, va_nchildren) && vnode_isdir(vp)) {
        VATTR_RETURN(vap, va_nchildren, vap->va_nlink - 2);
    }

    /*
     * va_dirlinkcount is the count of directory hard links. When a file
     * system does not support ATTR_DIR_LINKCOUNT, xnu will default to 1.
     * Since we claim to support ATTR_DIR_LINKCOUNT both as valid and as
     * native, we'll just return 1. We set 1 for this value in dirattrpack
     * as well. If in the future ZFS actually supports directory hard
     * links, we can return a real value.
     */
    if (VATTR_IS_ACTIVE(vap, va_dirlinkcount) && vnode_isdir(vp)) {
        VATTR_RETURN(vap, va_dirlinkcount, 1);
    }

    if (VATTR_IS_ACTIVE(vap, va_acl)) {
        //printf("want acl\n");
#if 0
        zfs_acl_phys_t acl;

        if (sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
            &acl, sizeof (zfs_acl_phys_t))) {
            //if (zp->z_acl.z_acl_count == 0) {
            vap->va_acl = (kauth_acl_t)KAUTH_FILESEC_NONE;
        } else {
            if ((error = zfs_getacl(zp, &vap->va_acl, B_TRUE, NULL))) {
                dprintf("zfs_getacl returned error %d\n", error);
                error = 0;
                //ZFS_EXIT(zfsvfs);
                //return (error);
            }
        }
#endif
        //VATTR_SET_SUPPORTED(vap, va_acl);
        VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
        VATTR_RETURN(vap, va_guuid, kauth_null_guid);

        dprintf("Calling getacl\n");
        if ((error = zfs_getacl(zp, &vap->va_acl, B_FALSE, NULL))) {
            dprintf("zfs_getacl returned error %d\n", error);
            error = 0;
        } else {
            VATTR_SET_SUPPORTED(vap, va_acl);
            /*
             * va_acl implies that va_uuuid and va_guuid are also
             * supported.
             */
            VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
            VATTR_RETURN(vap, va_guuid, kauth_null_guid);
        }
    }

    if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
        VATTR_IS_ACTIVE(vap, va_total_alloc)) {
        uint32_t blksize;
        u_longlong_t nblks;
        sa_object_size(zp->z_sa_hdl, &blksize, &nblks);
        vap->va_data_alloc = (uint64_t)512LL * (uint64_t)nblks;
        vap->va_total_alloc = vap->va_data_alloc;
        vap->va_supported |= VNODE_ATTR_va_data_alloc |
            VNODE_ATTR_va_total_alloc;
    }

    if (VATTR_IS_ACTIVE(vap, va_name)) {
        vap->va_name[0] = 0;

        if (!vnode_isvroot(vp)) {
            /* Let's not supply the name, as zap_cursor can cause a panic */
#if 0
            if (zap_value_search(zfsvfs->z_os, parent, zp->z_id,
                ZFS_DIRENT_OBJ(-1ULL), vap->va_name) == 0)
                VATTR_SET_SUPPORTED(vap, va_name);
#endif
        } else {
            /*
             * The vroot objects must return a unique name for Finder to
             * be able to distinguish between mounts. For this reason we
             * simply return the full name, from the statfs mounted-from
             * name.
             */
            strlcpy(vap->va_name,
                vfs_statfs(vnode_mount(vp))->f_mntfromname, MAXPATHLEN);
            VATTR_SET_SUPPORTED(vap, va_name);
        }
    }

    if (VATTR_IS_ACTIVE(vap, va_filerev)) {
        VATTR_RETURN(vap, va_filerev, 0);
    }
    if (VATTR_IS_ACTIVE(vap, va_linkid)) {
        VATTR_RETURN(vap, va_linkid, vap->va_fileid);
    }
    if (VATTR_IS_ACTIVE(vap, va_fsid)) {
        VATTR_RETURN(vap, va_fsid, vfs_statfs(zfsvfs->z_vfs)->f_fsid.val[0]);
    }
    if (VATTR_IS_ACTIVE(vap, va_type)) {
        VATTR_RETURN(vap, va_type, vnode_vtype(ZTOV(zp)));
    }
    if (VATTR_IS_ACTIVE(vap, va_encoding)) {
        VATTR_RETURN(vap, va_encoding, kTextEncodingMacUnicode);
    }
#ifdef VNODE_ATTR_va_addedtime
    if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
        VATTR_RETURN(vap, va_addedtime, vap->va_ctime);
    }
#endif
    if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
        kauth_cred_uid2guid(zp->z_uid, &vap->va_uuuid);
    }
    if (VATTR_IS_ACTIVE(vap, va_guuid)) {
        kauth_cred_uid2guid(zp->z_gid, &vap->va_guuid);
    }

    vap->va_supported |= ZFS_SUPPORTED_VATTRS;

    ZFS_EXIT(zfsvfs);
    return (error);
}
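The VATTR_* macros used throughout these getattr/setattr handlers come from xnu's <sys/vnode.h>. VATTR_RETURN is approximately the following; the point is that it stores the value and marks the attribute as supported in one step, which is why the code above mixes it freely with separate VATTR_SET_SUPPORTED calls.

/*
 * Approximate definition from xnu's <sys/vnode.h>:
 * store the value and mark the attribute as supported.
 */
#define VATTR_RETURN(v, a, x) \
    do { (v)->a = (x); VATTR_SET_SUPPORTED(v, a); } while (0)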
__private_extern__ errno_t
fuse_internal_strategy_buf(struct vnop_strategy_args *ap)
{
    int32_t bflags;
    upl_t bupl;
    daddr64_t blkno, lblkno;
    int bmap_flags;
    buf_t bp = ap->a_bp;
    vnode_t vp = buf_vnode(bp);
    int vtype = vnode_vtype(vp);

    struct fuse_data *data;

    if (!vp || vtype == VCHR || vtype == VBLK) {
        panic("MacFUSE: buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
    }

    bflags = buf_flags(bp);

    if (bflags & B_READ) {
        bmap_flags = VNODE_READ;
    } else {
        bmap_flags = VNODE_WRITE;
    }

    bupl = buf_upl(bp);
    blkno = buf_blkno(bp);
    lblkno = buf_lblkno(bp);

    if (!(bflags & B_CLUSTER)) {

        if (bupl) {
            return cluster_bp(bp);
        }

        if (blkno == lblkno) {

            off_t f_offset;
            size_t contig_bytes;

            data = fuse_get_mpdata(vnode_mount(vp));

            // Still think this is a kludge?
            f_offset = lblkno * data->blocksize;
            blkno = f_offset / data->blocksize;

            buf_setblkno(bp, blkno);

            contig_bytes = buf_count(bp);

            if (blkno == -1) {
                buf_clear(bp);
            }

            /*
             * Our "device" is always /all contiguous/. We don't wanna be
             * doing things like:
             *
             * ...
             *     else if ((long)contig_bytes < buf_count(bp)) {
             *         ret = buf_strategy_fragmented(devvp, bp, f_offset,
             *                   contig_bytes));
             *         return ret;
             *     }
             */
        }

        if (blkno == -1) {
            buf_biodone(bp);
            return 0;
        }
    }

    // Issue the I/O

    return fuse_internal_strategy(vp, bp);
}
/* relies on v1 paging */
static int
nullfs_pagein(struct vnop_pagein_args *ap)
{
    int error = EIO;
    struct vnode *vp, *lvp;

    NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

    vp = ap->a_vp;
    lvp = NULLVPTOLOWERVP(vp);

    if (vnode_vtype(vp) != VREG) {
        return ENOTSUP;
    }

    /*
     * Ask VM/UBC/VFS to do our bidding
     */
    if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
        vm_offset_t ioaddr;
        uio_t auio;
        kern_return_t kret;
        off_t bytes_to_commit;
        off_t lowersize;
        upl_t upl = ap->a_pl;
        user_ssize_t bytes_remaining = 0;

        auio = uio_create(1, ap->a_f_offset, UIO_SYSSPACE, UIO_READ);
        if (auio == NULL) {
            error = EIO;
            goto exit_no_unmap;
        }

        kret = ubc_upl_map(upl, &ioaddr);
        if (KERN_SUCCESS != kret) {
            panic("nullfs_pagein: ubc_upl_map() failed with (%d)", kret);
        }

        ioaddr += ap->a_pl_offset;

        error = uio_addiov(auio, (user_addr_t)ioaddr, ap->a_size);
        if (error) {
            goto exit;
        }

        lowersize = ubc_getsize(lvp);
        if (lowersize != ubc_getsize(vp)) {
            /* ignore failures, nothing can be done */
            (void)ubc_setsize(vp, lowersize);
        }

        error = VNOP_READ(lvp, auio,
            ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ap->a_context);

        bytes_remaining = uio_resid(auio);
        if (bytes_remaining > 0 &&
            bytes_remaining <= (user_ssize_t)ap->a_size) {
            /* zero the bytes that weren't read in to the upl */
            bzero((void *)((uintptr_t)(ioaddr + ap->a_size -
                bytes_remaining)), (size_t)bytes_remaining);
        }

exit:
        kret = ubc_upl_unmap(upl);
        if (KERN_SUCCESS != kret) {
            panic("nullfs_pagein: ubc_upl_unmap() failed with (%d)", kret);
        }

        if (auio != NULL) {
            uio_free(auio);
        }

exit_no_unmap:
        if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
            if (!error && (bytes_remaining >= 0) &&
                (bytes_remaining <= (user_ssize_t)ap->a_size)) {
                /* only commit what was read in (page aligned) */
                bytes_to_commit = ap->a_size - bytes_remaining;
                if (bytes_to_commit) {
                    /*
                     * Make sure bytes_to_commit and bytes_remaining are
                     * page aligned before calling ubc_upl_commit_range.
                     */
                    if (bytes_to_commit & PAGE_MASK) {
                        bytes_to_commit =
                            (bytes_to_commit & (~PAGE_MASK)) +
                            (PAGE_MASK + 1);
                        assert(bytes_to_commit <= (off_t)ap->a_size);
                        bytes_remaining = ap->a_size - bytes_to_commit;
                    }
                    ubc_upl_commit_range(upl, ap->a_pl_offset,
                        (upl_size_t)bytes_to_commit,
                        UPL_COMMIT_FREE_ON_EMPTY);
                }

                /* abort anything that's left */
                if (bytes_remaining) {
                    ubc_upl_abort_range(upl,
                        ap->a_pl_offset + bytes_to_commit,
                        (upl_size_t)bytes_remaining,
                        UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
                }
            } else {
                ubc_upl_abort_range(upl, ap->a_pl_offset,
                    (upl_size_t)ap->a_size,
                    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
            }
        }
        vnode_put(lvp);
    } else if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
        ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset,
            (upl_size_t)ap->a_size,
            UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
    }
    return error;
}
static int
nullfs_special_getattr(struct vnop_getattr_args *args)
{
    mount_t mp = vnode_mount(args->a_vp);
    struct null_mount *null_mp = MOUNTTONULLMOUNT(mp);

    ino_t ino = NULL_ROOT_INO;
    struct vnode_attr covered_rootattr;
    vnode_t checkvp = null_mp->nullm_lowerrootvp;

    VATTR_INIT(&covered_rootattr);
    VATTR_WANTED(&covered_rootattr, va_uid);
    VATTR_WANTED(&covered_rootattr, va_gid);
    VATTR_WANTED(&covered_rootattr, va_create_time);
    VATTR_WANTED(&covered_rootattr, va_modify_time);
    VATTR_WANTED(&covered_rootattr, va_access_time);

    /*
     * prefer to get this from the lower root vp, but if not (i.e. forced
     * unmount of lower fs) try the mount point covered vnode
     */
    if (vnode_getwithvid(checkvp, null_mp->nullm_lowerrootvid)) {
        checkvp = vfs_vnodecovered(mp);
        if (checkvp == NULL) {
            return EIO;
        }
    }

    int error = vnode_getattr(checkvp, &covered_rootattr, args->a_context);

    vnode_put(checkvp);
    if (error) {
        /*
         * we should have been able to get attributes from one of the two
         * choices, so fail if we didn't
         */
        return error;
    }
    /* we got the attributes of the vnode we cover so plow ahead */
    if (args->a_vp == null_mp->nullm_secondvp) {
        ino = NULL_SECOND_INO;
    }

    VATTR_RETURN(args->a_vap, va_type, vnode_vtype(args->a_vp));
    VATTR_RETURN(args->a_vap, va_rdev, 0);
    VATTR_RETURN(args->a_vap, va_nlink, 3); /* always just ., .., and the child */
    VATTR_RETURN(args->a_vap, va_total_size, 0); // hoping this is ok
    VATTR_RETURN(args->a_vap, va_data_size, 0); // hoping this is ok
    VATTR_RETURN(args->a_vap, va_data_alloc, 0);
    VATTR_RETURN(args->a_vap, va_iosize, vfs_statfs(mp)->f_iosize);
    VATTR_RETURN(args->a_vap, va_fileid, ino);
    VATTR_RETURN(args->a_vap, va_linkid, ino);
    VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]); // return the fsid of the mount point
    VATTR_RETURN(args->a_vap, va_filerev, 0);
    VATTR_RETURN(args->a_vap, va_gen, 0);
    /*
     * mark our fake directories as hidden; people shouldn't be
     * encouraged to poke around in them
     */
    VATTR_RETURN(args->a_vap, va_flags, UF_HIDDEN);

    if (ino == NULL_SECOND_INO) {
        /*
         * no parent at the root, so the only other vnode that goes
         * through this path is second and its parent is 1
         */
        VATTR_RETURN(args->a_vap, va_parentid, NULL_ROOT_INO);
    }

    if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) {
        /* force dr_xr_xr_x */
        VATTR_RETURN(args->a_vap, va_mode,
            S_IFDIR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
            S_IROTH | S_IXOTH);
    }
    if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) {
        VATTR_RETURN(args->a_vap, va_uid, covered_rootattr.va_uid);
    }
    if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) {
        VATTR_RETURN(args->a_vap, va_gid, covered_rootattr.va_gid);
    }

    if (VATTR_IS_ACTIVE(args->a_vap, va_create_time)) {
        VATTR_SET_SUPPORTED(args->a_vap, va_create_time);
        args->a_vap->va_create_time.tv_sec =
            covered_rootattr.va_create_time.tv_sec;
        args->a_vap->va_create_time.tv_nsec =
            covered_rootattr.va_create_time.tv_nsec;
    }
    if (VATTR_IS_ACTIVE(args->a_vap, va_modify_time)) {
        VATTR_SET_SUPPORTED(args->a_vap, va_modify_time);
        args->a_vap->va_modify_time.tv_sec =
            covered_rootattr.va_modify_time.tv_sec;
        args->a_vap->va_modify_time.tv_nsec =
            covered_rootattr.va_modify_time.tv_nsec;
    }
    if (VATTR_IS_ACTIVE(args->a_vap, va_access_time)) {
        VATTR_SET_SUPPORTED(args->a_vap, va_access_time);
        args->a_vap->va_access_time.tv_sec =
            covered_rootattr.va_access_time.tv_sec;
        args->a_vap->va_access_time.tv_nsec =
            covered_rootattr.va_access_time.tv_nsec;
    }

    return 0;
}
int
fuse_internal_access(struct vnode *vp, mode_t mode,
    struct fuse_access_param *facp, struct thread *td, struct ucred *cred)
{
    int err = 0;
    uint32_t mask = 0;
    int dataflags;
    int vtype;
    struct mount *mp;
    struct fuse_dispatcher fdi;
    struct fuse_access_in *fai;
    struct fuse_data *data;

    /* NOT YET DONE */
    /*
     * If this vnop gives you trouble, just return 0 here for a lazy
     * kludge.
     */
    /* return 0; */

    fuse_trace_printf_func();

    mp = vnode_mount(vp);
    vtype = vnode_vtype(vp);

    data = fuse_get_mpdata(mp);
    dataflags = data->dataflags;

    if ((mode & VWRITE) && vfs_isrdonly(mp)) {
        return EACCES;
    }
    /* Unless explicitly permitted, deny everyone except the fs owner. */
    if (vnode_isvroot(vp) && !(facp->facc_flags & FACCESS_NOCHECKSPY)) {
        if (!(dataflags & FSESS_DAEMON_CAN_SPY)) {
            int denied = fuse_match_cred(data->daemoncred, cred);

            if (denied) {
                return EPERM;
            }
        }
        facp->facc_flags |= FACCESS_NOCHECKSPY;
    }
    if (!(facp->facc_flags & FACCESS_DO_ACCESS)) {
        return 0;
    }
    if (((vtype == VREG) && (mode & VEXEC))) {
#ifdef NEED_MOUNT_ARGUMENT_FOR_THIS
        /* Let the kernel handle this through open/close heuristics. */
        return ENOTSUP;
#else
        /* Let the kernel handle this. */
        return 0;
#endif
    }
    if (!fsess_isimpl(mp, FUSE_ACCESS)) {
        /* Let the kernel handle this. */
        return 0;
    }
    if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
        /* Let the kernel handle this. */
        return 0;
    }
    if ((mode & VADMIN) != 0) {
        err = priv_check_cred(cred, PRIV_VFS_ADMIN, 0);
        if (err) {
            return err;
        }
    }
    if ((mode & (VWRITE | VAPPEND | VADMIN)) != 0) {
        mask |= W_OK;
    }
    if ((mode & VREAD) != 0) {
        mask |= R_OK;
    }
    if ((mode & VEXEC) != 0) {
        mask |= X_OK;
    }
    bzero(&fdi, sizeof(fdi));

    fdisp_init(&fdi, sizeof(*fai));
    fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred);

    fai = fdi.indata;
    fai->mask = F_OK;
    fai->mask |= mask;

    err = fdisp_wait_answ(&fdi);
    fdisp_destroy(&fdi);

    if (err == ENOSYS) {
        fsess_set_notimpl(mp, FUSE_ACCESS);
        err = 0;
    }
    return err;
}
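For reference, the request body built in both versions of fuse_internal_access() is the FUSE_ACCESS wire struct from the FUSE kernel ABI, summarized below with a short worked example.

/*
 * FUSE_ACCESS request body (FUSE kernel ABI):
 *
 *     struct fuse_access_in {
 *         uint32_t mask;      // F_OK plus any of R_OK, W_OK, X_OK
 *         uint32_t padding;
 *     };
 *
 * Worked example: a VWRITE | VEXEC check on a regular file is sent as
 * mask == (F_OK | W_OK | X_OK); the daemon answers 0 or an errno, and
 * ENOSYS permanently switches the mount to the kernel-side handling
 * seen above (fsess_set_notimpl).
 */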