static int
vnop_fsync_9p(struct vnop_fsync_args *ap)
{
    node_9p *np;
    dir_9p d;
    int e;

    TRACE();
    if (!vnode_isreg(ap->a_vp))
        return 0;

    np = NTO9P(ap->a_vp);
    nlock_9p(np, NODE_LCK_EXCLUSIVE);

    /* flush dirty pages: msync for mmapped vnodes, cluster_push otherwise */
    if (ubc_getsize(ap->a_vp) > 0 && !vnode_isnocache(ap->a_vp)) {
        if (ISSET(np->flags, NODE_MMAPPED))
            ubc_msync(np->vp, 0, ubc_getsize(np->vp), NULL, UBC_PUSHDIRTY|UBC_SYNC);
        else
            cluster_push(np->vp, IO_SYNC);
    }

    e = 0;
    /* only sync write fids: a "null" wstat asks the server to commit the file */
    if (np->openfid[OWRITE].fid != NOFID || np->openfid[ORDWR].fid != NOFID) {
        nulldir(&d);
        e = wstat_9p(np->nmp, np->fid, &d);
    }
    nunlock_9p(np);
    return e;
}
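/*
 * The wstat_9p() call above relies on a 9P convention: a Twstat whose fields
 * are all set to "don't touch" values changes nothing, but the server may
 * interpret it as a request to commit the file's contents to stable storage.
 * A minimal sketch of such a helper follows, modeled on the Plan 9 C library's
 * nulldir(); the assumption that mac9p's dir_9p mirrors the protocol's stat
 * fields (numeric fields plus name/uid/gid/muid strings) is mine, not taken
 * from the mac9p source.
 */
static void
nulldir_sketch(dir_9p *d)
{
    memset(d, 0xff, sizeof(*d));              /* numeric fields: ~0 means "don't touch" */
    d->name = d->uid = d->gid = d->muid = ""; /* string fields: empty means "don't touch" */
}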
static int
vnop_close_9p(struct vnop_close_args *ap)
{
    openfid_9p *op;
    node_9p *np;
    int e;

    TRACE();
    e = 0;
    np = NTO9P(ap->a_vp);
    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    op = ofidget(np, ap->a_fflag);
    if (op->fid == NOFID) {
        e = EBADF;
        goto error;
    }

    /* last close for this open mode: flush dirty pages, then clunk the fid */
    if (OSDecrementAtomic(&op->ref) == 1) {
        if (ISSET(np->flags, NODE_MMAPPED))
            ubc_msync(np->vp, 0, ubc_getsize(np->vp), NULL, UBC_PUSHDIRTY|UBC_SYNC);
        else
            cluster_push(np->vp, IO_CLOSE);

        /* root gets clunk in vfs_unmount_9p() */
        if (!ISSET(np->nmp->flags, F_UNMOUNTING))
            e = clunk_9p(np->nmp, op->fid);
        op->fid = NOFID;
    }

error:
    nunlock_9p(np);
    return e;
}
static int
vnop_read_9p(struct vnop_read_args *ap)
{
    node_9p *np;
    vnode_t vp;
    uio_t uio;
    int e;

    TRACE();
    vp = ap->a_vp;
    uio = ap->a_uio;
    np = NTO9P(vp);

    if (vnode_isdir(vp))
        return EISDIR;
    if (uio_offset(uio) < 0)
        return EINVAL;
    if (uio_resid(uio) == 0)
        return 0;

    nlock_9p(np, NODE_LCK_SHARED);
    if (vnode_isnocache(vp) || ISSET(ap->a_ioflag, IO_NOCACHE)) {
        /* uncached read: push any dirty pages, invalidate the range, then read from the server */
        if (ISSET(np->flags, NODE_MMAPPED))
            ubc_msync(vp, 0, ubc_getsize(vp), NULL, UBC_PUSHDIRTY|UBC_SYNC);
        else
            cluster_push(vp, IO_SYNC);
        ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE);
        e = nread_9p(np, uio);
    } else {
        e = cluster_read(vp, uio, np->dir.length, ap->a_ioflag);
    }
    nunlock_9p(np);
    return e;
}
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
    int type;
    struct fuse_sync_cargs *args;
    struct fuse_vnode_data *fvdat;
    struct fuse_filehandle *fufh;
    struct fuse_data *data;
    mount_t mp;

    if (!vnode_hasdirtyblks(vp)) {
        return VNODE_RETURNED;
    }

    mp = vnode_mount(vp);

    if (fuse_isdeadfs_mp(mp)) {
        return VNODE_RETURNED_DONE;
    }

    data = fuse_get_mpdata(mp);

    if (!fuse_implemented(data, (vnode_isdir(vp)) ?
                          FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
        return VNODE_RETURNED;
    }

    args = (struct fuse_sync_cargs *)cargs;
    fvdat = VTOFUD(vp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif
    cluster_push(vp, 0);
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif

    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            (void)fuse_internal_fsync_fh(vp, args->context, fufh,
                                         FUSE_OP_FOREGROUNDED);
        }
    }

    /*
     * In general:
     *
     * - can use vnode_isinuse() if the need be
     * - vnode and UBC are in lock-step
     * - note that umount will call ubc_sync_range()
     */

    return VNODE_RETURNED;
}
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
    int type;
    struct fuse_sync_cargs *args;
    struct fuse_vnode_data *fvdat;
    struct fuse_dispatcher fdi;
    struct fuse_filehandle *fufh;
    struct fuse_data *data;
    mount_t mp;

    if (!vnode_hasdirtyblks(vp)) {
        return VNODE_RETURNED;
    }

    mp = vnode_mount(vp);

    if (fuse_isdeadfs(vp)) {
        return VNODE_RETURNED_DONE;
    }

    data = fuse_get_mpdata(mp);

    if (!fuse_implemented(data, (vnode_isdir(vp)) ?
                          FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
        return VNODE_RETURNED;
    }

    args = (struct fuse_sync_cargs *)cargs;
    fvdat = VTOFUD(vp);

    cluster_push(vp, 0);

    fuse_dispatcher_init(&fdi, 0);

    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            (void)fuse_internal_fsync(vp, args->context, fufh, &fdi);
        }
    }

    /*
     * In general:
     *
     * - can use vnode_isinuse() if the need be
     * - vnode and UBC are in lock-step
     * - note that umount will call ubc_sync_range()
     */

    return VNODE_RETURNED;
}
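/*
 * Both versions of fuse_sync_callback() above are written as vnode_iterate()
 * callbacks: VNODE_RETURNED keeps the walk going, while VNODE_RETURNED_DONE
 * ends it early once the filesystem is dead. A minimal sketch of how such a
 * callback is typically driven from a VFS sync operation follows; the
 * fuse_sync_cargs layout and the surrounding error handling are assumptions
 * for illustration, not the exact OSXFUSE code.
 */
struct fuse_sync_cargs {
    vfs_context_t context;   /* consumed by the callback via args->context */
    int           waitfor;   /* MNT_WAIT / MNT_NOWAIT from the sync request */
    int           error;     /* first error seen, if the callback records one */
};

static int
example_vfsop_sync(mount_t mp, int waitfor, vfs_context_t context)
{
    struct fuse_sync_cargs args;

    args.context = context;
    args.waitfor = waitfor;
    args.error   = 0;

    /* Walk every vnode on this mount and run the callback on each one. */
    vnode_iterate(mp, 0, fuse_sync_callback, &args);

    return args.error;
}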