static int vnop_write_9p(struct vnop_write_args *ap) { vnode_t vp; node_9p *np; uio_t uio; user_ssize_t resid; off_t eof, zh, zt, off; int e, flag; TRACE(); vp = ap->a_vp; uio = ap->a_uio; np = NTO9P(vp); if (vnode_isdir(vp)) return EISDIR; off = uio_offset(uio); if (off < 0) return EINVAL; resid = uio_resid(uio); if (resid == 0) return 0; flag = ap->a_ioflag; if (ISSET(flag, IO_APPEND)) { off = np->dir.length; uio_setoffset(uio, off); } nlock_9p(np, NODE_LCK_EXCLUSIVE); if (vnode_isnocache(vp) || ISSET(flag, IO_NOCACHE)) { ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_PUSHDIRTY|UBC_SYNC); ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE); e = nwrite_9p(np, uio); } else { zh = zt = 0; eof = MAX(np->dir.length, resid+off); if (eof > np->dir.length) { if (off > np->dir.length) { zh = np->dir.length; SET(flag, IO_HEADZEROFILL); } zt = (eof + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64; if (zt > eof) { zt = eof; SET(flag, IO_TAILZEROFILL); } } e = cluster_write(vp, uio, np->dir.length, eof, zh, zt, flag); if (e==0 && eof>np->dir.length) { np->dirtimer = 0; np->dir.length = eof; ubc_setsize(vp, eof); } } nunlock_9p(np); return e; }
static int vnop_read_9p(struct vnop_read_args *ap) { node_9p *np; vnode_t vp; uio_t uio; int e; TRACE(); vp = ap->a_vp; uio = ap->a_uio; np = NTO9P(vp); if (vnode_isdir(vp)) return EISDIR; if (uio_offset(uio) < 0) return EINVAL; if (uio_resid(uio) == 0) return 0; nlock_9p(np, NODE_LCK_SHARED); if (vnode_isnocache(vp) || ISSET(ap->a_ioflag, IO_NOCACHE)) { if (ISSET(np->flags, NODE_MMAPPED)) ubc_msync(vp, 0, ubc_getsize(vp), NULL, UBC_PUSHDIRTY|UBC_SYNC); else cluster_push(vp, IO_SYNC); ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE); e = nread_9p(np, uio); } else e = cluster_read(vp, uio, np->dir.length, ap->a_ioflag); nunlock_9p(np); return e; }
static int vnop_fsync_9p(struct vnop_fsync_args *ap) { node_9p *np; dir_9p d; int e; TRACE(); if (!vnode_isreg(ap->a_vp)) return 0; np = NTO9P(ap->a_vp); nlock_9p(np, NODE_LCK_EXCLUSIVE); if (ubc_getsize(ap->a_vp)>0 && !vnode_isnocache(ap->a_vp)) { if (ISSET(np->flags, NODE_MMAPPED)) ubc_msync(np->vp, 0, ubc_getsize(np->vp), NULL, UBC_PUSHDIRTY|UBC_SYNC); else cluster_push(np->vp, IO_SYNC); } e = 0; /* only sync write fids */ if (np->openfid[OWRITE].fid!=NOFID || np->openfid[ORDWR].fid!=NOFID) { nulldir(&d); e = wstat_9p(np->nmp, np->fid, &d); } nunlock_9p(np); return e; }
static int vnop_close_9p(struct vnop_close_args *ap) { openfid_9p *op; node_9p *np; int e; TRACE(); e = 0; np = NTO9P(ap->a_vp); nlock_9p(np, NODE_LCK_EXCLUSIVE); op = ofidget(np, ap->a_fflag); if (op->fid == NOFID) { e = EBADF; goto error; } if (OSDecrementAtomic(&op->ref) == 1) { if (ISSET(np->flags, NODE_MMAPPED)) ubc_msync(np->vp, 0, ubc_getsize(np->vp), NULL, UBC_PUSHDIRTY|UBC_SYNC); else cluster_push(np->vp, IO_CLOSE); /* root gets clunk in vfs_unmount_9p() */ if (!ISSET(np->nmp->flags, F_UNMOUNTING)) e = clunk_9p(np->nmp, op->fid); op->fid = NOFID; } error: nunlock_9p(np); return e; }
/*
 * Handle a FUSE "invalidate inode" notification from the user-space
 * daemon: drop the cached attributes of the node named by fniio.ino and,
 * if an offset is given, push/invalidate the affected range of its UBC
 * pages.  Returns 0 on success, or the lookup error if the node is not
 * currently instantiated (in which case there is nothing to invalidate).
 */
int
fuse_notify_inval_inode(struct fuse_data *data, struct fuse_iov *iov)
{
	int err = 0;
	struct fuse_notify_inval_inode_out fniio;
	HNodeRef hp;
	vnode_t vp;

	/* decode the notification payload from the message buffer */
	fuse_abi_out(fuse_notify_inval_inode_out, DTOABI(data), iov->base, &fniio);

	/* "IfExists": only touch nodes already in the hash; a miss is
	   reported via err and simply means no cache to invalidate */
	err = (int)HNodeLookupRealQuickIfExists(data->fdev, (ino_t)fniio.ino,
	                                        0 /* fork index */, &hp, &vp);
	if (err) {
		return err;
	}
	assert(vp != NULL);

	fuse_nodelock_lock(VTOFUD(vp), FUSEFS_EXCLUSIVE_LOCK);

	fuse_invalidate_attr(vp);
	if (fniio.off >= 0) {
		off_t end_off;

		if (fniio.len > 0) {
			/* clamp the end of the range to the current UBC size */
			end_off = (off_t) min(fniio.off + fniio.len, ubc_getsize(vp));
		} else {
			/* non-positive len: invalidate from off to end of file */
			end_off = ubc_getsize(vp);
		}
		ubc_msync(vp, (off_t)fniio.off, end_off, NULL,
		          UBC_PUSHDIRTY | UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
	}
	/* wake kqueue watchers; attributes may have changed server-side */
	FUSE_KNOTE(vp, NOTE_ATTRIB);

	fuse_nodelock_unlock(VTOFUD(vp));
	/* balance the iocount taken by the lookup above */
	vnode_put(vp);

	return err;
}
/* ioctl */
/*
 * Handle the "alter vnode for fun and ioctl" (AVFI) request: a command
 * bitmask selecting UBC msync, UBC size change, attribute/name-cache
 * purges, and/or kqueue notes for the given vnode.  Only the UBC msync
 * step can affect the return value; everything else is best-effort.
 * Returns 0, or EINVAL for a null/unsupported request or a failed msync.
 */
__private_extern__
int
fuse_internal_ioctl_avfi(vnode_t vp, __unused vfs_context_t context,
                         struct fuse_avfi_ioctl *avfi)
{
	int ret = 0;
	uint32_t hint = 0;   /* accumulated kqueue note bits, posted at the end */

	if (!avfi) {
		return EINVAL;
	}

	if (avfi->cmd & FUSE_AVFI_MARKGONE) {
		/*
		 * TBD
		 */
		return EINVAL;
	}

	/* The result of this /does/ alter our return value. */
	if (avfi->cmd & FUSE_AVFI_UBC) {
		/* mask the caller's flags down to the ones we allow through */
		int ubc_flags = avfi->ubc_flags & (UBC_PUSHDIRTY | UBC_PUSHALL |
		                                   UBC_INVALIDATE | UBC_SYNC);
		/* NOTE: ubc_msync() here returns nonzero on success, 0 on failure */
		if (ubc_msync(vp, (off_t)0, ubc_getsize(vp), (off_t*)0,
		              ubc_flags) == 0) {
			/* failed */
			ret = EINVAL; /* don't really have a good error to return */
		}
	}

	if (avfi->cmd & FUSE_AVFI_UBC_SETSIZE) {
		if (VTOFUD(vp)->filesize != avfi->size) {
			hint |= NOTE_WRITE;
			if (avfi->size > VTOFUD(vp)->filesize) {
				hint |= NOTE_EXTEND;
			}
			/* keep our cached size and the UBC's in sync */
			VTOFUD(vp)->filesize = avfi->size;
			ubc_setsize(vp, avfi->size);
		}
		(void)fuse_invalidate_attr(vp);
	}

	/* The result of this doesn't alter our return value. */
	if (avfi->cmd & FUSE_AVFI_PURGEATTRCACHE) {
		hint |= NOTE_ATTRIB;
		(void)fuse_invalidate_attr(vp);
	}

	/* The result of this doesn't alter our return value. */
	if (avfi->cmd & FUSE_AVFI_PURGEVNCACHE) {
		(void)fuse_vncache_purge(vp);
	}

	if (avfi->cmd & FUSE_AVFI_KNOTE) {
		hint |= avfi->note;
	}

	if (hint) {
		FUSE_KNOTE(vp, hint);
	}

	return ret;
}
/*
 * Atomically exchange two files (exchangedata(2)-style) by sending a
 * FUSE_EXCHANGE request for fvp/tvp, identified to the daemon by their
 * parent directory inodes plus the given names.  Before the request, both
 * files' cached pages are pushed and invalidated; on success, attribute
 * and name caches for both files (and their parents) are purged and the
 * cached sizes are swapped.  Returns 0 or the daemon's error.
 */
__private_extern__
int
fuse_internal_exchange(vnode_t fvp, const char *fname, size_t flen,
                       vnode_t tvp, const char *tname, size_t tlen,
                       int options, vfs_context_t context)
{
	struct fuse_dispatcher fdi;
	struct fuse_exchange_in *fei;
	struct fuse_vnode_data *ffud = VTOFUD(fvp);
	struct fuse_vnode_data *tfud = VTOFUD(tvp);
	vnode_t fdvp = ffud->parentvp;
	vnode_t tdvp = tfud->parentvp;
	int err = 0;

	/* request layout: fuse_exchange_in, fname + NUL, tname + NUL */
	fdisp_init(&fdi, sizeof(*fei) + flen + tlen + 2);
	fdisp_make_vp(&fdi, FUSE_EXCHANGE, fvp, context);

	fei = fdi.indata;
	fei->olddir = VTOI(fdvp);
	fei->newdir = VTOI(tdvp);
	fei->options = (uint64_t)options;

	memcpy((char *)fdi.indata + sizeof(*fei), fname, flen);
	((char *)fdi.indata)[sizeof(*fei) + flen] = '\0';

	memcpy((char *)fdi.indata + sizeof(*fei) + flen + 1, tname, tlen);
	((char *)fdi.indata)[sizeof(*fei) + flen + tlen + 1] = '\0';

	/* push and drop both files' cached pages before the server-side swap
	   so neither cache can survive with the other file's data */
	ubc_msync(fvp, (off_t)0, (off_t)ffud->filesize, (off_t*)0,
	          UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
	ubc_msync(tvp, (off_t)0, (off_t)tfud->filesize, (off_t*)0,
	          UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);

	if (!(err = fdisp_wait_answ(&fdi))) {
		fuse_ticket_drop(fdi.tick);
	}

	if (err == 0) {
		/* invalidate attrs of each distinct parent directory */
		if (fdvp) {
			fuse_invalidate_attr(fdvp);
		}
		if (tdvp != fdvp) {
			if (tdvp) {
				fuse_invalidate_attr(tdvp);
			}
		}
		fuse_invalidate_attr(fvp);
		fuse_invalidate_attr(tvp);

		cache_purge(fvp);
		cache_purge(tvp);

		/* Swap sizes */
		off_t tmpfilesize = ffud->filesize;
		ffud->filesize = tfud->filesize;
		tfud->filesize = tmpfilesize;

		ubc_setsize(fvp, (off_t)ffud->filesize);
		ubc_setsize(tvp, (off_t)tfud->filesize);

		fuse_kludge_exchange(fvp, tvp);

		/*
		 * Another approach (will need additional kernel support to work):
		 *
		 vnode_t tmpvp = ffud->vp;
		 ffud->vp = tfud->vp;
		 tfud->vp = tmpvp;

		 vnode_t tmpparentvp = ffud->parentvp;
		 ffud->parentvp = tfud->parentvp;
		 tfud->parentvp = tmpparentvp;

		 off_t tmpfilesize = ffud->filesize;
		 ffud->filesize = tfud->filesize;
		 tfud->filesize = tmpfilesize;

		 struct fuse_vnode_data tmpfud;
		 memcpy(&tmpfud, ffud, sizeof(struct fuse_vnode_data));
		 memcpy(ffud, tfud, sizeof(struct fuse_vnode_data));
		 memcpy(tfud, &tmpfud, sizeof(struct fuse_vnode_data));

		 HNodeExchangeFromFSNode(ffud, tfud);
		 *
		 */
	}

	return err;
}