int
fuse_internal_fsync(struct vnode *vp, struct thread *td, struct ucred *cred,
    struct fuse_filehandle *fufh)
{
    int op = FUSE_FSYNC;
    struct fuse_fsync_in *ffsi;
    struct fuse_dispatcher fdi;

    fuse_trace_printf_func();

    if (vnode_isdir(vp)) {
        op = FUSE_FSYNCDIR;
    }

    fdisp_init(&fdi, sizeof(*ffsi));
    fdisp_make_vp(&fdi, op, vp, td, cred);
    ffsi = fdi.indata;
    ffsi->fh = fufh->fh_id;
    ffsi->fsync_flags = 1; /* datasync */

    fuse_insert_callback(fdi.tick, fuse_internal_fsync_callback);
    fuse_insert_message(fdi.tick);
    fdisp_destroy(&fdi);

    return 0;
}
static int
vnop_read_9p(struct vnop_read_args *ap)
{
    node_9p *np;
    vnode_t vp;
    uio_t uio;
    int e;

    TRACE();
    vp = ap->a_vp;
    uio = ap->a_uio;
    np = NTO9P(vp);

    if (vnode_isdir(vp))
        return EISDIR;

    if (uio_offset(uio) < 0)
        return EINVAL;

    if (uio_resid(uio) == 0)
        return 0;

    nlock_9p(np, NODE_LCK_SHARED);
    if (vnode_isnocache(vp) || ISSET(ap->a_ioflag, IO_NOCACHE)) {
        if (ISSET(np->flags, NODE_MMAPPED))
            ubc_msync(vp, 0, ubc_getsize(vp), NULL, UBC_PUSHDIRTY|UBC_SYNC);
        else
            cluster_push(vp, IO_SYNC);
        ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio),
            NULL, UBC_INVALIDATE);
        e = nread_9p(np, uio);
    } else
        e = cluster_read(vp, uio, np->dir.length, ap->a_ioflag);
    nunlock_9p(np);
    return e;
}
static int
vnop_write_9p(struct vnop_write_args *ap)
{
    vnode_t vp;
    node_9p *np;
    uio_t uio;
    user_ssize_t resid;
    off_t eof, zh, zt, off;
    int e, flag;

    TRACE();
    vp = ap->a_vp;
    uio = ap->a_uio;
    np = NTO9P(vp);

    if (vnode_isdir(vp))
        return EISDIR;

    off = uio_offset(uio);
    if (off < 0)
        return EINVAL;

    resid = uio_resid(uio);
    if (resid == 0)
        return 0;

    flag = ap->a_ioflag;
    if (ISSET(flag, IO_APPEND)) {
        off = np->dir.length;
        uio_setoffset(uio, off);
    }

    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    if (vnode_isnocache(vp) || ISSET(flag, IO_NOCACHE)) {
        ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio),
            NULL, UBC_PUSHDIRTY|UBC_SYNC);
        ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio),
            NULL, UBC_INVALIDATE);
        e = nwrite_9p(np, uio);
    } else {
        zh = zt = 0;
        eof = MAX(np->dir.length, resid+off);
        if (eof > np->dir.length) {
            if (off > np->dir.length) {
                zh = np->dir.length;
                SET(flag, IO_HEADZEROFILL);
            }
            zt = (eof + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
            if (zt > eof) {
                zt = eof;
                SET(flag, IO_TAILZEROFILL);
            }
        }
        e = cluster_write(vp, uio, np->dir.length, eof, zh, zt, flag);
        if (e==0 && eof>np->dir.length) {
            np->dirtimer = 0;
            np->dir.length = eof;
            ubc_setsize(vp, eof);
        }
    }
    nunlock_9p(np);
    return e;
}
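/*
 * Illustrative sketch (not part of the 9p driver): the cluster-write
 * path above zero-fills the gap between the old EOF and the write
 * offset (IO_HEADZEROFILL) and rounds the new EOF up to a page
 * boundary (IO_TAILZEROFILL). The demo below restates that arithmetic
 * as a standalone program; the 4096-byte page size and the sample
 * offsets are assumptions made only for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ULL
#define DEMO_PAGE_MASK (DEMO_PAGE_SIZE - 1)

int
main(void)
{
    uint64_t old_len = 1000;    /* np->dir.length before the write */
    uint64_t off = 3000;        /* write offset, past the old EOF */
    uint64_t resid = 2000;      /* bytes being written */

    uint64_t eof = off + resid;                     /* new EOF: 5000 */
    uint64_t zh = (off > old_len) ? old_len : 0;    /* head fill starts at 1000 */
    uint64_t zt = (eof + DEMO_PAGE_MASK) & ~DEMO_PAGE_MASK; /* page end: 8192 */

    printf("zero head from %llu, new eof %llu, zero tail to %llu\n",
        (unsigned long long)zh, (unsigned long long)eof,
        (unsigned long long)zt);
    return 0;
}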
/*
    struct vnop_remove_args {
        struct vnode *a_dvp;
        struct vnode *a_vp;
        struct componentname *a_cnp;
    };
*/
static int
fuse_vnop_remove(struct vop_remove_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vnode *vp = ap->a_vp;
    struct componentname *cnp = ap->a_cnp;
    int err;

    FS_DEBUG2G("inode=%ju name=%*s\n", (uintmax_t)VTOI(vp),
        (int)cnp->cn_namelen, cnp->cn_nameptr);

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    if (vnode_isdir(vp)) {
        return EPERM;
    }

    cache_purge(vp);

    err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);

    if (err == 0)
        fuse_internal_vnode_disappear(vp);
    return err;
}
int
fuse_filehandle_open(struct vnode *vp, fufh_type_t fufh_type,
    struct fuse_filehandle **fufhp, struct thread *td, struct ucred *cred)
{
    struct fuse_dispatcher fdi;
    struct fuse_open_in *foi;
    struct fuse_open_out *foo;

    int err = 0;
    int isdir = 0;
    int oflags = 0;
    int op = FUSE_OPEN;

    fuse_trace_printf("fuse_filehandle_open(vp=%p, fufh_type=%d)\n",
        vp, fufh_type);

    if (fuse_filehandle_valid(vp, fufh_type)) {
        panic("FUSE: filehandle_open called despite valid fufh (type=%d)",
            fufh_type);
        /* NOTREACHED */
    }

    /*
     * Note that this means we are effectively FILTERING OUT open() flags.
     */
    oflags = fuse_filehandle_xlate_to_oflags(fufh_type);

    if (vnode_isdir(vp)) {
        isdir = 1;
        op = FUSE_OPENDIR;
        if (fufh_type != FUFH_RDONLY) {
            printf("FUSE:non-rdonly fh requested for a directory?\n");
            fufh_type = FUFH_RDONLY;
        }
    }

    fdisp_init(&fdi, sizeof(*foi));
    fdisp_make_vp(&fdi, op, vp, td, cred);

    foi = fdi.indata;
    foi->flags = oflags;

    if ((err = fdisp_wait_answ(&fdi))) {
        debug_printf("OUCH ... daemon didn't give fh (err = %d)\n", err);
        if (err == ENOENT) {
            fuse_internal_vnode_disappear(vp);
        }
        goto out;
    }

    foo = fdi.answ;

    fuse_filehandle_init(vp, fufh_type, fufhp, foo->fh);
    fuse_vnode_open(vp, foo->open_flags, td);

out:
    fdisp_destroy(&fdi);
    return err;
}
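/*
 * fuse_filehandle_open() above derives the flags sent to the daemon from
 * the filehandle type alone, which is why its comment calls this
 * "filtering out" open() flags. A plausible shape of that translation is
 * sketched below with demo-only names; the real mapping lives in
 * fuse_filehandle_xlate_to_oflags() and this reconstruction is an
 * assumption, not the verbatim implementation.
 */
#include <fcntl.h>

typedef enum {
    DEMO_FUFH_RDONLY,
    DEMO_FUFH_WRONLY,
    DEMO_FUFH_RDWR
} demo_fufh_type_t;

static int
demo_xlate_to_oflags(demo_fufh_type_t type)
{
    switch (type) {
    case DEMO_FUFH_RDONLY:
        return O_RDONLY;
    case DEMO_FUFH_WRONLY:
        return O_WRONLY;
    case DEMO_FUFH_RDWR:
        return O_RDWR;
    default:
        return -1;  /* no other open() bits survive the translation */
    }
}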
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type,
    fuse_op_waitfor_t waitfor)
{
    struct fuse_data *data;
    struct fuse_dispatcher fdi;
    struct fuse_abi_data fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;

    int err = 0;
    int op = FUSE_RELEASE;

    fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
        vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("osxfuse: filehandle_put called on a valid fufh (type=%d)",
            fufh_type);
        /* NOTREACHED */
    }

    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    data = fuse_get_mpdata(vnode_mount(vp));

    if (vnode_isdir(vp)) {
        op = FUSE_RELEASEDIR;
    }

    fdisp_init_abi(&fdi, fuse_release_in, data);
    fdisp_make_vp(&fdi, op, vp, context);

    fuse_abi_data_init(&fri, DATOI(data), fdi.indata);

    fuse_release_in_set_fh(&fri, fufh->fh_id);
    fuse_release_in_set_flags(&fri, fufh->open_flags);

    if (waitfor == FUSE_OP_FOREGROUNDED) {
        err = fdisp_wait_answ(&fdi);
        if (err) {
            goto out;
        }
    } else {
        fuse_insert_message(fdi.tick);
    }

    fuse_ticket_release(fdi.tick);

out:
    FUSE_OSAddAtomic(-1, (SInt32 *)&fuse_fh_current);
    fuse_invalidate_attr(vp);

    return err;
}
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type)
{
    struct fuse_dispatcher fdi;
    struct fuse_release_in *fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;

    int err = 0;
    int isdir = 0;
    int op = FUSE_RELEASE;

    const bool wait_for_completion = true;

    fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
        vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("fuse4x: filehandle_put called on a valid fufh (type=%d)",
            fufh_type);
        /* NOTREACHED */
    }

    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    if (vnode_isdir(vp)) {
        op = FUSE_RELEASEDIR;
        isdir = 1;
    }

    fdisp_init(&fdi, sizeof(*fri));
    fdisp_make_vp(&fdi, op, vp, context);
    fri = fdi.indata;
    fri->fh = fufh->fh_id;
    fri->flags = fufh->open_flags;

    if (wait_for_completion) {
        if ((err = fdisp_wait_answ(&fdi))) {
            goto out;
        } else {
            fuse_ticket_drop(fdi.tick);
        }
    } else {
        fuse_insert_callback(fdi.tick, NULL);
        fuse_insert_message(fdi.tick);
    }

out:
    OSAddAtomic(-1, (SInt32 *)&fuse_fh_current);
    fuse_invalidate_attr(vp);

    return err;
}
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
    int type;
    struct fuse_sync_cargs *args;
    struct fuse_vnode_data *fvdat;
    struct fuse_filehandle *fufh;
    struct fuse_data *data;
    mount_t mp;

    if (!vnode_hasdirtyblks(vp)) {
        return VNODE_RETURNED;
    }

    mp = vnode_mount(vp);

    if (fuse_isdeadfs_mp(mp)) {
        return VNODE_RETURNED_DONE;
    }

    data = fuse_get_mpdata(mp);

    if (!fuse_implemented(data, (vnode_isdir(vp)) ?
        FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
        return VNODE_RETURNED;
    }

    args = (struct fuse_sync_cargs *)cargs;
    fvdat = VTOFUD(vp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif
    cluster_push(vp, 0);
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif

    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            (void)fuse_internal_fsync_fh(vp, args->context, fufh,
                FUSE_OP_FOREGROUNDED);
        }
    }

    /*
     * In general:
     *
     * - can use vnode_isinuse() if need be
     * - vnode and UBC are in lock-step
     * - note that umount will call ubc_sync_range()
     */

    return VNODE_RETURNED;
}
int
fuse_filehandle_close(struct vnode *vp, fufh_type_t fufh_type,
    struct thread *td, struct ucred *cred)
{
    struct fuse_dispatcher fdi;
    struct fuse_release_in *fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;

    int err = 0;
    int isdir = 0;
    int op = FUSE_RELEASE;

    fuse_trace_printf("fuse_filehandle_close(vp=%p, fufh_type=%d)\n",
        vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);
    if (!FUFH_IS_VALID(fufh)) {
        panic("FUSE: filehandle_close called on invalid fufh (type=%d)",
            fufh_type);
        /* NOTREACHED */
    }

    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    if (vnode_isdir(vp)) {
        op = FUSE_RELEASEDIR;
        isdir = 1;
    }

    fdisp_init(&fdi, sizeof(*fri));
    fdisp_make_vp(&fdi, op, vp, td, cred);
    fri = fdi.indata;
    fri->fh = fufh->fh_id;
    fri->flags = fuse_filehandle_xlate_to_oflags(fufh_type);

    err = fdisp_wait_answ(&fdi);
    fdisp_destroy(&fdi);

out:
    atomic_subtract_acq_int(&fuse_fh_count, 1);
    fufh->fh_id = (uint64_t)-1;
    fufh->fh_type = FUFH_INVALID;
    fuse_invalidate_attr(vp);
    return err;
}
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
    int type;
    struct fuse_sync_cargs *args;
    struct fuse_vnode_data *fvdat;
    struct fuse_dispatcher fdi;
    struct fuse_filehandle *fufh;
    struct fuse_data *data;
    mount_t mp;

    if (!vnode_hasdirtyblks(vp)) {
        return VNODE_RETURNED;
    }

    mp = vnode_mount(vp);

    if (fuse_isdeadfs(vp)) {
        return VNODE_RETURNED_DONE;
    }

    data = fuse_get_mpdata(mp);

    if (!fuse_implemented(data, (vnode_isdir(vp)) ?
        FSESS_NOIMPLBIT(FSYNCDIR) : FSESS_NOIMPLBIT(FSYNC))) {
        return VNODE_RETURNED;
    }

    args = (struct fuse_sync_cargs *)cargs;
    fvdat = VTOFUD(vp);

    cluster_push(vp, 0);

    fuse_dispatcher_init(&fdi, 0);
    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            (void)fuse_internal_fsync(vp, args->context, fufh, &fdi);
        }
    }

    /*
     * In general:
     *
     * - can use vnode_isinuse() if need be
     * - vnode and UBC are in lock-step
     * - note that umount will call ubc_sync_range()
     */

    return VNODE_RETURNED;
}
__private_extern__
int
fuse_internal_fsync(vnode_t vp, vfs_context_t context,
    struct fuse_filehandle *fufh, void *param, fuse_op_waitfor_t waitfor)
{
    int err = 0;
    int op = FUSE_FSYNC;
    struct fuse_fsync_in *ffsi;
    struct fuse_dispatcher *fdip = param;

    fuse_trace_printf_func();

    fdip->iosize = sizeof(*ffsi);
    fdip->tick = NULL;
    if (vnode_isdir(vp)) {
        op = FUSE_FSYNCDIR;
    }

    fdisp_make_vp(fdip, op, vp, context);
    ffsi = fdip->indata;
    ffsi->fh = fufh->fh_id;
    ffsi->fsync_flags = 1; /* datasync */

    if (waitfor == FUSE_OP_FOREGROUNDED) {
        if ((err = fdisp_wait_answ(fdip))) {
            if (err == ENOSYS) {
                if (op == FUSE_FSYNC) {
                    fuse_clear_implemented(fdip->tick->tk_data,
                        FSESS_NOIMPLBIT(FSYNC));
                } else if (op == FUSE_FSYNCDIR) {
                    fuse_clear_implemented(fdip->tick->tk_data,
                        FSESS_NOIMPLBIT(FSYNCDIR));
                }
            }
            goto out;
        } else {
            fuse_ticket_drop(fdip->tick);
        }
    } else {
        fuse_insert_callback(fdip->tick, fuse_internal_fsync_callback);
        fuse_insert_message(fdip->tick);
    }

out:
    return err;
}
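/*
 * The ENOSYS handling above implements a degrade-once cache: the first
 * ENOSYS reply from the daemon clears the "implemented" bit, and the
 * sync callbacks shown elsewhere in this section consult
 * fuse_implemented() to skip the upcall from then on. A standalone
 * restatement of that pattern, with demo-only names:
 */
#include <errno.h>
#include <stdbool.h>

static bool demo_fsync_implemented = true;

/* Stand-in for the daemon round-trip; always reports "not implemented". */
static int
demo_send_fsync(void)
{
    return ENOSYS;
}

static int
demo_fsync(void)
{
    int err;

    if (!demo_fsync_implemented)
        return 0;               /* upcall skipped entirely from now on */

    err = demo_send_fsync();
    if (err == ENOSYS)
        demo_fsync_implemented = false; /* remember: daemon lacks FSYNC */
    return err;
}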
/*
    struct vnop_open_args {
        struct vnode *a_vp;
        int a_mode;
        struct ucred *a_cred;
        struct thread *a_td;
        int a_fdidx; / struct file *a_fp;
    };
*/
static int
fuse_vnop_open(struct vop_open_args *ap)
{
    struct vnode *vp = ap->a_vp;
    int mode = ap->a_mode;
    struct thread *td = ap->a_td;
    struct ucred *cred = ap->a_cred;

    fufh_type_t fufh_type;
    struct fuse_vnode_data *fvdat;

    int error, isdir = 0;
    int32_t fuse_open_flags;

    FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    fvdat = VTOFUD(vp);

    if (vnode_isdir(vp)) {
        isdir = 1;
    }

    fuse_open_flags = 0;
    if (isdir) {
        fufh_type = FUFH_RDONLY;
    } else {
        fufh_type = fuse_filehandle_xlate_from_fflags(mode);
        /*
         * For WRONLY opens, force DIRECT_IO.  This is necessary
         * since writing a partial block through the buffer cache
         * will result in a read of the block and that read won't
         * be allowed by the WRONLY open.
         */
        if (fufh_type == FUFH_WRONLY ||
            (fvdat->flag & FN_DIRECTIO) != 0)
            fuse_open_flags = FOPEN_DIRECT_IO;
    }

    if (fuse_filehandle_validrw(vp, fufh_type) != FUFH_INVALID) {
        fuse_vnode_open(vp, fuse_open_flags, td);
        return 0;
    }

    error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);

    return error;
}
/**
 * Check if VFS object exists on a host side.
 *
 * @param vnode Guest VFS vnode that corresponds to host VFS object
 *
 * @return 1 if exists, 0 otherwise.
 */
int
vboxvfs_exist_internal(vnode_t vnode)
{
    int rc;
    PSHFLSTRING pSFPath = NULL;
    SHFLHANDLE handle;
    uint32_t fFlags;
    vboxvfs_mount_t *pMount;
    mount_t mp;

    /* Return FALSE if invalid parameter given */
    AssertReturn(vnode, 0);

    mp = vnode_mount(vnode);
    AssertReturn(mp, EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);
    AssertReturn(pMount, EINVAL);

    fFlags  = (vnode_isdir(vnode)) ? SHFL_CF_DIRECTORY : 0;
    fFlags |= SHFL_CF_ACCESS_READ | SHFL_CF_ACT_OPEN_IF_EXISTS | SHFL_CF_ACT_FAIL_IF_NEW;

    rc = vboxvfs_guest_vnode_to_shflstring_path_internal(vnode, &pSFPath);
    AssertReturn(rc == 0, rc);
    if (rc == 0)
    {
        rc = vboxvfs_open_internal(pMount, pSFPath, fFlags, &handle);
        if (rc == 0)
        {
            rc = vboxvfs_close_internal(pMount, handle);
            if (rc != 0)
            {
                PDEBUG("Unable to close() VBoxVFS object handle while checking "
                       "if object exists on host: %d", rc);
            }
        }
    }

    vboxvfs_put_path_internal((void **)&pSFPath);

    return (rc == 0);
}
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type)
{
    struct fuse_dispatcher fdi;
    struct fuse_release_in *fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;
    int err = 0;    /* initialized here so the dead-fs path returns 0 */

    fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
        vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("fuse4x: filehandle_put called on a valid fufh (type=%d)",
            fufh_type);
        /* NOTREACHED */
    }

    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    int op = vnode_isdir(vp) ? FUSE_RELEASEDIR : FUSE_RELEASE;
    fuse_dispatcher_init(&fdi, sizeof(*fri));
    fuse_dispatcher_make_vp(&fdi, op, vp, context);
    fri = fdi.indata;
    fri->fh = fufh->fh_id;
    fri->flags = fufh->open_flags;

    err = fuse_dispatcher_wait_answer(&fdi);
    if (!err) {
        fuse_ticket_drop(fdi.ticket);
    }

out:
    OSDecrementAtomic((SInt32 *)&fuse_fh_current);
    fuse_invalidate_attr(vp);

    return err;
}
/*
    struct vnop_close_args {
        struct vnode *a_vp;
        int a_fflag;
        struct ucred *a_cred;
        struct thread *a_td;
    };
*/
static int
fuse_vnop_close(struct vop_close_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct ucred *cred = ap->a_cred;
    int fflag = ap->a_fflag;
    fufh_type_t fufh_type;

    fuse_trace_printf_vnop();

    if (fuse_isdeadfs(vp)) {
        return 0;
    }
    if (vnode_isdir(vp)) {
        if (fuse_filehandle_valid(vp, FUFH_RDONLY)) {
            fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
        }
        return 0;
    }
    if (fflag & IO_NDELAY) {
        return 0;
    }
    fufh_type = fuse_filehandle_xlate_from_fflags(fflag);

    if (!fuse_filehandle_valid(vp, fufh_type)) {
        int i;

        for (i = 0; i < FUFH_MAXTYPE; i++)
            if (fuse_filehandle_valid(vp, i))
                break;
        if (i == FUFH_MAXTYPE)
            panic("FUSE: fufh type %d found to be invalid in close"
                " (fflag=0x%x)\n", fufh_type, fflag);
    }
    if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
        fuse_vnode_savesize(vp, cred);
    }
    return 0;
}
/*
    struct vnop_open_args {
        struct vnode *a_vp;
        int a_mode;
        struct ucred *a_cred;
        struct thread *a_td;
        int a_fdidx; / struct file *a_fp;
    };
*/
static int
fuse_vnop_open(struct vop_open_args *ap)
{
    struct vnode *vp = ap->a_vp;
    int mode = ap->a_mode;
    struct thread *td = ap->a_td;
    struct ucred *cred = ap->a_cred;

    fufh_type_t fufh_type;
    struct fuse_vnode_data *fvdat;

    int error, isdir = 0;

    FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    fvdat = VTOFUD(vp);

    if (vnode_isdir(vp)) {
        isdir = 1;
    }
    if (isdir) {
        fufh_type = FUFH_RDONLY;
    } else {
        fufh_type = fuse_filehandle_xlate_from_fflags(mode);
    }

    if (fuse_filehandle_valid(vp, fufh_type)) {
        fuse_vnode_open(vp, 0, td);
        return 0;
    }

    error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);

    return error;
}
/*ARGSUSED*/
static int
zfs_vfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
    zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
    objset_t *os = zfsvfs->z_os;
    znode_t *zp, *nextzp;
    int ret, i;
    int flags;

    /* XXX NOEL: delegation admin stuffs, add back if we use delg. admin */
#if 0
    ret = 0;
    /* UNDEFINED: secpolicy_fs_unmount(cr, vfsp); */
    if (ret) {
        ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
            ZFS_DELEG_PERM_MOUNT, cr);
        if (ret)
            return (ret);
    }

    /*
     * We purge the parent filesystem's vfsp as the parent filesystem
     * and all of its snapshots have their vnode's v_vfsp set to the
     * parent's filesystem's vfsp.  Note, 'z_parent' is self
     * referential for non-snapshots.
     */
    (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
#endif

    /*
     * Unmount any snapshots mounted under .zfs before unmounting the
     * dataset itself.
     */
#if 0
    if (zfsvfs->z_ctldir != NULL &&
        (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) {
        return (ret);
#endif

    flags = SKIPSYSTEM;
    if (mntflags & MNT_FORCE)
        flags |= FORCECLOSE;

    ret = vflush(mp, NULLVP, flags);

    /*
     * Mac OS X needs a file system modify time
     *
     * We use the mtime of the "com.apple.system.mtime"
     * extended attribute, which is associated with the
     * file system root directory.
     *
     * Here we need to release the ref we took on z_mtime_vp during mount.
     */
    if ((ret == 0) || (mntflags & MNT_FORCE)) {
        if (zfsvfs->z_mtime_vp != NULL) {
            struct vnode *mvp;

            mvp = zfsvfs->z_mtime_vp;
            zfsvfs->z_mtime_vp = NULL;

            if (vnode_get(mvp) == 0) {
                vnode_rele(mvp);
                vnode_recycle(mvp);
                vnode_put(mvp);
            }
        }
    }

    if (!(mntflags & MNT_FORCE)) {
        /*
         * Check the number of active vnodes in the file system.
         * Our count is maintained in the vfs structure, but the
         * number is off by 1 to indicate a hold on the vfs
         * structure itself.
         *
         * The '.zfs' directory maintains a reference of its
         * own, and any active references underneath are
         * reflected in the vnode count.
         */
        if (ret)
            return (EBUSY);
#if 0
        if (zfsvfs->z_ctldir == NULL) {
            if (vfsp->vfs_count > 1)
                return (EBUSY);
        } else {
            if (vfsp->vfs_count > 2 ||
                zfsvfs->z_ctldir->v_count > 1) {
                return (EBUSY);
            }
        }
#endif
    }

    rw_enter(&zfsvfs->z_unmount_lock, RW_WRITER);
    rw_enter(&zfsvfs->z_unmount_inactive_lock, RW_WRITER);

    /*
     * At this point there are no vops active, and any new vops will
     * fail with EIO since we have z_unmount_lock for writer (only
     * relevant for forced unmount).
     *
     * Release all holds on dbufs.
     * Note, the dmu can still callback via znode_pageout_func()
     * which can zfs_znode_free() the znode.  So we lock
     * z_all_znodes; search the list for a held dbuf; drop the lock
     * (we know zp can't disappear if we hold a dbuf lock) then
     * regrab the lock and restart.
     */
    mutex_enter(&zfsvfs->z_znodes_lock);
    for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
        nextzp = list_next(&zfsvfs->z_all_znodes, zp);
        if (zp->z_dbuf_held) {
            /* dbufs should only be held when force unmounting */
            zp->z_dbuf_held = 0;
            mutex_exit(&zfsvfs->z_znodes_lock);
            dmu_buf_rele(zp->z_dbuf, NULL);
            /* Start again */
            mutex_enter(&zfsvfs->z_znodes_lock);
            nextzp = list_head(&zfsvfs->z_all_znodes);
        }
    }
    mutex_exit(&zfsvfs->z_znodes_lock);

    /*
     * Set the unmounted flag and let new vops unblock.
     * zfs_inactive will have the unmounted behavior, and all other
     * vops will fail with EIO.
     */
    zfsvfs->z_unmounted = B_TRUE;
    rw_exit(&zfsvfs->z_unmount_lock);
    rw_exit(&zfsvfs->z_unmount_inactive_lock);

    /*
     * Unregister properties.
     */
#ifndef __APPLE__
    if (!dmu_objset_is_snapshot(os))
        zfs_unregister_callbacks(zfsvfs);
#endif

    /*
     * Close the zil.  NB: Can't close the zil while zfs_inactive
     * threads are blocked as zil_close can call zfs_inactive.
     */
    if (zfsvfs->z_log) {
        zil_close(zfsvfs->z_log);
        zfsvfs->z_log = NULL;
    }

    /*
     * Evict all dbufs so that cached znodes will be freed
     */
    if (dmu_objset_evict_dbufs(os, B_TRUE)) {
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
        (void) dmu_objset_evict_dbufs(os, B_FALSE);
    }

    /*
     * Finally close the objset
     */
    dmu_objset_close(os);

    /*
     * We can now safely destroy the '.zfs' directory node.
     */
#if 0
    if (zfsvfs->z_ctldir != NULL)
        zfsctl_destroy(zfsvfs);
#endif

    /*
     * Note that this work is normally done in zfs_freevfs, but since
     * there is no VOP_FREEVFS in OSX, we free VFS items here
     */
    OSDecrementAtomic((SInt32 *)&zfs_active_fs_count);
    for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
        mutex_destroy(&zfsvfs->z_hold_mtx[i]);
    mutex_destroy(&zfsvfs->z_znodes_lock);
    list_destroy(&zfsvfs->z_all_znodes);
    rw_destroy(&zfsvfs->z_unmount_lock);
    rw_destroy(&zfsvfs->z_unmount_inactive_lock);

    return (0);
}

struct vnode* vnode_getparent(struct vnode *vp);  /* sys/vnode_internal.h */

static int
zfs_vget_internal(zfsvfs_t *zfsvfs, ino64_t ino, struct vnode **vpp)
{
    struct vnode *vp;
    struct vnode *dvp = NULL;
    znode_t *zp;
    int error;

    *vpp = NULL;

    /*
     * On Mac OS X we always export the root directory id as 2
     * and its parent as 1
     */
    if (ino == 2 || ino == 1)
        ino = zfsvfs->z_root;

    if ((error = zfs_zget(zfsvfs, ino, &zp)))
        goto out;

    /* Don't expose EA objects! */
    if (zp->z_phys->zp_flags & ZFS_XATTR) {
        vnode_put(ZTOV(zp));
        error = ENOENT;
        goto out;
    }

    *vpp = vp = ZTOV(zp);

    if (vnode_isvroot(vp))
        goto out;

    /*
     * If this znode didn't just come from the cache then
     * it won't have a valid identity (parent and name).
     *
     * Manually fix its identity here (normally done by namei lookup).
     */
    if ((dvp = vnode_getparent(vp)) == NULL) {
        if (zp->z_phys->zp_parent != 0 &&
            zfs_vget_internal(zfsvfs, zp->z_phys->zp_parent, &dvp)) {
            goto out;
        }
        if (vnode_isdir(dvp)) {
            char objname[ZAP_MAXNAMELEN];  /* 256 bytes */
            int flags = VNODE_UPDATE_PARENT;

            /* Look for znode's name in its parent's zap */
            if (zap_value_search(zfsvfs->z_os, zp->z_phys->zp_parent,
                zp->z_id, ZFS_DIRENT_OBJ(-1ULL), objname) == 0) {
                flags |= VNODE_UPDATE_NAME;
            }

            /* Update the znode's parent and name */
            vnode_update_identity(vp, dvp, objname, 0, 0, flags);
        }
    }
    /* All done with znode's parent */
    vnode_put(dvp);
out:
    return (error);
}

/*
 * Get a vnode from a file id (ignoring the generation)
 *
 * Used by NFS Server (readdirplus) and VFS (build_path)
 */
static int
zfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp,
    __unused vfs_context_t context)
{
    zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
    int error;

    ZFS_ENTER(zfsvfs);

    /*
     * On Mac OS X we always export the root directory id as 2.
     * So we don't expect to see the real root directory id
     * from zfs_vfs_vget KPI (unless of course the real id was
     * already 2).
     */
    if ((ino == zfsvfs->z_root) && (zfsvfs->z_root != 2)) {
        ZFS_EXIT(zfsvfs);
        return (ENOENT);
    }
    error = zfs_vget_internal(zfsvfs, ino, vpp);

    ZFS_EXIT(zfsvfs);
    return (error);
}
/*
 * Because of the vagaries of how a filehandle can be used, we try not to
 * be too smart in here (we try to be smart elsewhere).  It is required that
 * you come in here only if you really do not have the said filehandle--else
 * we panic.
 */
int
fuse_filehandle_get(vnode_t vp, vfs_context_t context,
    fufh_type_t fufh_type, int mode)
{
    struct fuse_dispatcher fdi;
    struct fuse_abi_data foi;
    struct fuse_abi_data foo;
    struct fuse_filehandle *fufh;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));

    int err = 0;
    int oflags = 0;
    int op = FUSE_OPEN;

    fuse_trace_printf("fuse_filehandle_get(vp=%p, fufh_type=%d, mode=%x)\n",
        vp, fufh_type, mode);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("osxfuse: filehandle_get called despite valid fufh (type=%d)",
            fufh_type);
        /* NOTREACHED */
    }

    /*
     * Note that this means we are effectively FILTERING OUT open() flags.
     */
    (void)mode;
    oflags = fuse_filehandle_xlate_to_oflags(fufh_type);

    if (vnode_isdir(vp)) {
        op = FUSE_OPENDIR;
        if (fufh_type != FUFH_RDONLY) {
            IOLog("osxfuse: non-rdonly fufh requested for directory\n");
            fufh_type = FUFH_RDONLY;
        }
    }

    if (vnode_islnk(vp) && (mode & O_SYMLINK)) {
        oflags |= O_SYMLINK;
    }
    if ((mode & O_TRUNC) && (data->dataflags & FSESS_ATOMIC_O_TRUNC)) {
        oflags |= O_TRUNC;
    }

    fdisp_init_abi(&fdi, fuse_open_in, data);
    fdisp_make_vp(&fdi, op, vp, context);

    fuse_abi_data_init(&foi, DATOI(data), fdi.indata);
    fuse_open_in_set_flags(&foi, oflags);

    FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_upcall_count);
    err = fdisp_wait_answ(&fdi);
    if (err) {
#if M_OSXFUSE_ENABLE_UNSUPPORTED
        const char *vname = vnode_getname(vp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */
        if (err == ENOENT) {
            /*
             * See comment in fuse_vnop_reclaim().
             */
            cache_purge(vp);
        }
#if M_OSXFUSE_ENABLE_UNSUPPORTED
        IOLog("osxfuse: filehandle_get: failed for %s "
              "(type=%d, err=%d, caller=%p)\n",
              (vname) ? vname : "?", fufh_type, err,
              __builtin_return_address(0));
        if (vname) {
            vnode_putname(vname);
        }
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */
        if (err == ENOENT) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(data->biglock);
#endif
            fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(data->biglock);
#endif
        }
        return err;
    }

    FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_current);

    fuse_abi_data_init(&foo, DATOI(data), fdi.answ);

    fufh->fh_id = fuse_open_out_get_fh(&foo);
    fufh->open_count = 1;
    fufh->open_flags = oflags;
    fufh->fuse_open_flags = fuse_open_out_get_open_flags(&foo);
    fufh->aux_count = 0;

    fuse_ticket_release(fdi.tick);

    return 0;
}
/* For part 1 of zfs_getattr() */
int
zfs_getattr_znode_locked(vattr_t *vap, znode_t *zp, cred_t *cr)
{
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    int error;
    uint64_t times[2];
    uint64_t val;

    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
        &val, sizeof (val)) == 0);
    vap->va_mode = val & MODEMASK;
    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_UID(zfsvfs),
        &val, sizeof (val)) == 0);
    vap->va_uid = val;
    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_GID(zfsvfs),
        &val, sizeof (val)) == 0);
    vap->va_gid = val;
    //vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;

    /* On OS X, the root directory id is always 2 */
    vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_id;

    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
        &val, sizeof (val)) == 0);
    vap->va_nlink = val;
    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
        &val, sizeof (val)) == 0);
    vap->va_data_size = val;
    vap->va_total_size = val;
    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
        &val, sizeof (val)) == 0);
    vap->va_rdev = val;
    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
        &val, sizeof (val)) == 0);
    vap->va_gen = val;

    (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
        times, sizeof (times));
    ZFS_TIME_DECODE(&vap->va_create_time, times);
    (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
        times, sizeof (times));
    ZFS_TIME_DECODE(&vap->va_access_time, times);
    (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zfsvfs),
        times, sizeof (times));
    ZFS_TIME_DECODE(&vap->va_modify_time, times);
    (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zfsvfs),
        times, sizeof (times));
    ZFS_TIME_DECODE(&vap->va_change_time, times);

    if (VATTR_IS_ACTIVE(vap, va_backup_time)) {
        vap->va_backup_time.tv_sec = 0;
        vap->va_backup_time.tv_nsec = 0;
        VATTR_SET_SUPPORTED(vap, va_backup_time);
    }
    vap->va_flags = zfs_getbsdflags(zp);

    /* On OS X, the root directory id is always 2 and its parent is 1 */
    VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
        &val, sizeof (val)) == 0);
    if (zp->z_id == zfsvfs->z_root)
        vap->va_parentid = 1;
    else if (val == zfsvfs->z_root)
        vap->va_parentid = 2;
    else
        vap->va_parentid = val;

    vap->va_iosize = zp->z_blksz ? zp->z_blksz : zfsvfs->z_max_blksz;
    VATTR_SET_SUPPORTED(vap, va_iosize);
    printf("stat blksize set to %d\n", vap->va_iosize);

    vap->va_supported |= ZFS_SUPPORTED_VATTRS;

    if (VATTR_IS_ACTIVE(vap, va_nchildren) && vnode_isdir(ZTOV(zp)))
        VATTR_RETURN(vap, va_nchildren, vap->va_nlink - 2);

    if (VATTR_IS_ACTIVE(vap, va_acl)) {
        if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
            times, sizeof (times)))) {
            // if (zp->z_phys->zp_acl.z_acl_count == 0) {
            vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
        } else {
            error = zfs_getacl(zp, &vap->va_acl, B_TRUE, cr);
            if (error)
                return (error);

            VATTR_SET_SUPPORTED(vap, va_acl);
            /*
             * va_acl implies that va_uuuid and va_guuid are
             * also supported.
             */
            VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
            VATTR_RETURN(vap, va_guuid, kauth_null_guid);
        }
    }

    return (0);
}
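/*
 * As the comments above note, the OS X view of a ZFS dataset pins the
 * root directory at fileid 2 and gives it parentid 1. A standalone
 * restatement of that remapping (demo-only helper names):
 */
#include <stdint.h>

/* Object id as exposed to OS X: the root always reports fileid 2. */
static uint64_t
demo_fileid(uint64_t z_id, uint64_t z_root)
{
    return (z_id == z_root) ? 2 : z_id;
}

/* Parent id as exposed to OS X: the root's parent is 1, and children
 * of the root report parent 2 (the root's exported fileid). */
static uint64_t
demo_parentid(uint64_t z_id, uint64_t parent, uint64_t z_root)
{
    if (z_id == z_root)
        return 1;
    if (parent == z_root)
        return 2;
    return parent;
}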
static int
vnop_setattr_9p(struct vnop_setattr_args *ap)
{
    struct vnode_attr *vap;
    vnode_t vp;
    node_9p *np;
    dir_9p d;
    int e;

    TRACE();
    vp = ap->a_vp;
    vap = ap->a_vap;
    np = NTO9P(vp);

    if (vnode_vfsisrdonly(vp))
        return EROFS;

    if (vnode_isvroot(vp))
        return EACCES;

    nulldir(&d);
    if (VATTR_IS_ACTIVE(vap, va_data_size)) {
        if (vnode_isdir(vp))
            return EISDIR;
        d.length = vap->va_data_size;
    }
    VATTR_SET_SUPPORTED(vap, va_data_size);

    if (VATTR_IS_ACTIVE(vap, va_access_time))
        d.atime = vap->va_access_time.tv_sec;
    VATTR_SET_SUPPORTED(vap, va_access_time);

    if (VATTR_IS_ACTIVE(vap, va_modify_time))
        d.mtime = vap->va_modify_time.tv_sec;
    VATTR_SET_SUPPORTED(vap, va_modify_time);

    if (VATTR_IS_ACTIVE(vap, va_mode)) {
        d.mode = vap->va_mode & 0777;
        if (vnode_isdir(vp))
            SET(d.mode, DMDIR);
        if (ISSET(np->nmp->flags, F_DOTU)) {
            switch (vnode_vtype(vp)) {
            case VBLK:
            case VCHR:
                SET(d.mode, DMDEVICE);
                break;
            case VLNK:
                SET(d.mode, DMSYMLINK);
                break;
            case VSOCK:
                SET(d.mode, DMSOCKET);
                break;
            case VFIFO:
                SET(d.mode, DMNAMEDPIPE);
                break;
            default:
                break;
            }
        }
    }
    VATTR_SET_SUPPORTED(vap, va_mode);

    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    e = wstat_9p(np->nmp, np->fid, &d);
    np->dirtimer = 0;

    if (e==0 && d.length!=~0)
        ubc_setsize(vp, d.length);

    nunlock_9p(np);
    return e;
}
static int
vboxvfs_vnode_open(struct vnop_open_args *args)
{
    vnode_t vnode;
    vboxvfs_vnode_t *pVnodeData;
    uint32_t fHostFlags;
    mount_t mp;
    vboxvfs_mount_t *pMount;
    int rc;

    PDEBUG("Opening vnode...");

    AssertReturn(args, EINVAL);

    vnode = args->a_vp;
    AssertReturn(vnode, EINVAL);
    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode);
    AssertReturn(pVnodeData, EINVAL);
    mp = vnode_mount(vnode);
    AssertReturn(mp, EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);
    AssertReturn(pMount, EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) already has VBoxVFS object handle assigned, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point we must make sure that nobody is using VBoxVFS object handle */
    //if (pVnodeData->Handle != SHFL_HANDLE_NIL)
    //{
    //    PDEBUG("vnode has active VBoxVFS object handle set, aborting");
    //    lck_rw_unlock_exclusive(pVnodeData->pLock);
    //    return EINVAL;
    //}

    fHostFlags  = vboxvfs_g2h_mode_inernal(args->a_mode);
    fHostFlags |= (vnode_isdir(vnode) ? SHFL_CF_DIRECTORY : 0);

    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount, pVnodeData->pPath, fHostFlags, &Handle);
    if (rc == 0)
    {
        PDEBUG("Open success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)Handle);

        pVnodeData->pHandle = Handle;
    }
    else
    {
        PDEBUG("Unable to open: '%s': %d",
               (char *)pVnodeData->pPath->String.utf8,
               rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);
    return rc;
}
/*
 * Unlink zp from dl, and mark zp for deletion if this was the last link.
 * Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST).
 * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
 * If it's non-NULL, we use it to indicate whether the znode needs deletion,
 * and it's the caller's job to do it.
 */
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
    boolean_t *unlinkedp)
{
    znode_t *dzp = dl->dl_dzp;
    zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
    vnode_t *vp = ZTOV(zp);
    int zp_is_dir = vnode_isdir(vp);
    boolean_t unlinked = B_FALSE;
    sa_bulk_attr_t bulk[5];
    uint64_t mtime[2], ctime[2];
    int count = 0;
    int error;

    dnlc_remove(ZTOV(dzp), dl->dl_name);

    if (!(flag & ZRENAMING)) {
        if (vn_vfswlock(vp))        /* prevent new mounts on zp */
            return (EBUSY);

        if (vn_ismntpt(vp)) {       /* don't remove mount point */
            vn_vfsunlock(vp);
            return (EBUSY);
        }

        mutex_enter(&zp->z_lock);

        if (zp_is_dir && !zfs_dirempty(zp)) {
            mutex_exit(&zp->z_lock);
            vn_vfsunlock(vp);
            return (SET_ERROR(ENOTEMPTY));
        }

        /*
         * If we get here, we are going to try to remove the object.
         * First try removing the name from the directory; if that
         * fails, return the error.
         */
        error = zfs_dropname(dl, zp, dzp, tx, flag);
        if (error != 0) {
            mutex_exit(&zp->z_lock);
            vn_vfsunlock(vp);
            return (error);
        }

        if (zp->z_links <= zp_is_dir) {
            zfs_panic_recover("zfs: link count on vnode %p is %u, "
                "should be at least %u", zp->z_vnode,
                (int)zp->z_links, zp_is_dir + 1);
            zp->z_links = zp_is_dir + 1;
        }
        if (--zp->z_links == zp_is_dir) {
            zp->z_unlinked = B_TRUE;
            zp->z_links = 0;
            unlinked = B_TRUE;
        } else {
            SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
                NULL, &ctime, sizeof (ctime));
            SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
                NULL, &zp->z_pflags, sizeof (zp->z_pflags));
            zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
                B_TRUE);
        }
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
            NULL, &zp->z_links, sizeof (zp->z_links));
        error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
        count = 0;
        ASSERT(error == 0);
        mutex_exit(&zp->z_lock);
        vn_vfsunlock(vp);
    } else {
        error = zfs_dropname(dl, zp, dzp, tx, flag);
        if (error != 0)
            return (error);
    }

    mutex_enter(&dzp->z_lock);
    dzp->z_size--;              /* one dirent removed */
    dzp->z_links -= zp_is_dir;  /* ".." link from zp */
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
        NULL, &dzp->z_links, sizeof (dzp->z_links));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
        NULL, &dzp->z_size, sizeof (dzp->z_size));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
        NULL, ctime, sizeof (ctime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
        NULL, mtime, sizeof (mtime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
        NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
    zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
    error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
    ASSERT(error == 0);
    mutex_exit(&dzp->z_lock);

    if (unlinkedp != NULL)
        *unlinkedp = unlinked;
    else if (unlinked)
        zfs_unlinked_add(zp, tx);

    return (0);
}
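/*
 * The decrement above leans on the invariant that a directory carries
 * one more link than a plain file in the same state (its "." entry),
 * so zp_is_dir doubles as that bias: hitting z_links == zp_is_dir
 * means the last name is gone. A standalone restatement of the unlink
 * decision, assuming links starts at a sane value (>= zp_is_dir + 1):
 */
#include <stdbool.h>
#include <stdint.h>

/* Returns true when removing one name leaves the object unlinked. */
static bool
demo_last_link(uint64_t *links, int zp_is_dir)
{
    if (--(*links) == (uint64_t)zp_is_dir) {
        *links = 0;     /* as in zfs_link_destroy: drop the "." bias too */
        return true;
    }
    return false;
}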
/*
    struct vnop_lookup_args {
        struct vnodeop_desc *a_desc;
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
    };
*/
int
fuse_vnop_lookup(struct vop_lookup_args *ap)
{
    struct vnode *dvp = ap->a_dvp;
    struct vnode **vpp = ap->a_vpp;
    struct componentname *cnp = ap->a_cnp;
    struct thread *td = cnp->cn_thread;
    struct ucred *cred = cnp->cn_cred;

    int nameiop = cnp->cn_nameiop;
    int flags = cnp->cn_flags;
    int wantparent = flags & (LOCKPARENT | WANTPARENT);
    int islastcn = flags & ISLASTCN;
    struct mount *mp = vnode_mount(dvp);

    int err = 0;
    int lookup_err = 0;
    struct vnode *vp = NULL;

    struct fuse_dispatcher fdi;
    enum fuse_opcode op;

    uint64_t nid;
    struct fuse_access_param facp;

    FS_DEBUG2G("parent_inode=%ju - %*s\n", (uintmax_t)VTOI(dvp),
        (int)cnp->cn_namelen, cnp->cn_nameptr);

    if (fuse_isdeadfs(dvp)) {
        *vpp = NULL;
        return ENXIO;
    }
    if (!vnode_isdir(dvp)) {
        return ENOTDIR;
    }
    if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
        return EROFS;
    }
    /*
     * We do access check prior to doing anything else only in the case
     * when we are at fs root (we'd like to say, "we are at the first
     * component", but that's not exactly the same... nevermind).
     * See further comments at further access checks.
     */
    bzero(&facp, sizeof(facp));
    if (vnode_isvroot(dvp)) {   /* early permission check hack */
        if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
            return err;
        }
    }

    if (flags & ISDOTDOT) {
        nid = VTOFUD(dvp)->parent_nid;
        if (nid == 0) {
            return ENOENT;
        }
        fdisp_init(&fdi, 0);
        op = FUSE_GETATTR;
        goto calldaemon;
    } else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
        nid = VTOI(dvp);
        fdisp_init(&fdi, 0);
        op = FUSE_GETATTR;
        goto calldaemon;
    } else if (fuse_lookup_cache_enable) {
        err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
        switch (err) {

        case -1:        /* positive match */
            atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
            return 0;

        case 0:         /* no match in cache */
            atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
            break;

        case ENOENT:    /* negative match */
            /* fall through */
        default:
            return err;
        }
    }
    nid = VTOI(dvp);
    fdisp_init(&fdi, cnp->cn_namelen + 1);
    op = FUSE_LOOKUP;

calldaemon:
    fdisp_make(&fdi, op, mp, nid, td, cred);

    if (op == FUSE_LOOKUP) {
        memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
        ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
    }
    lookup_err = fdisp_wait_answ(&fdi);

    if ((op == FUSE_LOOKUP) && !lookup_err) {   /* lookup call succeeded */
        nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
        if (!nid) {
            /*
             * zero nodeid is the same as "not found",
             * but it's also cacheable (which we don't
             * do as of this writing)
             */
            lookup_err = ENOENT;
        } else if (nid == FUSE_ROOT_ID) {
            lookup_err = EINVAL;
        }
    }
    if (lookup_err &&
        (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
        fdisp_destroy(&fdi);
        return lookup_err;
    }
    /* lookup_err, if non-zero, must be ENOENT at this point */

    if (lookup_err) {

        if ((nameiop == CREATE || nameiop == RENAME) && islastcn
            /* && directory dvp has not been removed */ ) {

            if (vfs_isrdonly(mp)) {
                err = EROFS;
                goto out;
            }
#if 0 /* THINK_ABOUT_THIS */
            if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
                goto out;
            }
#endif

            /*
             * Possibly record the position of a slot in the
             * directory large enough for the new component name.
             * This can be recorded in the vnode private data for
             * dvp.  Set the SAVENAME flag to hold onto the
             * pathname for use later in VOP_CREATE or VOP_RENAME.
             */
            cnp->cn_flags |= SAVENAME;

            err = EJUSTRETURN;
            goto out;
        }
        /* Consider inserting name into cache. */

        /*
         * No, we can't use negative caching, as the fs
         * changes are out of our control.
         * False positives reveal their falseness as things
         * go by, but false negatives don't.
         * (And aiding the caching mechanism with extra control
         * mechanisms comes quite close to defeating the whole
         * purpose of caching...)
         */
#if 0
        if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) {
            FS_DEBUG("inserting NULL into cache\n");
            cache_enter(dvp, NULL, cnp);
        }
#endif
        err = ENOENT;
        goto out;

    } else {

        /* !lookup_err */

        struct fuse_entry_out *feo = NULL;
        struct fuse_attr *fattr = NULL;

        if (op == FUSE_GETATTR) {
            fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
        } else {
            feo = (struct fuse_entry_out *)fdi.answ;
            fattr = &(feo->attr);
        }

        /*
         * If deleting, and at end of pathname, return parameters
         * which can be used to remove the file.  If the wantparent
         * flag isn't set, we return only the directory, otherwise
         * we go on and lock the inode, being careful with ".".
         */
        if (nameiop == DELETE && islastcn) {
            /*
             * Check for write access on directory.
             */
            facp.xuid = fattr->uid;
            facp.facc_flags |= FACCESS_STICKY;
            err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
            facp.facc_flags &= ~FACCESS_XQUERIES;

            if (err) {
                goto out;
            }
            if (nid == VTOI(dvp)) {
                vref(dvp);
                *vpp = dvp;
            } else {
                err = fuse_vnode_get(dvp->v_mount, nid, dvp,
                    &vp, cnp, IFTOVT(fattr->mode));
                if (err)
                    goto out;
                *vpp = vp;
            }

            /*
             * Save the name for use in VOP_RMDIR and VOP_REMOVE
             * later.
             */
            cnp->cn_flags |= SAVENAME;
            goto out;
        }
        /*
         * If rewriting (RENAME), return the inode and the
         * information required to rewrite the present directory.
         * Must get inode of directory entry to verify it's a
         * regular file, or empty directory.
         */
        if (nameiop == RENAME && wantparent && islastcn) {

#if 0 /* THINK_ABOUT_THIS */
            if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
                goto out;
            }
#endif

            /*
             * Check for "."
             */
            if (nid == VTOI(dvp)) {
                err = EISDIR;
                goto out;
            }
            err = fuse_vnode_get(vnode_mount(dvp), nid, dvp,
                &vp, cnp, IFTOVT(fattr->mode));
            if (err) {
                goto out;
            }
            *vpp = vp;
            /*
             * Save the name for use in VOP_RENAME later.
             */
            cnp->cn_flags |= SAVENAME;

            goto out;
        }
        if (flags & ISDOTDOT) {
            struct mount *mp;
            int ltype;

            /*
             * Expanded copy of vn_vget_ino() so that
             * fuse_vnode_get() can be used.
             */
            mp = dvp->v_mount;
            ltype = VOP_ISLOCKED(dvp);
            err = vfs_busy(mp, MBF_NOWAIT);
            if (err != 0) {
                vfs_ref(mp);
                VOP_UNLOCK(dvp, 0);
                err = vfs_busy(mp, 0);
                vn_lock(dvp, ltype | LK_RETRY);
                vfs_rel(mp);
                if (err)
                    goto out;
                if ((dvp->v_iflag & VI_DOOMED) != 0) {
                    err = ENOENT;
                    vfs_unbusy(mp);
                    goto out;
                }
            }
            VOP_UNLOCK(dvp, 0);
            err = fuse_vnode_get(vnode_mount(dvp), nid, NULL,
                &vp, cnp, IFTOVT(fattr->mode));
            vfs_unbusy(mp);
            vn_lock(dvp, ltype | LK_RETRY);
            if ((dvp->v_iflag & VI_DOOMED) != 0) {
                if (err == 0)
                    vput(vp);
                err = ENOENT;
            }
            if (err)
                goto out;
            *vpp = vp;
        } else if (nid == VTOI(dvp)) {
            vref(dvp);
            *vpp = dvp;
        } else {
            err = fuse_vnode_get(vnode_mount(dvp), nid, dvp,
                &vp, cnp, IFTOVT(fattr->mode));
            if (err) {
                goto out;
            }
            fuse_vnode_setparent(vp, dvp);
            *vpp = vp;
        }

        if (op == FUSE_GETATTR) {
            cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
        } else {
            cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
        }

        /* Insert name into cache if appropriate. */

        /*
         * Nooo, caching is evil.  With caching, we can't avoid stale
         * information taking over the playground (cached info is not
         * just positive/negative, it does have qualitative aspects,
         * too).  And a (VOP/FUSE)_GETATTR is always thrown anyway
         * when walking down along cached path components, and that's
         * not any cheaper than FUSE_LOOKUP.  This might change with
         * implementing kernel-side attr caching, but...  In Linux,
         * lookup results are not cached, and the daemon is bombarded
         * with FUSE_LOOKUPs on and on.  This shows that by design,
         * the daemon is expected to handle frequent lookup queries
         * efficiently, do its caching in userspace, and so on.
         *
         * So just leave the name cache alone.
         */

        /*
         * Well, now I know: Linux caches lookups, but with a
         * timeout...  So it's the same thing as attribute caching:
         * we can deal with it when we implement timeouts.
         */
#if 0
        if (cnp->cn_flags & MAKEENTRY) {
            cache_enter(dvp, *vpp, cnp);
        }
#endif
    }
out:
    if (!lookup_err) {

        /* No lookup error; need to clean up. */

        if (err) {      /* Found inode; exit with no vnode. */
            if (op == FUSE_LOOKUP) {
                fuse_internal_forget_send(vnode_mount(dvp),
                    td, cred, nid, 1);
            }
            fdisp_destroy(&fdi);
            return err;
        } else {
#ifndef NO_EARLY_PERM_CHECK_HACK
            if (!islastcn) {
                /*
                 * We have the attributes of the next item
                 * *now*, and it's a fact, and we do not
                 * have to do extra work for it (ie, beg the
                 * daemon), and it neither depends on such
                 * accidental things like attr caching.  So
                 * the big idea: check credentials *now*,
                 * not at the beginning of the next call to
                 * lookup.
                 *
                 * The first item of the lookup chain (fs root)
                 * won't be checked then here, of course, as
                 * it's never "the next".  But go and see that
                 * the root is taken care of at the very
                 * beginning of this function.
                 *
                 * Now, given we want to do the access check
                 * this way, one might ask: so then why not
                 * do the access check just after fetching
                 * the inode and its attributes from the
                 * daemon?  Why bother with producing the
                 * corresponding vnode at all if something
                 * is not OK?  We know what's the deal as
                 * soon as we get those attrs...  There is
                 * one bit of info though not given us by
                 * the daemon: whether his response is
                 * authoritative or not...  His response should
                 * be ignored if something is mounted over
                 * the dir in question.  But that can be
                 * known only by having the vnode...
                 */
                int tmpvtype = vnode_vtype(*vpp);

                bzero(&facp, sizeof(facp));
                /* the early perm check hack */
                facp.facc_flags |= FACCESS_VA_VALID;

                if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
                    err = ENOTDIR;
                }
                if (!err && !vnode_mountedhere(*vpp)) {
                    err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
                }
                if (err) {
                    if (tmpvtype == VLNK)
                        FS_DEBUG("weird, permission error with a symlink?\n");
                    vput(*vpp);
                    *vpp = NULL;
                }
            }
#endif
        }
    }
    fdisp_destroy(&fdi);

    return err;
}
/*
    struct vnop_rename_args {
        struct vnode *a_fdvp;
        struct vnode *a_fvp;
        struct componentname *a_fcnp;
        struct vnode *a_tdvp;
        struct vnode *a_tvp;
        struct componentname *a_tcnp;
    };
*/
static int
fuse_vnop_rename(struct vop_rename_args *ap)
{
    struct vnode *fdvp = ap->a_fdvp;
    struct vnode *fvp = ap->a_fvp;
    struct componentname *fcnp = ap->a_fcnp;
    struct vnode *tdvp = ap->a_tdvp;
    struct vnode *tvp = ap->a_tvp;
    struct componentname *tcnp = ap->a_tcnp;
    struct fuse_data *data;

    int err = 0;

    FS_DEBUG2G("from: inode=%ju name=%*s -> to: inode=%ju name=%*s\n",
        (uintmax_t)VTOI(fvp), (int)fcnp->cn_namelen, fcnp->cn_nameptr,
        (uintmax_t)(tvp == NULL ? -1 : VTOI(tvp)),
        (int)tcnp->cn_namelen, tcnp->cn_nameptr);

    if (fuse_isdeadfs(fdvp)) {
        return ENXIO;
    }
    if (fvp->v_mount != tdvp->v_mount ||
        (tvp && fvp->v_mount != tvp->v_mount)) {
        FS_DEBUG("cross-device rename: %s -> %s\n",
            fcnp->cn_nameptr, (tcnp != NULL ? tcnp->cn_nameptr : "(NULL)"));
        err = EXDEV;
        goto out;
    }
    cache_purge(fvp);

    /*
     * FUSE library is expected to check if target directory is not
     * under the source directory in the file system tree.
     * Linux performs this check at VFS level.
     */
    data = fuse_get_mpdata(vnode_mount(tdvp));
    sx_xlock(&data->rename_lock);
    err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp);
    if (err == 0) {
        if (tdvp != fdvp)
            fuse_vnode_setparent(fvp, tdvp);
        if (tvp != NULL)
            fuse_vnode_setparent(tvp, NULL);
    }
    sx_unlock(&data->rename_lock);

    if (tvp != NULL && tvp != fvp) {
        cache_purge(tvp);
    }
    if (vnode_isdir(fvp)) {
        if ((tvp != NULL) && vnode_isdir(tvp)) {
            cache_purge(tdvp);
        }
        cache_purge(fdvp);
    }
out:
    if (tdvp == tvp) {
        vrele(tdvp);
    } else {
        vput(tdvp);
    }
    if (tvp != NULL) {
        vput(tvp);
    }
    vrele(fdvp);
    vrele(fvp);

    return err;
}
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
    znode_t *dzp = dl->dl_dzp;
    zfsvfs_t *zfsvfs = zp->z_zfsvfs;
    vnode_t *vp = ZTOV(zp);
    uint64_t value;
    int zp_is_dir = vnode_isdir(vp);
    sa_bulk_attr_t bulk[5];
    uint64_t mtime[2], ctime[2];
    int count = 0;
    int error;

    mutex_enter(&zp->z_lock);

    if (!(flag & ZRENAMING)) {
        if (zp->z_unlinked) {   /* no new links to unlinked zp */
            ASSERT(!(flag & (ZNEW | ZEXISTS)));
            mutex_exit(&zp->z_lock);
            return (SET_ERROR(ENOENT));
        }
        zp->z_links++;
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
            NULL, &zp->z_links, sizeof (zp->z_links));
    }
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs),
        NULL, &dzp->z_id, sizeof (dzp->z_id));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
        NULL, &zp->z_pflags, sizeof (zp->z_pflags));

    if (!(flag & ZNEW)) {
        SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
            NULL, ctime, sizeof (ctime));
        zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime, B_TRUE);
    }
    error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
    ASSERT(error == 0);

    mutex_exit(&zp->z_lock);

    mutex_enter(&dzp->z_lock);
    dzp->z_size++;
    dzp->z_links += zp_is_dir;
    count = 0;
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
        NULL, &dzp->z_size, sizeof (dzp->z_size));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
        NULL, &dzp->z_links, sizeof (dzp->z_links));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
        NULL, mtime, sizeof (mtime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
        NULL, ctime, sizeof (ctime));
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
        NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
    zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
    error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
    ASSERT(error == 0);
    mutex_exit(&dzp->z_lock);

    value = zfs_dirent(zp, zp->z_mode);
    error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
        8, 1, &value, tx);
    ASSERT(error == 0);

    dnlc_update(ZTOV(dzp), dl->dl_name, vp);

    return (0);
}
/*
 * Function: devfs_kernel_mount
 * Purpose:
 *   Mount devfs at the given mount point from within the kernel.
 */
int
devfs_kernel_mount(char *mntname)
{
    struct mount *mp;
    int error;
    struct nameidata nd;
    struct vnode *vp;
    vfs_context_t ctx = vfs_context_kernel();
    struct vfstable *vfsp;

    /* Find our vfstable entry */
    for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
        if (!strncmp(vfsp->vfc_name, "devfs", sizeof(vfsp->vfc_name)))
            break;

    if (!vfsp) {
        panic("Could not find entry in vfsconf for devfs.\n");
    }

    /*
     * Get vnode to be covered
     */
    NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
        CAST_USER_ADDR_T(mntname), ctx);
    if ((error = namei(&nd))) {
        printf("devfs_kernel_mount: failed to find directory '%s', %d",
            mntname, error);
        return (error);
    }
    nameidone(&nd);
    vp = nd.ni_vp;

    if ((error = VNOP_FSYNC(vp, MNT_WAIT, ctx))) {
        printf("devfs_kernel_mount: vnop_fsync failed: %d\n", error);
        vnode_put(vp);
        return (error);
    }
    if ((error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0))) {
        printf("devfs_kernel_mount: buf_invalidateblks failed: %d\n", error);
        vnode_put(vp);
        return (error);
    }
    if (vnode_isdir(vp) == 0) {
        printf("devfs_kernel_mount: '%s' is not a directory\n", mntname);
        vnode_put(vp);
        return (ENOTDIR);
    }
    if ((vnode_mountedhere(vp))) {
        vnode_put(vp);
        return (EBUSY);
    }

    /*
     * Allocate and initialize the filesystem.
     */
    MALLOC_ZONE(mp, struct mount *, sizeof(struct mount), M_MOUNT, M_WAITOK);
    bzero((char *)mp, sizeof(struct mount));

    /* Initialize the default IO constraints */
    mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
    mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
    mp->mnt_ioflags = 0;
    mp->mnt_realrootvp = NULLVP;
    mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;

    mount_lock_init(mp);
    TAILQ_INIT(&mp->mnt_vnodelist);
    TAILQ_INIT(&mp->mnt_workerqueue);
    TAILQ_INIT(&mp->mnt_newvnodes);

    (void)vfs_busy(mp, LK_NOWAIT);
    mp->mnt_op = &devfs_vfsops;
    mp->mnt_vtable = vfsp;
    mp->mnt_flag = 0;
    mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
    strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
    vp->v_mountedhere = mp;
    mp->mnt_vnodecovered = vp;
    mp->mnt_vfsstat.f_owner = kauth_cred_getuid(kauth_cred_get());
    (void) copystr(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN - 1, 0);
#if CONFIG_MACF
    mac_mount_label_init(mp);
    mac_mount_label_associate(ctx, mp);
#endif

    error = devfs_mount(mp, NULL, USER_ADDR_NULL, ctx);
    if (error) {
        printf("devfs_kernel_mount: mount %s failed: %d", mntname, error);
        mp->mnt_vtable->vfc_refcount--;

        vfs_unbusy(mp);

        mount_lock_destroy(mp);
#if CONFIG_MACF
        mac_mount_label_destroy(mp);
#endif
        FREE_ZONE(mp, sizeof (struct mount), M_MOUNT);
        vnode_put(vp);
        return (error);
    }
    vnode_ref(vp);
    vnode_put(vp);
    vfs_unbusy(mp);
    mount_list_add(mp);
    return (0);
}
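/*
 * A minimal usage sketch. In xnu this helper is called during BSD
 * startup to cover /dev; invoking it from any other context is an
 * assumption made purely for illustration.
 */
static void
demo_mount_dev(void)
{
    int error = devfs_kernel_mount("/dev");

    if (error)
        printf("devfs mount on /dev failed: %d\n", error);
}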
static int
vboxvfs_vnode_lookup(struct vnop_lookup_args *args)
{
    int rc;

    vnode_t vnode;
    vboxvfs_vnode_t *pVnodeData;

    PDEBUG("Looking up for vnode...");

    AssertReturn(args, EINVAL);
    AssertReturn(args->a_dvp, EINVAL);
    AssertReturn(vnode_isdir(args->a_dvp), EINVAL);
    AssertReturn(args->a_cnp, EINVAL);
    AssertReturn(args->a_cnp->cn_nameptr, EINVAL);
    AssertReturn(args->a_vpp, EINVAL);

    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(args->a_dvp);
    AssertReturn(pVnodeData, EINVAL);
    AssertReturn(pVnodeData->pLock, EINVAL);

    /* todo: take care of args->a_cnp->cn_nameiop */

    if      (args->a_cnp->cn_nameiop == LOOKUP) PDEBUG("LOOKUP");
    else if (args->a_cnp->cn_nameiop == CREATE) PDEBUG("CREATE");
    else if (args->a_cnp->cn_nameiop == RENAME) PDEBUG("RENAME");
    else if (args->a_cnp->cn_nameiop == DELETE) PDEBUG("DELETE");
    else PDEBUG("Unknown cn_nameiop: 0x%X", (int)args->a_cnp->cn_nameiop);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    /* Handle '.' and '..' entries */
    if (vboxvfs_vnode_lookup_dot_handler(args, &vnode) == 0)
    {
        vnode_get(vnode);
        *args->a_vpp = vnode;

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* Look into the VFS cache and attempt to find a previously allocated vnode there. */
    rc = cache_lookup(args->a_dvp, &vnode, args->a_cnp);
    if (rc == -1) /* Record found */
    {
        PDEBUG("Found record in VFS cache");

        /* Check if the VFS object still exists on the host side */
        if (vboxvfs_exist_internal(vnode))
        {
            /* Prepare & return cached vnode */
            vnode_get(vnode);
            *args->a_vpp = vnode;

            rc = 0;
        }
        else
        {
            /* The vnode exists in the guest VFS cache but no longer
             * exists on the host, so just forget it. */
            cache_purge(vnode);
            /* todo: free vnode data here */
            rc = ENOENT;
        }
    }
    else
    {
        PDEBUG("cache_lookup() returned %d, create new VFS vnode", rc);

        rc = vboxvfs_vnode_lookup_instantinate_vnode(args->a_dvp,
            args->a_cnp->cn_nameptr, &vnode);
        if (rc == 0)
        {
            cache_enter(args->a_dvp, vnode, args->a_cnp);
            *args->a_vpp = vnode;
        }
        else
        {
            rc = ENOENT;
        }
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);
    return rc;
}
static int
vnop_readdir_9p(struct vnop_readdir_args *ap)
{
    struct direntry de64;
    struct dirent de32;
    vnode_t vp;
    node_9p *np;
    dir_9p *dp;
    fid_9p fid;
    off_t off;
    uio_t uio;
    uint32_t i, nd, nlen, plen;
    void *p;
    int e;

    TRACE();
    vp = ap->a_vp;
    uio = ap->a_uio;
    np = NTO9P(vp);

    if (!vnode_isdir(vp))
        return ENOTDIR;

    if (ISSET(ap->a_flags, VNODE_READDIR_REQSEEKOFF))
        return EINVAL;

    off = uio_offset(uio);
    if (off < 0)
        return EINVAL;

    if (uio_resid(uio) == 0)
        return 0;

    e = 0;
    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    fid = np->openfid[OREAD].fid;
    if (fid == NOFID) {
        e = EBADF;
        goto error;
    }

    if (ap->a_eofflag)
        *ap->a_eofflag = 0;

    if (off == 0 || np->direntries==NULL) {
        if ((e=readdirs_9p(np->nmp, fid, &np->direntries, &np->ndirentries)))
            goto error;
        if (np->ndirentries && np->direntries==NULL)
            panic("bug in readdir");
    }

    dp = np->direntries;
    nd = np->ndirentries;
    for (i=off; i<nd; i++) {
        if (ISSET(ap->a_flags, VNODE_READDIR_EXTENDED)) {
            bzero(&de64, sizeof(de64));
            de64.d_ino = QTOI(dp[i].qid);
            de64.d_type = dp[i].mode&DMDIR? DT_DIR: DT_REG;
            nlen = strlen(dp[i].name);
            de64.d_namlen = MIN(nlen, sizeof(de64.d_name)-1);
            bcopy(dp[i].name, de64.d_name, de64.d_namlen);
            de64.d_reclen = DIRENT64_LEN(de64.d_namlen);
            plen = de64.d_reclen;
            p = &de64;
        } else {
            bzero(&de32, sizeof(de32));
            de32.d_ino = QTOI(dp[i].qid);
            de32.d_type = dp[i].mode&DMDIR? DT_DIR: DT_REG;
            nlen = strlen(dp[i].name);
            de32.d_namlen = MIN(nlen, sizeof(de32.d_name)-1);
            bcopy(dp[i].name, de32.d_name, de32.d_namlen);
            de32.d_reclen = DIRENT32_LEN(de32.d_namlen);
            plen = de32.d_reclen;
            p = &de32;
        }
        if (uio_resid(uio) < plen)
            break;
        if ((e=uiomove(p, plen, uio)))
            goto error;
    }

    uio_setoffset(uio, i);
    if (ap->a_numdirent)
        *ap->a_numdirent = i - off;

    if (i==nd && ap->a_eofflag) {
        *ap->a_eofflag = 1;
        free_9p(np->direntries);
        np->direntries = NULL;
        np->ndirentries = 0;
    }

error:
    nunlock_9p(np);
    return e;
}
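/*
 * vnop_readdir_9p() uses the directory-entry index, not a byte count,
 * as the uio offset: it resumes at entry off and stores the next index
 * back via uio_setoffset(uio, i). The standalone demo below restates
 * that resume arithmetic; the entry counts are assumptions made only
 * for the example.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t nd = 10;       /* entries cached in np->direntries */
    uint32_t off = 4;       /* uio offset: index of first entry to emit */
    uint32_t budget = 3;    /* entries that fit in the caller's buffer */
    uint32_t i;

    for (i = off; i < nd && budget != 0; i++, budget--)
        ;                   /* the driver would uiomove() one dirent here */

    printf("emitted %u entries, next offset %u, eof=%d\n",
        i - off, i, i == nd);
    return 0;
}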
static int
vboxvfs_vnode_readdir(struct vnop_readdir_args *args)
{
    vboxvfs_mount_t *pMount;
    vboxvfs_vnode_t *pVnodeData;
    SHFLDIRINFO *Info;
    uint32_t cbInfo;
    mount_t mp;
    vnode_t vnode;
    struct uio *uio;

    int rc = 0, rc2;

    PDEBUG("Reading directory...");

    AssertReturn(args, EINVAL);
    AssertReturn(args->a_eofflag, EINVAL);
    AssertReturn(args->a_numdirent, EINVAL);

    uio = args->a_uio;
    AssertReturn(uio, EINVAL);

    vnode = args->a_vp;
    AssertReturn(vnode, EINVAL);
    AssertReturn(vnode_isdir(vnode), EINVAL);

    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode);
    AssertReturn(pVnodeData, EINVAL);

    mp = vnode_mount(vnode);
    AssertReturn(mp, EINVAL);

    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);
    AssertReturn(pMount, EINVAL);

    lck_rw_lock_shared(pVnodeData->pLock);

    cbInfo = sizeof(*Info) + MAXPATHLEN; /* header plus room for the entry name */
    Info = (SHFLDIRINFO *)RTMemAllocZ(cbInfo);
    if (!Info)
    {
        PDEBUG("No memory to allocate internal data");
        lck_rw_unlock_shared(pVnodeData->pLock);
        return ENOMEM;
    }

    uint32_t index = (uint32_t)uio_offset(uio) / (uint32_t)sizeof(struct dirent);
    uint32_t cFiles = 0;

    PDEBUG("Exploring VBoxVFS directory (%s), handle (0x%.8X), offset (0x%X), count (%d)",
           (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle,
           index, uio_iovcnt(uio));

    /* Currently, there is a problem when VbglR0SfDirInfo() is unable to
     * continue retrieving directory content if the same VBoxVFS handle is
     * used. The code below forces a new handle to be used in the readdir()
     * callback; the original handle (obtained in the open() callback) is
     * ignored. */
    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount,
                               pVnodeData->pPath,
                               SHFL_CF_DIRECTORY | SHFL_CF_ACCESS_READ | SHFL_CF_ACT_OPEN_IF_EXISTS | SHFL_CF_ACT_FAIL_IF_NEW,
                               &Handle);
    if (rc != 0)
    {
        PDEBUG("Unable to open dir: %d", rc);
        RTMemFree(Info);
        lck_rw_unlock_shared(pVnodeData->pLock);
        return rc;
    }

#if 0
    rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0, 0, index,
                         &cbInfo, (PSHFLDIRINFO)Info, &cFiles);
#else
    SHFLSTRING *pMask = vboxvfs_construct_shflstring("*", strlen("*"));
    if (pMask)
    {
        for (uint32_t cSkip = 0; (cSkip < index + 1) && (rc == VINF_SUCCESS); cSkip++)
        {
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0 /* pMask */, 0 /* SHFL_LIST_RETURN_ONE */, 0, &cbInfo, (PSHFLDIRINFO)Info, &cFiles);

            uint32_t cbReturned = cbInfo;
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, pMask, SHFL_LIST_RETURN_ONE, 0, &cbReturned, (PSHFLDIRINFO)Info, &cFiles);
            rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0, SHFL_LIST_RETURN_ONE, 0,
                                 &cbReturned, (PSHFLDIRINFO)Info, &cFiles);
        }

        PDEBUG("read %d files", cFiles);
        RTMemFree(pMask);
    }
    else
    {
        PDEBUG("Can't alloc mask");
        rc = ENOMEM;
    }
#endif

    rc2 = vboxvfs_close_internal(pMount, Handle);
    if (rc2 != 0)
    {
        PDEBUG("Unable to close directory: %s: %d",
               pVnodeData->pPath->String.utf8, rc2);
    }

    switch (rc)
    {
        case VINF_SUCCESS:
        {
            rc = vboxvfs_vnode_readdir_copy_data((ino_t)(index + 1), Info, uio,
                                                 args->a_numdirent);
            break;
        }

        case VERR_NO_MORE_FILES:
        {
            PDEBUG("No more entries in directory");
            *(args->a_eofflag) = 1;
            break;
        }

        default:
        {
            PDEBUG("VbglR0SfDirInfo() for item #%d has failed: %d",
                   (int)index, (int)rc);
            rc = EINVAL;
            break;
        }
    }

    RTMemFree(Info);

    lck_rw_unlock_shared(pVnodeData->pLock);
    return rc;
}
static int
vnop_lookup_9p(struct vnop_lookup_args *ap)
{
    struct componentname *cnp;
    node_9p *dnp;
    vnode_t *vpp, dvp;
    fid_9p fid;
    qid_9p qid;
    int e;

    TRACE();
    dvp = ap->a_dvp;
    vpp = ap->a_vpp;
    cnp = ap->a_cnp;
    dnp = NTO9P(dvp);

    if (!vnode_isdir(dvp))
        return ENOTDIR;

    if (isdotdot(cnp) && vnode_isvroot(dvp))
        return EIO;

    if (islastcn(cnp) && !isop(cnp, LOOKUP) && vnode_vfsisrdonly(dvp))
        return EROFS;

    if (isdot(cnp)) {
        if (islastcn(cnp) && isop(cnp, RENAME))
            return EISDIR;
        if ((e=vnode_get(dvp)))
            return e;
        *vpp = dvp;
        return 0;
    }

    if (isdotdot(cnp)) {
        *vpp = vnode_getparent(dvp);
        if (*vpp == NULL)
            return ENOENT;
        return 0;
    }

    e = cache_lookup(dvp, vpp, cnp);
    if (e == -1)    /* found */
        return 0;
    if (e != 0)     /* errno */
        return e;

    /* not in cache */
    nlock_9p(dnp, NODE_LCK_EXCLUSIVE);
    e = walk_9p(dnp->nmp, dnp->fid, cnp->cn_nameptr, cnp->cn_namelen, &fid, &qid);
    if (e) {
        if (islastcn(cnp)) {
            if (isop(cnp, CREATE) || isop(cnp, RENAME))
                e = EJUSTRETURN;
            else if (ismkentry(cnp) && dnp->dir.qid.vers!=0)
                cache_enter(dvp, NULL, cnp);
        }
        goto error;
    }

    e = nget_9p(dnp->nmp, fid, qid, dvp, vpp, cnp, ap->a_context);
    if (e || *vpp==NULL || NTO9P(*vpp)->fid!=fid)
        clunk_9p(dnp->nmp, fid);

    if (*vpp)
        nunlock_9p(NTO9P(*vpp));
error:
    nunlock_9p(dnp);
    return e;
}