/*
 * MPSAFE
 *
 * Dispatch a VOP_WRITE through the given ops vector, enforcing filesystem
 * quotas before the write and charging the accounting after a successful
 * one.  The mount's MNTK_WR_MPSAFE flag selects whether the MP lock is
 * taken around the operation.
 *
 * Returns 0 on success, EDQUOT when the write would exceed quota, or the
 * error from VOP_GETATTR / the underlying write op.
 */
int
vop_write(struct vop_ops *ops, struct vnode *vp, struct uio *uio, int ioflag,
	  struct ucred *cred)
{
	struct vop_write_args ap;
	VFS_MPLOCK_DECLARE;
	int error, do_accounting = 0;
	struct vattr va;
	uint64_t size_before=0, size_after=0;
	struct mount *mp;	/* only assigned on the quota path; guarded by do_accounting */
	uint64_t offset, delta;

	ap.a_head.a_desc = &vop_write_desc;
	ap.a_head.a_ops = ops;
	ap.a_vp = vp;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_cred = cred;

	VFS_MPLOCK_FLAG(vp->v_mount, MNTK_WR_MPSAFE);
	/* is this a regular vnode ? quotas only apply to regular files */
	if (vfs_quota_enabled && (vp->v_type == VREG)) {
		/* error path unlocks via done: without performing the write */
		if ((error = VOP_GETATTR(vp, &va)) != 0)
			goto done;
		size_before = va.va_size;
		/* this file may already have been removed */
		if (va.va_nlink > 0)
			do_accounting = 1;
		offset = uio->uio_offset;
		if (ioflag & IO_APPEND)
			offset = size_before;
		/*
		 * Predicted post-write size; a write entirely inside the
		 * existing file consumes no new space (delta == 0).
		 */
		size_after = offset + uio->uio_resid;
		if (size_after < size_before)
			size_after = size_before;
		delta = size_after - size_before;
		mp = vq_vptomp(vp);

		/* QUOTA CHECK */
		if (!vq_write_ok(mp, va.va_uid, va.va_gid, delta)) {
			error = EDQUOT;
			goto done;
		}
	}
	DO_OPS(ops, error, &ap, vop_write);
	/*
	 * NOTE(review): accounting charges the predicted delta computed
	 * above; a short write would be over-charged — presumably acceptable
	 * here, confirm against the accounting design.
	 */
	if ((error == 0) && do_accounting) {
		VFS_ACCOUNT(mp, va.va_uid, va.va_gid, size_after - size_before);
	}
done:
	VFS_MPUNLOCK(vp->v_mount);
	return(error);
}
/*
 * MPSAFE
 *
 * Invoke the filesystem's vfs_start method, then initialize quota
 * accounting via vfs_acinit on fresh mounts (never on MNT_UPDATE).
 * An EMOUNTEXIT result from acinit is mapped to success.
 */
int
vfs_start(struct mount *mp, int flags)
{
	VFS_MPLOCK_DECLARE;
	int rc;

	VFS_MPLOCK_FLAG(mp, MNTK_ST_MPSAFE);
	rc = (mp->mnt_op->vfs_start)(mp, flags);
	/* do not call vfs_acinit on mount updates */
	if (rc == 0 && (mp->mnt_flag & MNT_UPDATE) == 0)
		rc = (mp->mnt_op->vfs_acinit)(mp);
	VFS_MPUNLOCK(mp);
	if (rc == EMOUNTEXIT)
		rc = 0;
	return (rc);
}
/*
 * MPSAFE
 *
 * Dispatch a VOP_GETATTR through the given ops vector, filling *vap.
 * The MP lock is taken unless the mount is flagged MNTK_GA_MPSAFE.
 */
int
vop_getattr(struct vop_ops *ops, struct vnode *vp, struct vattr *vap)
{
	struct vop_getattr_args ap;
	VFS_MPLOCK_DECLARE;
	int rc;

	ap.a_head.a_desc = &vop_getattr_desc;
	ap.a_head.a_ops = ops;
	ap.a_vp = vp;
	ap.a_vap = vap;

	VFS_MPLOCK_FLAG(vp->v_mount, MNTK_GA_MPSAFE);
	DO_OPS(ops, rc, &ap, vop_getattr);
	VFS_MPUNLOCK(vp->v_mount);

	return(rc);
}
/*
 * MPSAFE
 *
 * Dispatch a VOP_READ through the given ops vector.  The MP lock is
 * taken unless the mount is flagged MNTK_RD_MPSAFE.
 */
int
vop_read(struct vop_ops *ops, struct vnode *vp, struct uio *uio, int ioflag,
	 struct ucred *cred)
{
	struct vop_read_args ap;
	VFS_MPLOCK_DECLARE;
	int rc;

	ap.a_head.a_desc = &vop_read_desc;
	ap.a_head.a_ops = ops;
	ap.a_vp = vp;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_cred = cred;

	VFS_MPLOCK_FLAG(vp->v_mount, MNTK_RD_MPSAFE);
	DO_OPS(ops, rc, &ap, vop_read);
	VFS_MPUNLOCK(vp->v_mount);

	return(rc);
}
/*
 * MPSAFE
 *
 * Dispatch a VOP_INACTIVE through the given ops vector.
 *
 * WARNING! Deactivation can recycle the vnode and clear vp->v_mount,
 * so the mount pointer is cached up front and used for both the lock
 * and the unlock.
 */
int
vop_inactive(struct vop_ops *ops, struct vnode *vp)
{
	struct vop_inactive_args ap;
	struct mount *mp;
	VFS_MPLOCK_DECLARE;
	int rc;

	ap.a_head.a_desc = &vop_inactive_desc;
	ap.a_head.a_ops = ops;
	ap.a_vp = vp;

	/* cache before DO_OPS: vp->v_mount may be gone afterwards */
	mp = vp->v_mount;
	VFS_MPLOCK_FLAG(mp, MNTK_IN_MPSAFE);
	DO_OPS(ops, rc, &ap, vop_inactive);
	VFS_MPUNLOCK(mp);

	return(rc);
}
/* * MPSAFE */ int vop_strategy(struct vop_ops *ops, struct vnode *vp, struct bio *bio) { struct vop_strategy_args ap; VFS_MPLOCK_DECLARE; int error; ap.a_head.a_desc = &vop_strategy_desc; ap.a_head.a_ops = ops; ap.a_vp = vp; ap.a_bio = bio; if (vp->v_mount) { VFS_MPLOCK_FLAG(vp->v_mount, MNTK_SG_MPSAFE); DO_OPS(ops, error, &ap, vop_strategy); VFS_MPUNLOCK(vp->v_mount); } else { /* ugly hack for swap */ get_mplock(); DO_OPS(ops, error, &ap, vop_strategy); rel_mplock(); } return(error); }