/*
 * Mark a regular file's inode as compressed.  The flag change is
 * logged if the file system is logging; otherwise the inode is
 * pushed to disk asynchronously.
 */
int
ufs_mark_compressed(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);
	struct ufsvfs *ufsvfsp = ip->i_ufsvfs;

	if (vp->v_type != VREG)
		return (EINVAL);

	rw_enter(&ip->i_contents, RW_WRITER);
	ip->i_cflags |= ICOMPRESS;
	TRANS_INODE(ufsvfsp, ip);
	ip->i_flag |= (ICHG|ISEQ);
	ip->i_seq++;
	if (!TRANS_ISTRANS(ufsvfsp))
		ufs_iupdat(ip, I_ASYNC);
	rw_exit(&ip->i_contents);

	return (0);
}
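/*
 * Illustrative only: a minimal sketch of how an ioctl handler might
 * dispatch to ufs_mark_compressed().  _FIO_COMPRESSED is the ufs
 * ioctl used to tag boot-archive (dcfs) files as compressed, but the
 * handler shape and the placement of the privilege check here are
 * assumptions, not the actual ufs_ioctl() code.
 */
static int
ufs_compress_ioctl_sketch(struct vnode *vp, int cmd, cred_t *cr)
{
	switch (cmd) {
	case _FIO_COMPRESSED:
		/* Assumed policy check; only privileged callers qualify. */
		if (secpolicy_fs_config(cr, vp->v_vfsp) != 0)
			return (EPERM);
		return (ufs_mark_compressed(vp));
	default:
		return (ENOTTY);
	}
}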
/* ARGSUSED7 */
int				/* ERRNO if error, 0 if successful. */
sam_create_ino(
	sam_node_t *pip,	/* pointer to parent directory inode. */
	char *cp,		/* pointer to the component name to create. */
	vattr_t *vap,		/* vattr ptr for type & mode information. */
	vcexcl_t ex,		/* exclusive create flag. */
	int mode,		/* file mode information. */
	vnode_t **vpp,		/* pointer pointer to returned vnode. */
	cred_t *credp,		/* credentials pointer. */
	int filemode)		/* open file mode */
{
	int error = 0;
	sam_node_t *ip;
	struct sam_name name;	/* If no entry, slot info is returned here */
	int trans_size;
	int issync;
	int truncflag = 0;
	int terr = 0;
#ifdef LQFS_TODO_LOCKFS
	struct ulockfs *ulp;
#endif /* LQFS_TODO_LOCKFS */

	/*
	 * Cannot set sticky bit unless superuser.
	 */
	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(credp)) {
		vap->va_mode &= ~VSVTX;
	}

lookup_name:
#ifdef LQFS_TODO_LOCKFS
	error = qfs_lockfs_begin(pip->mp, &ulp, ULOCKFS_CREATE_MASK);
	if (error) {
		return (error);
	}

	if (ulp) {
#endif /* LQFS_TODO_LOCKFS */
		/* Start LQFS create transaction */
		trans_size = (int)TOP_CREATE_SIZE(pip);
		TRANS_BEGIN_CSYNC(pip->mp, issync, TOP_CREATE, trans_size);
#ifdef LQFS_TODO_LOCKFS
	}
#endif /* LQFS_TODO_LOCKFS */

	RW_LOCK_OS(&pip->data_rwl, RW_WRITER);
	name.operation = SAM_CREATE;
	if ((error = sam_lookup_name(pip, cp, &ip, &name, credp)) == ENOENT) {
		if (((error = sam_create_name(pip, cp, &ip, &name, vap,
		    credp)) != 0) && IS_SAM_ENOSPC(error)) {
			RW_UNLOCK_OS(&pip->data_rwl, RW_WRITER);
			/*
			 * Temporarily end LQFS create transaction
			 */
#ifdef LQFS_TODO_LOCKFS
			if (ulp) {
#endif /* LQFS_TODO_LOCKFS */
				TRANS_END_CSYNC(pip->mp, terr, issync,
				    TOP_CREATE, trans_size);
#ifdef LQFS_TODO_LOCKFS
			}
#endif /* LQFS_TODO_LOCKFS */
			error = sam_wait_space(pip, error);
			if (error == 0) {
				error = terr;
			}
			if (error) {
				return (error);
			}
			goto lookup_name;
		}
		RW_UNLOCK_OS(&pip->data_rwl, RW_WRITER);
	} else if (error == 0) {	/* If entry already exists. */
		RW_UNLOCK_OS(&pip->data_rwl, RW_WRITER);
		error = EEXIST;
		if (ex == NONEXCL) {	/* If non-exclusive create */
			if ((S_ISDIR(ip->di.mode) ||
			    S_ISATTRDIR(ip->di.mode)) &&
			    (mode & S_IWRITE)) {
				/* Cannot create over an existing dir. */
				error = EISDIR;
			} else if (SAM_PRIVILEGE_INO(ip->di.version,
			    ip->di.id.ino)) {
				/* Cannot create over privileged inodes */
				error = EPERM;
			} else if (mode) {	/* Check mode if set */
				error = sam_access_ino(ip, mode, FALSE, credp);
			} else {
				error = 0;
			}
			if ((error == 0) && S_ISREG(ip->di.mode) &&
			    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
				/*
				 * If logging, do the truncate after the
				 * LQFS create transaction is logged.
				 */
				if (TRANS_ISTRANS(ip->mp)) {
					truncflag++;
				} else {
					RW_LOCK_OS(&ip->inode_rwl, RW_WRITER);
					error = sam_clear_file(ip, 0,
					    STALE_ARCHIVE, credp);
					RW_UNLOCK_OS(&ip->inode_rwl,
					    RW_WRITER);
				}
				if (error == 0) {
					VNEVENT_CREATE_OS(SAM_ITOV(ip), NULL);
				}
			}
		}
		/*
		 * Cannot do the following as it caused a stale of
		 * offline copies.
		 */
#if 0
		if ((error == 0) && ((mode & O_CREAT) == 0)) {
			TRANS_INODE(ip->mp, ip);
			sam_mark_ino(ip, SAM_UPDATED|SAM_CHANGED);
		}
#endif
		if (error) {
			VN_RELE(SAM_ITOV(ip));	/* Decrement v_count if error */
		}
	} else {
		RW_UNLOCK_OS(&pip->data_rwl, RW_WRITER);
	}

#ifdef LQFS_TODO_LOCKFS
	if (ulp) {
#endif /* LQFS_TODO_LOCKFS */
		TRANS_END_CSYNC(pip->mp, terr, issync, TOP_CREATE,
		    trans_size);
		/*
		 * If we haven't had a more interesting failure
		 * already, then anything that might've happened
		 * here should be reported.
		 */
		if (error == 0) {
			error = terr;
		}
#ifdef LQFS_TODO_LOCKFS
	}
#endif /* LQFS_TODO_LOCKFS */

	if (!error && truncflag) {
		(void) TRANS_ITRUNC(ip, (u_offset_t)0, STALE_ARCHIVE, credp);
	}

#ifdef LQFS_TODO_LOCKFS
	if (ulp) {
		qfs_lockfs_end(ulp);
	}
#endif /* LQFS_TODO_LOCKFS */

	if (error == 0) {
		*vpp = SAM_ITOV(ip);
		TRACE(T_SAM_CREATE_RET, SAM_ITOV(pip), (sam_tr_t)*vpp,
		    ip->di.id.ino, error);
	}
	return (error);
}
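/*
 * A minimal sketch (not QFS source) of the ENOSPC retry idiom used in
 * sam_create_ino() above: the create transaction must be ended before
 * blocking in sam_wait_space(), and the whole lookup/create sequence
 * is redone afterwards because the directory may have changed while
 * the thread slept.  The helper name and the create_op callback are
 * hypothetical.
 */
static int
sam_create_with_retry(sam_node_t *pip, int (*create_op)(sam_node_t *))
{
	int error, terr = 0, issync;
	int trans_size;

	for (;;) {
		trans_size = (int)TOP_CREATE_SIZE(pip);
		TRANS_BEGIN_CSYNC(pip->mp, issync, TOP_CREATE, trans_size);
		error = create_op(pip);	/* lookup + create under data_rwl */
		TRANS_END_CSYNC(pip->mp, terr, issync, TOP_CREATE,
		    trans_size);
		if (!IS_SAM_ENOSPC(error))
			return (error ? error : terr);
		/* Out of space: wait outside the transaction, then retry. */
		error = sam_wait_space(pip, error);
		if (error)
			return (error);
	}
}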
/*
 * ufs_fiosdio
 *	Set delayed-io state.  This ioctl is tailored
 *	to metamucil's needs and may change at any time.
 */
int
ufs_fiosdio(
	struct vnode	*vp,		/* file's vnode */
	uint_t		*diop,		/* dio flag */
	int		flag,		/* flag from ufs_ioctl */
	struct cred	*cr)		/* credentials from ufs_ioctl */
{
	uint_t dio;			/* copy of user's dio */
	struct inode *ip;		/* inode for vp */
	struct ufsvfs *ufsvfsp;
	struct fs *fs;
	struct ulockfs *ulp;
	int error = 0;

#ifdef lint
	flag = flag;
#endif

	/* check input conditions */
	if (secpolicy_fs_config(cr, vp->v_vfsp) != 0)
		return (EPERM);

	if (copyin(diop, &dio, sizeof (dio)))
		return (EFAULT);

	if (dio > 1)
		return (EINVAL);

	/* file system has been forcibly unmounted */
	if (VTOI(vp)->i_ufsvfs == NULL)
		return (EIO);

	ip = VTOI(vp);
	ufsvfsp = ip->i_ufsvfs;
	ulp = &ufsvfsp->vfs_ulockfs;

	/* logging file system; dio ignored */
	if (TRANS_ISTRANS(ufsvfsp))
		return (error);

	/* hold the mutex to prevent race with a lockfs request */
	vfs_lock_wait(vp->v_vfsp);
	mutex_enter(&ulp->ul_lock);
	atomic_inc_ulong(&ufs_quiesce_pend);

	if (ULOCKFS_IS_HLOCK(ulp)) {
		error = EIO;
		goto out;
	}

	if (ULOCKFS_IS_ELOCK(ulp)) {
		error = EBUSY;
		goto out;
	}

	/* wait for outstanding accesses to finish */
	if (error = ufs_quiesce(ulp))
		goto out;

	/* flush w/invalidate */
	if (error = ufs_flush(vp->v_vfsp))
		goto out;

	/*
	 * update dio
	 */
	mutex_enter(&ufsvfsp->vfs_lock);
	ufsvfsp->vfs_dio = dio;

	/*
	 * enable/disable clean flag processing
	 */
	fs = ip->i_fs;
	if (fs->fs_ronly == 0 &&
	    fs->fs_clean != FSBAD &&
	    fs->fs_clean != FSLOG) {
		if (dio)
			fs->fs_clean = FSSUSPEND;
		else
			fs->fs_clean = FSACTIVE;
		ufs_sbwrite(ufsvfsp);
		mutex_exit(&ufsvfsp->vfs_lock);
	} else
		mutex_exit(&ufsvfsp->vfs_lock);

out:
	/*
	 * we need this broadcast because of the ufs_quiesce call above
	 */
	atomic_dec_ulong(&ufs_quiesce_pend);
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vp->v_vfsp);
	return (error);
}
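/*
 * A hedged userland sketch of driving ufs_fiosdio() through the
 * _FIOSDIO ioctl declared in <sys/filio.h> on Solaris/illumos.  The
 * command name is real; the file path and overall program shape are
 * illustrative only.
 */
#include <sys/types.h>
#include <sys/filio.h>
#include <stropts.h>	/* ioctl() */
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	uint_t dio = 1;	/* 1 enables delayed io, 0 disables it */
	int fd;

	if ((fd = open("/mnt/somefile", O_RDONLY)) < 0) {
		perror("open");
		return (1);
	}
	/* The kernel copies in the flag and sets vfs_dio for the fs. */
	if (ioctl(fd, _FIOSDIO, &dio) < 0)
		perror("_FIOSDIO");
	return (0);
}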
/*
 * Sync quota information records to disk for the specified file system
 * or all file systems with quotas if ufsvfsp == NULL.  Grabs a reader
 * lock on vfs_dqrwlock if it is needed.
 *
 * Currently, if ufsvfsp is NULL, then do_lock is always true, but the
 * routine is coded to account for either do_lock value.  This seemed
 * to be the safer thing to do.
 */
int
quotasync(struct ufsvfs *ufsvfsp, int do_lock)
{
	struct dquot *dqp;

	rw_enter(&dq_rwlock, RW_READER);
	if (!quotas_initialized) {
		rw_exit(&dq_rwlock);
		return (ESRCH);
	}
	rw_exit(&dq_rwlock);

	/*
	 * The operation applies to a specific file system only.
	 */
	if (ufsvfsp) {
		if (do_lock) {
			rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
		}

		/*
		 * Quotas are not enabled on this file system so bail.
		 */
		if ((ufsvfsp->vfs_qflags & MQ_ENABLED) == 0) {
			if (do_lock) {
				rw_exit(&ufsvfsp->vfs_dqrwlock);
			}
			return (ESRCH);
		}

		/*
		 * This operation is a no-op on a logging file system because
		 * quota information is treated as metadata and is in the log.
		 * This code path treats quota information as user data which
		 * is not necessary on a logging file system.
		 */
		if (TRANS_ISTRANS(ufsvfsp)) {
			if (do_lock) {
				rw_exit(&ufsvfsp->vfs_dqrwlock);
			}
			return (0);
		}

		/*
		 * Try to sync all the quota info records for this
		 * file system:
		 */
		for (dqp = dquot; dqp < dquotNDQUOT; dqp++) {
			/*
			 * If someone else has it, then ignore it.
			 */
			if (!mutex_tryenter(&dqp->dq_lock)) {
				continue;
			}

			/*
			 * The quota info record is for this file system
			 * and it has changes.
			 */
			if (dqp->dq_ufsvfsp == ufsvfsp &&
			    (dqp->dq_flags & DQ_MOD)) {
				ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);
				dqupdate(dqp);
			}

			mutex_exit(&dqp->dq_lock);
		}
		if (do_lock) {
			rw_exit(&ufsvfsp->vfs_dqrwlock);
		}

		return (0);
	}

	/*
	 * Try to sync all the quota info records for *all* file systems
	 * for which quotas are enabled.
	 */
	for (dqp = dquot; dqp < dquotNDQUOT; dqp++) {
		/*
		 * If someone else has it, then ignore it.
		 */
		if (!mutex_tryenter(&dqp->dq_lock)) {
			continue;
		}

		ufsvfsp = dqp->dq_ufsvfsp;	/* shorthand */

		/*
		 * This quota info record has no changes or is
		 * not a valid quota info record yet.
		 */
		if ((dqp->dq_flags & DQ_MOD) == 0 || ufsvfsp == NULL) {
			mutex_exit(&dqp->dq_lock);
			continue;
		}

		/*
		 * Now we have a potential lock order problem:
		 *
		 *	vfs_dqrwlock > dq_lock
		 *
		 * so if we have to get vfs_dqrwlock, then go thru hoops
		 * to avoid deadlock.  If we cannot get the order right,
		 * then we ignore this quota info record.
		 */
		if (do_lock) {
			/*
			 * If we can't grab vfs_dqrwlock, then we don't
			 * want to wait to avoid deadlock.
			 */
			if (rw_tryenter(&ufsvfsp->vfs_dqrwlock,
			    RW_READER) == 0) {
				mutex_exit(&dqp->dq_lock);
				continue;
			}

			/*
			 * Okay, now we have both dq_lock and vfs_dqrwlock.
			 * We should not deadlock for the following reasons:
			 * - If another thread has a reader lock on
			 *   vfs_dqrwlock and is waiting for dq_lock,
			 *   there is no conflict because we can also have
			 *   a reader lock on vfs_dqrwlock.
			 * - If another thread has a writer lock on
			 *   vfs_dqrwlock and is waiting for dq_lock,
			 *   we would have failed the rw_tryenter() above
			 *   and given up dq_lock.
			 * - Since we have dq_lock another thread cannot
			 *   have it and be waiting for vfs_dqrwlock.
			 */
		}

		/*
		 * Since we got to this file system via a quota info
		 * record and we have vfs_dqrwlock this is paranoia
		 * to make sure that quotas are enabled.
		 */
		ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);

		/*
		 * We are not logging.  See above logging file system
		 * comment.
		 */
		if (!TRANS_ISTRANS(ufsvfsp)) {
			dqupdate(dqp);
		}

		/*
		 * Since we have a private copy of dqp->dq_ufsvfsp,
		 * we can drop dq_lock now.
		 */
		mutex_exit(&dqp->dq_lock);

		if (do_lock) {
			rw_exit(&ufsvfsp->vfs_dqrwlock);
		}
	}

	return (0);
}
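/*
 * The vfs_dqrwlock > dq_lock dance in quotasync() generalizes to any
 * pair of locks that different paths take in opposite orders: the
 * side already holding the "inner" lock may only try-lock the "outer"
 * one and must back off on failure.  A minimal, self-contained
 * userland analog using pthreads (names hypothetical, not ufs code):
 */
#include <pthread.h>

static pthread_rwlock_t outer = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if both locks were acquired, 0 if the caller should skip. */
static int
lock_inner_then_outer(void)
{
	pthread_mutex_lock(&inner);
	if (pthread_rwlock_tryrdlock(&outer) != 0) {
		/* Waiting here would invert outer > inner; back off. */
		pthread_mutex_unlock(&inner);
		return (0);
	}
	return (1);
}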