STATIC int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	xfs_inode_t	*qip;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	error = 0;
	if (!XFS_SB_VERSION_HASQUOTA(&mp->m_sb) || flags == 0) {
		qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
		error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
		if (! error) {
			(void) xfs_truncate_file(mp, qip);
			VN_RELE(XFS_ITOV(qip));
		}
	}

	if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
	    mp->m_sb.sb_gquotino != NULLFSINO) {
		error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
		if (! error) {
			(void) xfs_truncate_file(mp, qip);
			VN_RELE(XFS_ITOV(qip));
		}
	}

	return (error);
}
int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = 0, error2 = 0;
	xfs_inode_t	*qip;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
		qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
		error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
		if (!error) {
			error = xfs_truncate_file(mp, qip);
			IRELE(qip);
		}
	}

	if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
	    mp->m_sb.sb_gquotino != NULLFSINO) {
		error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
		if (!error2) {
			error2 = xfs_truncate_file(mp, qip);
			IRELE(qip);
		}
	}

	return error ? error : error2;
}
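/*
 * Illustrative sketch, not part of the original XFS code: the error
 * handling pattern used in the newer xfs_qm_scall_trunc_qfiles() above.
 * Both truncation legs are always attempted, and the first (user-quota)
 * error is reported in preference to the second (group/project) one.
 * step_one()/step_two() are hypothetical stand-ins for the two
 * xfs_iget()/xfs_truncate_file() legs.
 */
static int step_one(void) { return 0; }		/* pretend this succeeds */
static int step_two(void) { return -22; }	/* pretend this fails */

static int run_both_report_first_error(void)
{
	int	error = step_one();	/* always attempted */
	int	error2 = step_two();	/* attempted even if error != 0 */

	return error ? error : error2;	/* first failure wins */
}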
/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
STATIC int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);

	if ((newlim->d_fieldmask &
	    (FS_DQ_LIMIT_MASK|FS_DQ_TIMER_MASK|FS_DQ_WARNS_MASK)) == 0)
		return (0);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXXThis doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
		ASSERT(error != ENOENT);
		return (error);
	}
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			mp->m_quotainfo->qi_bhardlimit = hard;
			mp->m_quotainfo->qi_bsoftlimit = soft;
		}
	} else {
		qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			mp->m_quotainfo->qi_rtbhardlimit = hard;
			mp->m_quotainfo->qi_rtbsoftlimit = soft;
		}
	} else {
		qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			mp->m_quotainfo->qi_ihardlimit = hard;
			mp->m_quotainfo->qi_isoftlimit = soft;
		}
	} else {
		qdprintk("ihard %Ld < isoft %Ld\n", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			mp->m_quotainfo->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			mp->m_quotainfo->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			mp->m_quotainfo->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			mp->m_quotainfo->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			mp->m_quotainfo->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			mp->m_quotainfo->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);
	xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
	xfs_trans_commit(tp, 0, NULL);
	xfs_qm_dqprint(dqp);
	xfs_qm_dqrele(dqp);
	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));

	return (0);
}
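/*
 * Illustrative sketch, not part of the original XFS code: the
 * limit-validation rule applied three times in xfs_qm_scall_setqlim()
 * above, for blocks, realtime blocks and inodes.  A hard limit of 0
 * means "unlimited"; otherwise the hard limit must not be below the
 * soft limit, and an invalid pair leaves the on-disk values untouched.
 */
static int qlim_pair_is_valid(unsigned long long hard,
			      unsigned long long soft)
{
	return hard == 0 || hard >= soft;	/* 0 == no limit */
}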
/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
STATIC int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	unsigned long	s;
	uint		qf;
	uint		accflags;
	__int64_t	sbflags;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/*
	 * Switching on quota accounting must be done at mount time.
	 */
	accflags = flags & XFS_ALL_QUOTA_ACCT;
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	sbflags = 0;

	if (flags == 0) {
		qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	/* No fs can turn on quotas with a delayed effect */
	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	    (flags & XFS_UQUOTA_ENFD))
	    ||
	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	    (flags & XFS_OQUOTA_ENFD))
	    ||
	    ((flags & XFS_GQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	    (flags & XFS_OQUOTA_ENFD))) {
		qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n",
			flags, mp->m_sb.sb_qflags);
		return XFS_ERROR(EINVAL);
	}
	/*
	 * If everything's upto-date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return XFS_ERROR(EEXIST);

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	s = XFS_SB_LOCK(mp);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	XFS_SB_UNLOCK(mp, s);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags && sbflags == 0)
		return XFS_ERROR(EEXIST);
	sbflags |= XFS_SB_QFLAGS;

	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
		return (error);
	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
		return (0);

	if (! XFS_IS_QUOTA_RUNNING(mp))
		return XFS_ERROR(ESRCH);

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));

	return (0);
}
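/*
 * Illustrative sketch, not part of the original XFS code: the
 * "enforcement requires accounting" rule checked in
 * xfs_qm_scall_quotaon() above, reduced to a single quota type with
 * hypothetical flag bits.  Enforcement may only be switched on if
 * accounting is already active, either in the requested flags or in
 * the on-disk superblock qflags.
 */
#define SKETCH_QUOTA_ACCT	0x1
#define SKETCH_QUOTA_ENFD	0x2

static int sketch_can_enforce(unsigned int flags, unsigned int sb_qflags)
{
	if ((flags & SKETCH_QUOTA_ENFD) &&
	    !((flags | sb_qflags) & SKETCH_QUOTA_ACCT))
		return 0;	/* reject: enforcement without accounting */
	return 1;
}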
STATIC int
xfs_qm_internalqcheck_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* not used */
	int		*res)		/* bulkstat result code */
{
	xfs_inode_t	*ip;
	xfs_dqtest_t	*ud, *gd;
	uint		lock_flags;
	boolean_t	ipreleased;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n",
			(unsigned long long) ino,
			(unsigned long long) mp->m_sb.sb_uquotino,
			(unsigned long long) mp->m_sb.sb_gquotino);
		return XFS_ERROR(EINVAL);
	}
	ipreleased = B_FALSE;
 again:
	lock_flags = XFS_ILOCK_SHARED;
	if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return (error);
	}

	if (ip->i_d.di_mode == 0) {
		xfs_iput_new(ip, lock_flags);
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOENT);
	}

	/*
	 * This inode can have blocks after eof which can get released
	 * when we send it to inactive. Since we don't check the dquot
	 * until after all our calculations are done, we must get rid
	 * of those now.
	 */
	if (! ipreleased) {
		xfs_iput(ip, lock_flags);
		ipreleased = B_TRUE;
		goto again;
	}
	xfs_qm_internalqcheck_get_dquots(mp,
					(xfs_dqid_t) ip->i_d.di_uid,
					(xfs_dqid_t) ip->i_d.di_projid,
					(xfs_dqid_t) ip->i_d.di_gid,
					&ud, &gd);
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ud);
		xfs_qm_internalqcheck_dqadjust(ip, ud);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gd);
		xfs_qm_internalqcheck_dqadjust(ip, gd);
	}
	xfs_iput(ip, lock_flags);
	*res = BULKSTAT_RV_DIDONE;
	return (0);
}