Example no. 1
0
/*
 * Truncate the on-disk quota inodes selected by @flags.
 * Returns 0 on success or a negative errno; stops at the first failure.
 */
int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;

	/* Quota must exist on disk and flags must name only known types. */
	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_DQ_ALLTYPES)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}

	/* Truncate each requested quota inode, bailing out on error. */
	if (flags & XFS_DQ_USER) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_DQ_GROUP) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (!(flags & XFS_DQ_PROJ))
		return 0;
	return xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);
}
Example no. 2
0
/*
 * Truncate the on-disk quota inodes selected by @flags.
 * User quota errors take precedence over group/project errors.
 */
int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		uerror = 0;
	int		gerror = 0;

	/* Quota must be present on disk and at least one type requested. */
	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
			__func__, flags, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	if (flags & XFS_DQ_USER)
		uerror = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
	/* Group and project quota share the same quota inode here. */
	if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
		gerror = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);

	if (uerror)
		return uerror;
	return gerror;
}
/*
 * Adjust quota limits, and start/stop timers accordingly.
 *
 * Apply the fields selected by newlim->d_fieldmask to the dquot named by
 * (id, type), allocating the dquot on disk first if it does not yet exist.
 * When id == 0 the new values also become the filesystem-wide defaults
 * cached in the quotainfo structure.
 */
int
xfs_qm_scall_setqlim(
    struct xfs_mount	*mp,
    xfs_dqid_t		id,
    uint			type,
    struct qc_dqblk		*newlim)
{
    struct xfs_quotainfo	*q = mp->m_quotainfo;
    struct xfs_disk_dquot	*ddq;
    struct xfs_dquot	*dqp;
    struct xfs_trans	*tp;
    int			error;
    xfs_qcnt_t		hard, soft;

    /* Reject mask bits we don't understand; an empty mask is a no-op. */
    if (newlim->d_fieldmask & ~XFS_QC_MASK)
        return EINVAL;
    if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
        return 0;

    /*
     * We don't want to race with a quotaoff so take the quotaoff lock.
     * We don't hold an inode lock, so there's nothing else to stop
     * a quotaoff from happening.
     */
    mutex_lock(&q->qi_quotaofflock);

    /*
     * Get the dquot (locked) before we start, as we need to do a
     * transaction to allocate it if it doesn't exist. Once we have the
     * dquot, unlock it so we can start the next transaction safely. We hold
     * a reference to the dquot, so it's safe to do this unlock/lock without
     * it being reclaimed in the mean time.
     */
    error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
    if (error) {
        ASSERT(error != ENOENT);	/* DQALLOC was requested above */
        goto out_unlock;
    }
    xfs_dqunlock(dqp);

    tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
    error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
    if (error) {
        xfs_trans_cancel(tp, 0);
        goto out_rele;
    }

    xfs_dqlock(dqp);
    xfs_trans_dqjoin(tp, dqp);
    ddq = &dqp->q_core;

    /*
     * Make sure that hardlimits are >= soft limits before changing.
     * For each pair: take the new value if its mask bit is set, otherwise
     * keep the current on-disk value; a hard limit of 0 means "no limit".
     */
    hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
           be64_to_cpu(ddq->d_blk_hardlimit);
    soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
           be64_to_cpu(ddq->d_blk_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_blk_hardlimit = cpu_to_be64(hard);
        ddq->d_blk_softlimit = cpu_to_be64(soft);
        xfs_dquot_set_prealloc_limits(dqp);
        if (id == 0) {
            /* id 0 carries the filesystem-wide default limits */
            q->qi_bhardlimit = hard;
            q->qi_bsoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
    }
    /* Realtime block limits. */
    hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
           be64_to_cpu(ddq->d_rtb_hardlimit);
    soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
           be64_to_cpu(ddq->d_rtb_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_rtb_hardlimit = cpu_to_be64(hard);
        ddq->d_rtb_softlimit = cpu_to_be64(soft);
        if (id == 0) {
            q->qi_rtbhardlimit = hard;
            q->qi_rtbsoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
    }

    /* Inode count limits. */
    hard = (newlim->d_fieldmask & QC_INO_HARD) ?
           (xfs_qcnt_t) newlim->d_ino_hardlimit :
           be64_to_cpu(ddq->d_ino_hardlimit);
    soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
           (xfs_qcnt_t) newlim->d_ino_softlimit :
           be64_to_cpu(ddq->d_ino_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_ino_hardlimit = cpu_to_be64(hard);
        ddq->d_ino_softlimit = cpu_to_be64(soft);
        if (id == 0) {
            q->qi_ihardlimit = hard;
            q->qi_isoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
    }

    /*
     * Update warnings counter(s) if requested
     */
    if (newlim->d_fieldmask & QC_SPC_WARNS)
        ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
    if (newlim->d_fieldmask & QC_INO_WARNS)
        ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
    if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
        ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

    if (id == 0) {
        /*
         * Timelimits for the super user set the relative time
         * the other users can be over quota for this file system.
         * If it is zero a default is used.  Ditto for the default
         * soft and hard limit values (already done, above), and
         * for warnings.
         */
        if (newlim->d_fieldmask & QC_SPC_TIMER) {
            q->qi_btimelimit = newlim->d_spc_timer;
            ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
        }
        if (newlim->d_fieldmask & QC_INO_TIMER) {
            q->qi_itimelimit = newlim->d_ino_timer;
            ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
        }
        if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
            q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
            ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
        }
        if (newlim->d_fieldmask & QC_SPC_WARNS)
            q->qi_bwarnlimit = newlim->d_spc_warns;
        if (newlim->d_fieldmask & QC_INO_WARNS)
            q->qi_iwarnlimit = newlim->d_ino_warns;
        if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
            q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
    } else {
        /*
         * If the user is now over quota, start the timelimit.
         * The user will not be 'warned'.
         * Note that we keep the timers ticking, whether enforcement
         * is on or off. We don't really want to bother with iterating
         * over all ondisk dquots and turning the timers on/off.
         */
        xfs_qm_adjust_dqtimers(mp, ddq);
    }
    /* Mark the dquot dirty and log it; commit persists the change. */
    dqp->dq_flags |= XFS_DQ_DIRTY;
    xfs_trans_log_dquot(tp, dqp);

    error = xfs_trans_commit(tp, 0);

out_rele:
    xfs_qm_dqrele(dqp);
out_unlock:
    mutex_unlock(&q->qi_quotaofflock);
    return error;
}
/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 *
 * Returns 0 on success; EINVAL for bad/unenforceable flags, EEXIST when
 * everything requested is already on, ESRCH when quota isn't running.
 */
int
xfs_qm_scall_quotaon(
    xfs_mount_t	*mp,
    uint		flags)
{
    int		error;
    uint		qf;
    __int64_t	sbflags;

    flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
    /*
     * Switching on quota accounting must be done at mount time.
     */
    flags &= ~(XFS_ALL_QUOTA_ACCT);

    sbflags = 0;

    if (flags == 0) {
        xfs_debug(mp, "%s: zero flags, m_qflags=%x",
                  __func__, mp->m_qflags);
        return XFS_ERROR(EINVAL);
    }

    /* No fs can turn on quotas with a delayed effect */
    ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);

    /*
     * Can't enforce without accounting. We check the superblock
     * qflags here instead of m_qflags because rootfs can have
     * quota acct on ondisk without m_qflags' knowing.
     */
    if (((flags & XFS_UQUOTA_ACCT) == 0 &&
            (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
            (flags & XFS_UQUOTA_ENFD)) ||
            ((flags & XFS_GQUOTA_ACCT) == 0 &&
             (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
             (flags & XFS_GQUOTA_ENFD)) ||
            ((flags & XFS_PQUOTA_ACCT) == 0 &&
             (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
             (flags & XFS_PQUOTA_ENFD))) {
        xfs_debug(mp,
                  "%s: Can't enforce without acct, flags=%x sbflags=%x",
                  __func__, flags, mp->m_sb.sb_qflags);
        return XFS_ERROR(EINVAL);
    }
    /*
     * If everything's up to-date incore, then don't waste time.
     */
    if ((mp->m_qflags & flags) == flags)
        return XFS_ERROR(EEXIST);

    /*
     * Change sb_qflags on disk but not incore mp->qflags
     * if this is the root filesystem.
     */
    spin_lock(&mp->m_sb_lock);
    qf = mp->m_sb.sb_qflags;
    mp->m_sb.sb_qflags = qf | flags;
    spin_unlock(&mp->m_sb_lock);

    /*
     * There's nothing to change if it's the same.
     * (sbflags is still 0 at this point, so this reduces to the
     * (qf & flags) == flags test.)
     */
    if ((qf & flags) == flags && sbflags == 0)
        return XFS_ERROR(EEXIST);
    sbflags |= XFS_SB_QFLAGS;

    /* Write the updated superblock qflags to disk. */
    if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
        return (error);
    /*
     * If we aren't trying to switch on quota enforcement, we are done.
     * (Also done when on-disk and incore accounting disagree, e.g. root fs.)
     */
    if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
            (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
             (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
             (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
            (flags & XFS_ALL_QUOTA_ENFD) == 0)
        return (0);

    if (! XFS_IS_QUOTA_RUNNING(mp))
        return XFS_ERROR(ESRCH);

    /*
     * Switch on quota enforcement in core.
     */
    mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
    mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
    mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

    return (0);
}
Example no. 5
0
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	/* The requested quota type must actually be enabled. */
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	/* Error-injection hook: fail every xfs_dqerror_mod-th request. */
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

	/* Fast path: look up the dquot in the per-type radix tree. */
restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			/* Being torn down by reclaim; wait and retry. */
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				/* Someone beat us to it; use theirs. */
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	/* radix_tree_insert returns a negative errno; flip to positive. */
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
Example no. 6
0
/*
 * Adjust quota limits, and start/stop timers accordingly.
 *
 * Apply the fields selected by newlim->d_fieldmask to the dquot named by
 * (id, type).  When id == 0 the new values also become the filesystem-wide
 * defaults cached in the quotainfo structure.
 */
int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	/* Reject mask bits we don't understand; an empty mask is a no-op. */
	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXXThis doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 *
	 * NOTE(review): the transaction is reserved *before* this DQALLOC
	 * dqget, while the qc_dqblk variant earlier in this file gets the
	 * dquot first and only then allocates the transaction — confirm the
	 * allocation path here cannot deadlock against the reservation.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 * For each pair: take the new value if its mask bit is set, otherwise
	 * keep the current on-disk value; a hard limit of 0 means "no limit".
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			/* id 0 carries the filesystem-wide default limits */
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	/* Realtime block limits. */
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	/* Inode count limits. */
	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	/* Mark the dquot dirty and log it; commit persists the change. */
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);
	xfs_qm_dqrele(dqp);

 out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
/*
 * Adjust quota limits, and start/stop timers accordingly.
 *
 * Apply the fields selected by newlim->d_fieldmask to the dquot named by
 * (id, type).  When id == 0 the new values also become the filesystem-wide
 * defaults cached in the quotainfo structure.
 */
int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	/* Reject mask bits we don't understand; an empty mask is a no-op. */
	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	/* Reserve log space for the dquot update before taking locks. */
	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/* Serialize against a concurrent quotaoff; no inode lock is held. */
	mutex_lock(&q->qi_quotaofflock);

	/* Get the dquot (locked), allocating it on disk if necessary. */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 * For each pair: take the new value if its mask bit is set, otherwise
	 * keep the current on-disk value; a hard limit of 0 means "no limit".
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			/* id 0 carries the filesystem-wide default limits */
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	/* Realtime block limits. */
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	/* Inode count limits. */
	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
	}

	/* Update warnings counter(s) if requested. */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits/warn limits for the super user (id 0) set the
		 * filesystem-wide defaults cached in the quotainfo.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/* Non-root id: restart/stop timers against the new limits. */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	/* Mark the dquot dirty and log it; commit persists the change. */
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);
	xfs_qm_dqrele(dqp);

 out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
/*
 * Switch on (a given) quota enforcement for a filesystem.  This takes
 * effect immediately.  (Switching on quota accounting must be done at
 * mount time, so those flag bits are stripped below.)
 */
int
xfs_qm_scall_quotaon(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error;
	uint		qf;
	__int64_t	sbflags;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
	/* Accounting cannot be switched on after mount; strip those bits. */
	flags &= ~(XFS_ALL_QUOTA_ACCT);

	sbflags = 0;

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
			__func__, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	/* No fs can turn on quotas with a delayed effect. */
	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);

	/*
	 * Can't enforce without accounting; check the on-disk sb_qflags as
	 * well as the requested flags.  In this variant XFS_OQUOTA_ENFD
	 * covers both group and project quota.
	 */
	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	    (flags & XFS_UQUOTA_ENFD))
	    ||
	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	    (flags & XFS_GQUOTA_ACCT) == 0 &&
	    (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	    (flags & XFS_OQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x\n",
			__func__, flags, mp->m_sb.sb_qflags);
		return XFS_ERROR(EINVAL);
	}
	/* Already fully on incore: nothing to do. */
	if ((mp->m_qflags & flags) == flags)
		return XFS_ERROR(EEXIST);

	/* Update the on-disk superblock flags (but not incore m_qflags). */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/* sbflags is still 0 here, so this is just (qf & flags) == flags. */
	if ((qf & flags) == flags && sbflags == 0)
		return XFS_ERROR(EEXIST);
	sbflags |= XFS_SB_QFLAGS;

	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
		return (error);
	/*
	 * Done unless enforcement was requested and on-disk/incore
	 * accounting flags agree (they differ e.g. on the root fs).
	 */
	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
		return (0);

	if (! XFS_IS_QUOTA_RUNNING(mp))
		return XFS_ERROR(ESRCH);

	/* Switch on quota enforcement in core, under the quotaoff lock. */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return (0);
}