/*
 * Translate an internal style on-disk-dquot to the exportable format.
 * The main differences are that the counters/limits are all in Basic
 * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
 * to be converted to the native endianness.
 */
STATIC void
xfs_qm_export_dquot(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*src,
	struct fs_disk_quota	*dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
	dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
	dst->d_id = be32_to_cpu(src->d_id);
	dst->d_blk_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
	dst->d_blk_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
	dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
	dst->d_icount = be64_to_cpu(src->d_icount);
	dst->d_btimer = be32_to_cpu(src->d_btimer);
	dst->d_itimer = be32_to_cpu(src->d_itimer);
	dst->d_iwarns = be16_to_cpu(src->d_iwarns);
	dst->d_bwarns = be16_to_cpu(src->d_bwarns);
	dst->d_rtb_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
	dst->d_rtb_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
	dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
	dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
	dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
			(src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
		dst->d_btimer = 0;
		dst->d_itimer = 0;
		dst->d_rtbtimer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == XFS_USER_QUOTA) ||
	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dst->d_flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)))) &&
	    dst->d_id != 0) {
		if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
		    (dst->d_blk_softlimit > 0)) {
			ASSERT(dst->d_btimer != 0);
		}
		if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_itimer != 0);
		}
	}
#endif
}
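
The conversion macros used above are essentially unit shifts: an XFS filesystem block (FSB) becomes 512-byte basic blocks (BBs) by shifting by the difference of the two block-size logs. A minimal standalone sketch of that arithmetic, with an illustrative helper name and a 4096-byte block size assumed for the example (this is not the kernel macro itself):

#include <stdint.h>

#define BBSHIFT	9			/* basic blocks are 512 bytes */

/* blocklog is log2 of the filesystem block size, e.g. 12 for 4k blocks */
static inline uint64_t fsb_to_bb(uint64_t fsb, unsigned int blocklog)
{
	return fsb << (blocklog - BBSHIFT);	/* 4k blocks: 1 FSB == 8 BBs */
}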
Example #2
/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	/*
	 * Set the scan owner to avoid a potential livelock. Otherwise, the scan
	 * can repeatedly trylock on the inode we're currently processing. We
	 * run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_scan_owner = ip->i_ino;
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}
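
For context, in the kernel tree this helper comes from, it is driven through a thin wrapper that supplies the eofblocks scanner as the execute callback, and callers (for example the buffered write path) can use the non-zero return to decide whether to retry a failed allocation. A sketch of that wrapper, with names taken from the surrounding kernel code of this era (treat the exact signatures as an assumption):

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip)
{
	/* run the quota-scoped scan with the generic eofblocks scanner */
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}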
Example #3
/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}
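
The xfs_dquot_lowsp() checks above implement the "less than 1% free space" condition mentioned in the comment. An illustrative, self-contained sketch of that test (this is not the kernel helper; the function name and the treatment of an unlimited quota are simplifications):

#include <stdbool.h>
#include <stdint.h>

static bool quota_low_on_space(uint64_t block_hardlimit, uint64_t reserved_blocks)
{
	/*
	 * Simplification: treat "no hard limit" as never low; the kernel
	 * falls back to the soft limit or the filesystem size instead.
	 */
	if (!block_hardlimit)
		return false;
	if (reserved_blocks >= block_hardlimit)
		return true;
	/* low when less than 1% of the limit remains unreserved */
	return (block_hardlimit - reserved_blocks) < block_hardlimit / 100;
}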
Example #4
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
    xfs_trans_t	*tp,
    xfs_mount_t	*mp,
    xfs_dquot_t	*dqp,
    long		nblks,
    long		ninos,
    uint		flags)
{
    xfs_qcnt_t	hardlimit;
    xfs_qcnt_t	softlimit;
    time_t		timer;
    xfs_qwarncnt_t	warns;
    xfs_qwarncnt_t	warnlimit;
    xfs_qcnt_t	total_count;
    xfs_qcnt_t	*resbcountp;
    xfs_quotainfo_t	*q = mp->m_quotainfo;
    struct xfs_def_quota	*defq;


    xfs_dqlock(dqp);

    defq = xfs_get_defquota(dqp, q);

    if (flags & XFS_TRANS_DQ_RES_BLKS) {
        hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
        if (!hardlimit)
            hardlimit = defq->bhardlimit;
        softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
        if (!softlimit)
            softlimit = defq->bsoftlimit;
        timer = be32_to_cpu(dqp->q_core.d_btimer);
        warns = be16_to_cpu(dqp->q_core.d_bwarns);
        warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
        resbcountp = &dqp->q_res_bcount;
    } else {
        ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
        hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
        if (!hardlimit)
            hardlimit = defq->rtbhardlimit;
        softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
        if (!softlimit)
            softlimit = defq->rtbsoftlimit;
        timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
        warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
        warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
        resbcountp = &dqp->q_res_rtbcount;
    }

    if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
            dqp->q_core.d_id &&
            ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
             (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
             (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
        if (nblks > 0) {
            /*
             * dquot is locked already. See if we'd go over the
             * hardlimit or exceed the timelimit if we allocate
             * nblks.
             */
            total_count = *resbcountp + nblks;
            if (hardlimit && total_count > hardlimit) {
                xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
                goto error_return;
            }
            if (softlimit && total_count > softlimit) {
                if ((timer != 0 && get_seconds() > timer) ||
                        (warns != 0 && warns >= warnlimit)) {
                    xfs_quota_warn(mp, dqp,
                                   QUOTA_NL_BSOFTLONGWARN);
                    goto error_return;
                }

                xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
            }
        }
        if (ninos > 0) {
            total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
            timer = be32_to_cpu(dqp->q_core.d_itimer);
            warns = be16_to_cpu(dqp->q_core.d_iwarns);
            warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
            hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
            if (!hardlimit)
                hardlimit = defq->ihardlimit;
            softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
            if (!softlimit)
                softlimit = defq->isoftlimit;

            if (hardlimit && total_count > hardlimit) {
                xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
                goto error_return;
            }
            if (softlimit && total_count > softlimit) {
                if ((timer != 0 && get_seconds() > timer) ||
                        (warns != 0 && warns >= warnlimit)) {
                    xfs_quota_warn(mp, dqp,
                                   QUOTA_NL_ISOFTLONGWARN);
                    goto error_return;
                }
                xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
            }
        }
    }

    /*
     * Change the reservation, but not the actual usage.
     * Note that q_res_bcount = q_core.d_bcount + resv
     */
    (*resbcountp) += (xfs_qcnt_t)nblks;
    if (ninos != 0)
        dqp->q_res_icount += (xfs_qcnt_t)ninos;

    /*
     * note the reservation amt in the trans struct too,
     * so that the transaction knows how much was reserved by
     * it against this particular dquot.
     * We don't do this when we are reserving for a delayed allocation,
     * because we don't have the luxury of a transaction envelope then.
     */
    if (tp) {
        ASSERT(tp->t_dqinfo);
        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
        if (nblks != 0)
            xfs_trans_mod_dquot(tp, dqp,
                                flags & XFS_QMOPT_RESBLK_MASK,
                                nblks);
        if (ninos != 0)
            xfs_trans_mod_dquot(tp, dqp,
                                XFS_TRANS_DQ_RES_INOS,
                                ninos);
    }
    ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
    ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
    ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

    xfs_dqunlock(dqp);
    return 0;

error_return:
    xfs_dqunlock(dqp);
    if (flags & XFS_QMOPT_ENOSPC)
        return -ENOSPC;
    return -EDQUOT;
}
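
The block and inode branches above apply the same soft-limit policy: exceeding the soft limit fails the reservation only once the grace timer has expired or the warning budget is exhausted; otherwise a warning is issued and the reservation proceeds. A condensed, standalone sketch of that decision (illustrative names; time(NULL) stands in for the kernel's get_seconds()):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool soft_limit_blocks_reservation(uint64_t new_total, uint64_t softlimit,
					  time_t timer, unsigned int warns,
					  unsigned int warnlimit)
{
	if (!softlimit || new_total <= softlimit)
		return false;			/* within the soft limit */
	if (timer != 0 && time(NULL) > timer)
		return true;			/* grace period expired */
	if (warns != 0 && warns >= warnlimit)
		return true;			/* warning budget used up */
	return false;				/* over the limit, still in grace */
}
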
int
xfs_qm_scall_getquota(
    struct xfs_mount	*mp,
    xfs_dqid_t		id,
    uint			type,
    struct qc_dqblk		*dst)
{
    struct xfs_dquot	*dqp;
    int			error;

    /*
     * Try to get the dquot. We don't want it allocated on disk, so
     * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
     * exist, we'll get ENOENT back.
     */
    error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
    if (error)
        return error;

    /*
     * If everything's NULL, this dquot doesn't quite exist as far as
     * our utility programs are concerned.
     */
    if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
        error = XFS_ERROR(ENOENT);
        goto out_put;
    }

    memset(dst, 0, sizeof(*dst));
    dst->d_spc_hardlimit =
        XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
    dst->d_spc_softlimit =
        XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
    dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
    dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
    dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
    dst->d_ino_count = dqp->q_res_icount;
    dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
    dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
    dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
    dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
    dst->d_rt_spc_hardlimit =
        XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
    dst->d_rt_spc_softlimit =
        XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
    dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
    dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
    dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);

    /*
     * Internally, we don't reset all the timers when quota enforcement
     * gets turned off. No need to confuse the user level code,
     * so return zeroes in that case.
     */
    if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
            dqp->q_core.d_flags == XFS_DQ_USER) ||
            (!XFS_IS_GQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_GROUP) ||
            (!XFS_IS_PQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_PROJ)) {
        dst->d_spc_timer = 0;
        dst->d_ino_timer = 0;
        dst->d_rt_spc_timer = 0;
    }

#ifdef DEBUG
    if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
            (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
            (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
            id != 0) {
        if ((dst->d_space > dst->d_spc_softlimit) &&
                (dst->d_spc_softlimit > 0)) {
            ASSERT(dst->d_spc_timer != 0);
        }
        if ((dst->d_ino_count > dst->d_ino_softlimit) &&
                (dst->d_ino_softlimit > 0)) {
            ASSERT(dst->d_ino_timer != 0);
        }
    }
#endif
out_put:
    xfs_qm_dqput(dqp);
    return error;
}
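
This qc_dqblk variant is the hook for the kernel's generic quota interface, which reports space in bytes (note the XFS_FSB_TO_B() conversions). A hedged userspace-side sketch of reading those values through the generic quotactl(2) command; the device path and uid are placeholders, and it assumes a glibc whose <sys/quota.h> exposes the kernel's struct if_dqblk:

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
	struct if_dqblk d;

	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", 1000,
		     (caddr_t)&d) != 0) {
		perror("quotactl");
		return 1;
	}
	/* dqb_curspace is in bytes, matching the byte-based export above */
	printf("space used: %llu bytes\n", (unsigned long long)d.dqb_curspace);
	return 0;
}
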
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct fs_disk_quota	*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
	if (error)
		return error;

	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = XFS_ERROR(ENOENT);
		goto out_put;
	}

	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
	dst->d_blk_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_blk_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
	dst->d_icount = dqp->q_res_icount;
	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rtb_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rtb_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
		dst->d_btimer = 0;
		dst->d_itimer = 0;
		dst->d_rtbtimer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
	    dst->d_id != 0) {
		if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) &&
		    (dst->d_blk_softlimit > 0)) {
			ASSERT(dst->d_btimer != 0);
		}
		if (((int) dst->d_icount > (int) dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_itimer != 0);
		}
	}
#endif
out_put:
	xfs_qm_dqput(dqp);
	return error;
}
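
The fs_disk_quota variant backs the XFS-specific quotactl commands, where the block counters are in 512-byte basic blocks rather than bytes. A hedged userspace-side sketch of the matching Q_XGETQUOTA call; again the device path and uid are placeholders, and the header location may vary between distributions (some ship the same definitions as <xfs/xqm.h>):

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>	/* struct fs_disk_quota, Q_XGETQUOTA */

int main(void)
{
	struct fs_disk_quota d;

	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), "/dev/sda1", 1000,
		     (caddr_t)&d) != 0) {
		perror("quotactl");
		return 1;
	}
	/* d_bcount is in 512-byte basic blocks, as exported above */
	printf("blocks used: %llu BBs\n", (unsigned long long)d.d_bcount);
	return 0;
}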