Code example #1
File: xfs_qm.c  Project: AdrianHuang/linux-3.8.13
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
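
For context, the comment above says this helper is called by dqusage_adjust during a quotacheck. The sketch below shows, roughly, how that caller is assumed to invoke the helper once per enabled quota type for each inode it scans. The wrapper name xfs_qm_dqadjust_all_types is hypothetical, and the quota-type macros and inode fields (XFS_IS_UQUOTA_ON, ip->i_d.di_uid, xfs_get_projid, ...) are recalled from kernels of this era rather than quoted from this file, so treat it as an illustrative assumption, not source.

/*
 * Illustrative sketch only: the wrapper name is hypothetical, and the
 * macros/fields are assumed from this kernel era, not quoted from the
 * file above.
 */
STATIC int
xfs_qm_dqadjust_all_types(
	struct xfs_inode	*ip,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/* User quota: charge the inode's owner. */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			return error;
	}

	/* Group quota: charge the inode's group. */
	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			return error;
	}

	/* Project quota: charge the inode's project id. */
	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			return error;
	}

	return 0;
}
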
Code example #2
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
    struct xfs_trans	*tp)
{
    int			i, j;
    struct xfs_dquot	*dqp;
    struct xfs_dqtrx	*qtrx, *qa;
    struct xfs_disk_dquot	*d;
    long			totalbdelta;
    long			totalrtbdelta;

    if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
        return;

    ASSERT(tp->t_dqinfo);
    for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
        qa = tp->t_dqinfo->dqs[j];
        if (qa[0].qt_dquot == NULL)
            continue;

        /*
         * Lock all of the dquots and join them to the transaction.
         */
        xfs_trans_dqlockedjoin(tp, qa);

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
            qtrx = &qa[i];
            /*
             * The array of dquots is filled
             * sequentially, not sparsely.
             */
            if ((dqp = qtrx->qt_dquot) == NULL)
                break;

            ASSERT(XFS_DQ_IS_LOCKED(dqp));
            ASSERT(dqp->q_transp == tp);

            /*
             * adjust the actual number of blocks used
             */
            d = &dqp->q_core;

            /*
             * The issue here is that sometimes we intentionally skip
             * the blkquota reservation to be fair to users (when the
             * amount is small). Delayed allocs, on the other hand, do
             * make reservations, but outside of a transaction, so we
             * have no idea how much was really reserved.
             * So, here we've accumulated delayed allocation blks and
             * non-delay blks. The assumption is that the delayed ones
             * always carry a reservation (made outside of a
             * transaction), and the others may or may not have quota
             * reservations.
             */
            totalbdelta = qtrx->qt_bcount_delta +
                          qtrx->qt_delbcnt_delta;
            totalrtbdelta = qtrx->qt_rtbcount_delta +
                            qtrx->qt_delrtb_delta;
#ifdef DEBUG
            if (totalbdelta < 0)
                ASSERT(be64_to_cpu(d->d_bcount) >=
                       -totalbdelta);

            if (totalrtbdelta < 0)
                ASSERT(be64_to_cpu(d->d_rtbcount) >=
                       -totalrtbdelta);

            if (qtrx->qt_icount_delta < 0)
                ASSERT(be64_to_cpu(d->d_icount) >=
                       -qtrx->qt_icount_delta);
#endif
            if (totalbdelta)
                be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

            if (qtrx->qt_icount_delta)
                be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

            if (totalrtbdelta)
                be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

            /*
             * Get any default limits in use.
             * Start/reset the timer(s) if needed.
             */
            if (d->d_id) {
                xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
                xfs_qm_adjust_dqtimers(tp->t_mountp, d);
            }

            dqp->dq_flags |= XFS_DQ_DIRTY;
            /*
             * add this to the list of items to get logged
             */
            xfs_trans_log_dquot(tp, dqp);
            /*
             * Take off what's left of the original reservation.
             * In case of delayed allocations, there's no
             * reservation that a transaction structure knows of.
             */
            if (qtrx->qt_blk_res != 0) {
                ulong blk_res_used = 0;

                if (qtrx->qt_bcount_delta > 0)
                    blk_res_used = qtrx->qt_bcount_delta;

                if (qtrx->qt_blk_res != blk_res_used) {
                    if (qtrx->qt_blk_res > blk_res_used)
                        dqp->q_res_bcount -= (xfs_qcnt_t)
                                             (qtrx->qt_blk_res -
                                              blk_res_used);
                    else
                        dqp->q_res_bcount -= (xfs_qcnt_t)
                                             (blk_res_used -
                                              qtrx->qt_blk_res);
                }
            } else {
                /*
                 * These blks were never reserved, either inside
                 * a transaction or outside one (in a delayed
                 * allocation). Also, this isn't always a
                 * negative number since we sometimes
                 * deliberately skip quota reservations.
                 */
                if (qtrx->qt_bcount_delta) {
                    dqp->q_res_bcount +=
                        (xfs_qcnt_t)qtrx->qt_bcount_delta;
                }
            }
            /*
             * Adjust the RT reservation.
             */
            if (qtrx->qt_rtblk_res != 0) {
                if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
                    if (qtrx->qt_rtblk_res >
                            qtrx->qt_rtblk_res_used)
                        dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                               (qtrx->qt_rtblk_res -
                                                qtrx->qt_rtblk_res_used);
                    else
                        dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                               (qtrx->qt_rtblk_res_used -
                                                qtrx->qt_rtblk_res);
                }
            } else {
                if (qtrx->qt_rtbcount_delta)
                    dqp->q_res_rtbcount +=
                        (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
            }

            /*
             * Adjust the inode reservation.
             */
            if (qtrx->qt_ino_res != 0) {
                ASSERT(qtrx->qt_ino_res >=
                       qtrx->qt_ino_res_used);
                if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
                    dqp->q_res_icount -= (xfs_qcnt_t)
                                         (qtrx->qt_ino_res -
                                          qtrx->qt_ino_res_used);
            } else {
                if (qtrx->qt_icount_delta)
                    dqp->q_res_icount +=
                        (xfs_qcnt_t)qtrx->qt_icount_delta;
            }

            ASSERT(dqp->q_res_bcount >=
                   be64_to_cpu(dqp->q_core.d_bcount));
            ASSERT(dqp->q_res_icount >=
                   be64_to_cpu(dqp->q_core.d_icount));
            ASSERT(dqp->q_res_rtbcount >=
                   be64_to_cpu(dqp->q_core.d_rtbcount));
        }
    }
}
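
The unreserve arithmetic above is easier to see with concrete numbers. The stand-alone sketch below (plain user-space C, compilable as-is) models a single dquot through a transaction that reserved more blocks than it actually used, then checks the same invariant the function asserts at the end of its loop: the reserved count never drops below the on-disk count. The struct and field names (demo_dquot, d_bcount, res_bcount) are simplified stand-ins for the dquot fields used above, not kernel definitions, and only the common "reserved more than used" path is modelled.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the dquot counters used above (illustrative only). */
struct demo_dquot {
	uint64_t d_bcount;	/* blocks actually owned (mirrors q_core.d_bcount) */
	uint64_t res_bcount;	/* blocks owned plus outstanding reservations      */
};

int main(void)
{
	struct demo_dquot dq = { .d_bcount = 100, .res_bcount = 100 };

	/* Transaction start: reserve 8 blocks up front (qt_blk_res). */
	uint64_t blk_res = 8;
	dq.res_bcount += blk_res;

	/* The transaction ends up allocating only 5 of them (qt_bcount_delta). */
	int64_t bcount_delta = 5;

	/* Commit time, mirroring xfs_trans_apply_dquot_deltas():          */
	/* 1. fold the real usage into the on-disk counter ...             */
	dq.d_bcount += (uint64_t)bcount_delta;

	/* 2. ... and hand back the unused part of the reservation.        */
	uint64_t blk_res_used = bcount_delta > 0 ? (uint64_t)bcount_delta : 0;
	if (blk_res > blk_res_used)
		dq.res_bcount -= blk_res - blk_res_used;

	/* Same invariant the kernel asserts: reservation >= usage. */
	assert(dq.res_bcount >= dq.d_bcount);
	printf("d_bcount=%llu res_bcount=%llu\n",
	       (unsigned long long)dq.d_bcount,
	       (unsigned long long)dq.res_bcount);
	return 0;
}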