Example #1
/*
 * Called by dqusage_adjust during a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done,
 * we can just log all the buffers, as opposed to logging numerous updates
 * to individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
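Every example in this set revolves around be64_add_cpu(), which does a read-modify-write of a big-endian 64-bit field using host-order arithmetic. A minimal user-space sketch of that helper, assuming glibc's <endian.h> conversions (the kernel's version uses cpu_to_be64()/be64_to_cpu() to the same effect):

#include <endian.h>
#include <stdint.h>

/* Sketch only: convert the big-endian value to host order, add,
 * and store the result back in big-endian order. */
static inline void be64_add_cpu_model(uint64_t *var, uint64_t val)
{
	*var = htobe64(be64toh(*var) + val);
}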
Example #2
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = CAST5_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u64 *src = (u64 *)walk->src.virt.addr;
	u64 *dst = (u64 *)walk->dst.virt.addr;

	/* Process multi-block batch */
	if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
		do {
			cast5_ctr_16way(ctx, (u8 *)dst, (u8 *)src,
					(__be64 *)walk->iv);

			src += CAST5_PARALLEL_BLOCKS;
			dst += CAST5_PARALLEL_BLOCKS;
			nbytes -= bsize * CAST5_PARALLEL_BLOCKS;
		} while (nbytes >= bsize * CAST5_PARALLEL_BLOCKS);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		u64 ctrblk;

		if (dst != src)
			*dst = *src;

		ctrblk = *(u64 *)walk->iv;
		be64_add_cpu((__be64 *)walk->iv, 1);

		__cast5_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
		*dst ^= ctrblk;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

done:
	return nbytes;
}
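For context, the leftover loop above performs one CTR step per 64-bit block: encrypt the current big-endian counter, bump it with be64_add_cpu(), and XOR the keystream into the data. A hedged sketch of that pattern, with encrypt64() as a hypothetical stand-in for __cast5_encrypt():

#include <endian.h>
#include <stdint.h>

/* Hypothetical single-block cipher; stands in for __cast5_encrypt(). */
uint64_t encrypt64(const void *key, uint64_t block);

/* One CTR step: keystream = E_k(ctr), then ctr++, then out = in ^ keystream. */
static uint64_t ctr_step(const void *key, uint64_t *iv_be, uint64_t in)
{
	uint64_t keystream = encrypt64(key, *iv_be);

	*iv_be = htobe64(be64toh(*iv_be) + 1);	/* as be64_add_cpu() does */
	return in ^ keystream;
}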
Example #3
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
    struct xfs_trans	*tp)
{
    int			i, j;
    struct xfs_dquot	*dqp;
    struct xfs_dqtrx	*qtrx, *qa;
    struct xfs_disk_dquot	*d;
    long			totalbdelta;
    long			totalrtbdelta;

    if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
        return;

    ASSERT(tp->t_dqinfo);
    for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
        qa = tp->t_dqinfo->dqs[j];
        if (qa[0].qt_dquot == NULL)
            continue;

        /*
         * Lock all of the dquots and join them to the transaction.
         */
        xfs_trans_dqlockedjoin(tp, qa);

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
            qtrx = &qa[i];
            /*
             * The array of dquots is filled
             * sequentially, not sparsely.
             */
            if ((dqp = qtrx->qt_dquot) == NULL)
                break;

            ASSERT(XFS_DQ_IS_LOCKED(dqp));
            ASSERT(dqp->q_transp == tp);

            /*
             * Adjust the actual number of blocks used.
             */
            d = &dqp->q_core;

            /*
             * The issue here is that sometimes we intentionally skip
             * a blkquota reservation to be fair to users (when the
             * amount is small). On the other hand, delayed allocs do
             * make reservations, but that's outside of a transaction,
             * so we have no idea how much was really reserved.
             * So, here we've accumulated delayed allocation blks and
             * non-delay blks. The assumption is that the delayed ones
             * are always reserved (outside of a transaction), and the
             * others may or may not have quota reservations.
             */
            totalbdelta = qtrx->qt_bcount_delta +
                          qtrx->qt_delbcnt_delta;
            totalrtbdelta = qtrx->qt_rtbcount_delta +
                            qtrx->qt_delrtb_delta;
#ifdef DEBUG
            if (totalbdelta < 0)
                ASSERT(be64_to_cpu(d->d_bcount) >=
                       -totalbdelta);

            if (totalrtbdelta < 0)
                ASSERT(be64_to_cpu(d->d_rtbcount) >=
                       -totalrtbdelta);

            if (qtrx->qt_icount_delta < 0)
                ASSERT(be64_to_cpu(d->d_icount) >=
                       -qtrx->qt_icount_delta);
#endif
            if (totalbdelta)
                be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

            if (qtrx->qt_icount_delta)
                be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

            if (totalrtbdelta)
                be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

            /*
             * Get any default limits in use.
             * Start/reset the timer(s) if needed.
             */
            if (d->d_id) {
                xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
                xfs_qm_adjust_dqtimers(tp->t_mountp, d);
            }

            dqp->dq_flags |= XFS_DQ_DIRTY;
            /*
             * Add this to the list of items to get logged.
             */
            xfs_trans_log_dquot(tp, dqp);
            /*
             * Take off what's left of the original reservation.
             * In case of delayed allocations, there's no
             * reservation that a transaction structure knows of.
             */
            if (qtrx->qt_blk_res != 0) {
                ulong blk_res_used = 0;

                if (qtrx->qt_bcount_delta > 0)
                    blk_res_used = qtrx->qt_bcount_delta;

                if (qtrx->qt_blk_res != blk_res_used) {
                    if (qtrx->qt_blk_res > blk_res_used)
                        dqp->q_res_bcount -= (xfs_qcnt_t)
                                             (qtrx->qt_blk_res -
                                              blk_res_used);
                    else
                        dqp->q_res_bcount -= (xfs_qcnt_t)
                                             (blk_res_used -
                                              qtrx->qt_blk_res);
                }
            } else {
                /*
                 * These blks were never reserved, either inside
                 * a transaction or outside one (in a delayed
                 * allocation). Also, this isn't always a
                 * negative number since we sometimes
                 * deliberately skip quota reservations.
                 */
                if (qtrx->qt_bcount_delta) {
                    dqp->q_res_bcount +=
                        (xfs_qcnt_t)qtrx->qt_bcount_delta;
                }
            }
            /*
             * Adjust the RT reservation.
             */
            if (qtrx->qt_rtblk_res != 0) {
                if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
                    if (qtrx->qt_rtblk_res >
                            qtrx->qt_rtblk_res_used)
                        dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                               (qtrx->qt_rtblk_res -
                                                qtrx->qt_rtblk_res_used);
                    else
                        dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                               (qtrx->qt_rtblk_res_used -
                                                qtrx->qt_rtblk_res);
                }
            } else {
                if (qtrx->qt_rtbcount_delta)
                    dqp->q_res_rtbcount +=
                        (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
            }

            /*
             * Adjust the inode reservation.
             */
            if (qtrx->qt_ino_res != 0) {
                ASSERT(qtrx->qt_ino_res >=
                       qtrx->qt_ino_res_used);
                if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
                    dqp->q_res_icount -= (xfs_qcnt_t)
                                         (qtrx->qt_ino_res -
                                          qtrx->qt_ino_res_used);
            } else {
                if (qtrx->qt_icount_delta)
                    dqp->q_res_icount +=
                        (xfs_qcnt_t)qtrx->qt_icount_delta;
            }

            ASSERT(dqp->q_res_bcount >=
                   be64_to_cpu(dqp->q_core.d_bcount));
            ASSERT(dqp->q_res_icount >=
                   be64_to_cpu(dqp->q_core.d_icount));
            ASSERT(dqp->q_res_rtbcount >=
                   be64_to_cpu(dqp->q_core.d_rtbcount));
        }
    }
}
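One detail worth noting above: totalbdelta is a signed long, yet the cast to xfs_qcnt_t (an unsigned type) before be64_add_cpu() still decrements the counter correctly for negative deltas, because unsigned addition is modular. A quick user-space check of that property, reusing the be64_add_cpu_model() sketch from Example #1:

#include <assert.h>
#include <endian.h>
#include <stdint.h>

int main(void)
{
	uint64_t bcount = htobe64(100);
	long delta = -3;		/* e.g. three blocks freed */

	/* (uint64_t)-3 wraps modulo 2^64, so 100 + (2^64 - 3) == 97. */
	be64_add_cpu_model(&bcount, (uint64_t)delta);
	assert(be64toh(bcount) == 97);
	return 0;
}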
Example #4
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
    xfs_trans_t    *tp)
{
    xfs_dsb_t    *sbp;
    xfs_buf_t    *bp;
    int        whole = 0;

    bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
    sbp = XFS_BUF_TO_SBP(bp);

    /*
     * Check that superblock mods match the mods made to AGF counters.
     */
    ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
           (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
        tp->t_ag_btree_delta));

    /*
     * Only update the superblock counters if we are logging them
     */
    if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
        if (tp->t_icount_delta)
            be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
        if (tp->t_ifree_delta)
            be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
        if (tp->t_fdblocks_delta)
            be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
        if (tp->t_res_fdblocks_delta)
            be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
    }

    if (tp->t_frextents_delta)
        be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
    if (tp->t_res_frextents_delta)
        be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

    if (tp->t_dblocks_delta) {
        be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
        whole = 1;
    }
    if (tp->t_agcount_delta) {
        be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
        whole = 1;
    }
    if (tp->t_imaxpct_delta) {
        sbp->sb_imax_pct += tp->t_imaxpct_delta;
        whole = 1;
    }
    if (tp->t_rextsize_delta) {
        be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
        whole = 1;
    }
    if (tp->t_rbmblocks_delta) {
        be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
        whole = 1;
    }
    if (tp->t_rblocks_delta) {
        be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
        whole = 1;
    }
    if (tp->t_rextents_delta) {
        be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
        whole = 1;
    }
    if (tp->t_rextslog_delta) {
        sbp->sb_rextslog += tp->t_rextslog_delta;
        whole = 1;
    }

    if (whole)
        /*
         * Log the whole thing; the fields are noncontiguous.
         */
        xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
    else
        /*
         * Since all the modifiable fields are contiguous, we
         * can get away with this.
         */
        xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
                  offsetof(xfs_dsb_t, sb_frextents) +
                  sizeof(sbp->sb_frextents) - 1);

    tp->t_mountp->m_super->s_dirt = 1;
}
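The else branch works because sb_icount through sb_frextents sit contiguously in xfs_dsb_t, so a single (first, last) byte range covers every counter this function may touch in the lazy-sb case. A hypothetical miniature of that offsetof() range computation (the struct layout here is illustrative, not the real xfs_dsb_t):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical on-disk layout; only the field order matters here. */
struct disk_sb {
	uint64_t sb_icount;
	uint64_t sb_ifree;
	uint64_t sb_fdblocks;
	uint64_t sb_frextents;
};

/* First and last byte of the counter run, mirroring the
 * xfs_trans_log_buf() range above. */
#define SB_CTRS_FIRST	offsetof(struct disk_sb, sb_icount)
#define SB_CTRS_LAST	(offsetof(struct disk_sb, sb_frextents) + \
			 sizeof(uint64_t) - 1)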
Example #5
/*
 * Increment a 128-bit big-endian counter: bump the low quadword and
 * carry into the high quadword when the low one wraps to zero.
 */
static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}
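A small user-space model of inc(), assuming the usual two-quadword be128 layout, demonstrates the carry from the low half into the high half:

#include <assert.h>
#include <endian.h>
#include <stdint.h>

struct be128_model { uint64_t a, b; };	/* big-endian high, low */

static void inc_model(struct be128_model *iv)
{
	iv->b = htobe64(be64toh(iv->b) + 1);
	if (!iv->b)			/* low quadword wrapped to zero */
		iv->a = htobe64(be64toh(iv->a) + 1);
}

int main(void)
{
	struct be128_model iv = { 0, htobe64(UINT64_MAX) };

	inc_model(&iv);			/* low half wraps; carry into high */
	assert(be64toh(iv.a) == 1 && iv.b == 0);
	return 0;
}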