Example #1
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * If we are in log recovery, the quota subsystem has not been
	 * initialised, so we have no quotainfo structure. In that case we
	 * need to calculate the number of dquots in the buffer manually.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF))
			return false;
		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
			return false;
	}
	return true;
}
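
Both verifiers fall back to computing the dquot count by hand when no quotainfo is available. That calculation reduces to converting the buffer length from 512-byte basic blocks to bytes and dividing by the size of one on-disk dquot. A minimal sketch of that fallback, assuming only BBTOB() and struct xfs_dqblk from the XFS headers (illustrative, not the verbatim xfs_qm_calc_dquots_per_chunk()):

/*
 * Sketch of the dquots-per-chunk calculation: convert the buffer length
 * from basic blocks to bytes, then divide by the size of one on-disk
 * dquot record. Not the in-tree helper; shown only to make the ndquots
 * fallback above concrete.
 */
STATIC unsigned int
example_dquots_per_chunk(
	unsigned int		nbblks)	/* buffer length in basic blocks */
{
	return BBTOB(nbblks) / sizeof(struct xfs_dqblk);
}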
Example #2
STATIC bool
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * If we are in log recovery, the quota subsystem has not been
	 * initialised, so we have no quotainfo structure. In that case we
	 * need to calculate the number of dquots in the buffer manually.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of each dquot is supposed to be, only
	 * that the ids should increase monotonically within the buffer. If
	 * the first id is corrupt, the check will fail on the second dquot
	 * in the buffer, so corruption reports may point to the wrong dquot.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;
		int			error;

		ddq = &d[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
				       "xfs_dquot_buf_verify");
		if (error)
			return false;
	}
	return true;
}
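
In practice these two checks are not called directly; they run back to back from the buffer read verifier hooked up through struct xfs_buf_ops, so a corrupt dquot buffer is rejected as soon as it comes off disk. A sketch of that wiring, assuming the verify_read/verify_write hooks and xfs_buf_ioerror() of this era (names prefixed example_ are illustrative, not the in-tree verifier):

/*
 * Sketch: combining the CRC and contents checks in a read verifier
 * attached via struct xfs_buf_ops. Illustrative only; error handling in
 * the real verifier also reports the corruption before failing the I/O.
 */
static void
example_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp) ||
	    !xfs_dquot_buf_verify(mp, bp)) {
		/* flag the buffer as corrupt so the read fails */
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}

const struct xfs_buf_ops example_dquot_buf_ops = {
	.verify_read	= example_dquot_buf_read_verify,
	.verify_write	= NULL,	/* write verifier omitted in this sketch */
};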
/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	INIT_LIST_HEAD(&qinf->qi_lru_list);
	qinf->qi_lru_count = 0;
	mutex_init(&qinf->qi_lru_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
							qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we go to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period a user or
		 * group is given before further writes are refused. If a
		 * value is zero, the default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
 
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

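	/* register a shrinker so cached dquots can be reclaimed under memory pressure */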
	qinf->qi_shrinker.shrink = xfs_qm_shake;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}
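
Everything set up here needs a matching teardown when quotas are switched off or the filesystem unmounts: unregister the shrinker, drop the quota inodes and free the structure. A rough sketch of that counterpart (the in-tree function is xfs_qm_destroy_quotainfo(); the qi_uquotaip/qi_gquotaip fields are assumed from this era's xfs_quotainfo_t and may differ by version):

/*
 * Sketch of the teardown counterpart to xfs_qm_init_quotainfo(): undo
 * the shrinker registration, release the quota inodes and free the
 * quotainfo structure. Field names qi_uquotaip/qi_gquotaip are assumed,
 * not taken from the code above.
 */
STATIC void
example_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi = mp->m_quotainfo;

	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL;
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}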