Example No. 1
0
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if (completion_done(&dqp->q_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_dqunlock(dqp);
		return;
	}

	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	xfs_dqunlock(dqp);
	if (!bp)
		return;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
}
Example No. 2
0
/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}
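/*
 * A minimal user-space sketch of the same lookup pattern, assuming pthreads
 * and a plain array in place of the radix tree. Every name below (struct dq,
 * dq_lookup, ...) is illustrative, not an XFS API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

struct dq {
	pthread_mutex_t	lock;		/* per-object lock, like xfs_dqlock() */
	bool		freeing;	/* object is being torn down */
	int		nrefs;
};

#define NDQ 64
static struct dq *dq_table[NDQ];	/* the "cache", guarded by table_lock */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct dq *dq_lookup(unsigned int id)
{
	struct dq *dq;

restart:
	pthread_mutex_lock(&table_lock);
	dq = dq_table[id % NDQ];
	if (!dq) {
		pthread_mutex_unlock(&table_lock);
		return NULL;		/* cache miss */
	}

	pthread_mutex_lock(&dq->lock);
	if (dq->freeing) {
		/* The object is on its way out: back off, sleep, retry. */
		pthread_mutex_unlock(&dq->lock);
		pthread_mutex_unlock(&table_lock);
		usleep(1000);
		goto restart;
	}

	dq->nrefs++;			/* reference taken under both locks */
	pthread_mutex_unlock(&table_lock);
	return dq;			/* returned locked, as in the XFS code */
}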
Example No. 3
0
/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty.  This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, 0);
	if (error)
		xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
			"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
			error, dqp);
	xfs_dqunlock(dqp);
}
Example No. 4
0
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
Example No. 5
0
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}
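/*
 * Sketch of the hint-release ordering used above, in plain pthreads: clear
 * the back pointer while our own lock is held, drop our lock, and only then
 * put the reference. The types and obj_put() are hypothetical stand-ins, not
 * XFS functions (the real code also locks the group dquot first, because
 * xfs_qm_dqput() expects a locked dquot).
 */
#include <pthread.h>
#include <stddef.h>

struct obj {
	pthread_mutex_t	lock;
	struct obj	*hint;		/* cached reference to another object */
};

void obj_put(struct obj *o);		/* assumed: drops one reference */

static void release_hint(struct obj *o)
{
	struct obj *hint;

	pthread_mutex_lock(&o->lock);
	hint = o->hint;			/* detach while our lock is held */
	o->hint = NULL;
	pthread_mutex_unlock(&o->lock);

	if (hint)
		obj_put(hint);		/* release without holding o->lock */
}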
Example No. 6
0
/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
	xfs_dq_logitem_t	*logitem)
{
	xfs_dquot_t	*dqp;
	int		error;

	dqp = logitem->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty.  This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
	if (error)
		xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
			"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
			error, dqp);
	xfs_dqunlock(dqp);
}
Example No. 7
0
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	boolean_t		locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	qa = tp->t_dqinfo->dqa_usrdquots;

	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = B_FALSE;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = B_TRUE;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
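/*
 * The inner loop above takes the dquot lock lazily: only once it knows that
 * at least one reservation actually needs unwinding. A stand-alone sketch of
 * that shape with hypothetical names:
 */
#include <pthread.h>
#include <stdbool.h>

struct resv { long blk; long ino; };
struct acct { pthread_mutex_t lock; long blk_resv; long ino_resv; };

static void unreserve_one(struct acct *a, const struct resv *r)
{
	bool locked = false;

	if (r->blk) {
		pthread_mutex_lock(&a->lock);
		locked = true;
		a->blk_resv -= r->blk;
	}
	if (r->ino) {
		if (!locked) {
			pthread_mutex_lock(&a->lock);
			locked = true;
		}
		a->ino_resv -= r->ino;
	}
	if (locked)
		pthread_mutex_unlock(&a->lock);
}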
Example No. 8
0
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs > 0)
		xfs_dqunlock(dqp);
	else
		xfs_qm_dqput_final(dqp);
}
Example No. 9
0
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
Example No. 10
0
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}
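/*
 * User-space sketch of the put path: drop a reference under the per-object
 * lock and, when the count reaches zero, park the object on an "unused" LRU
 * instead of freeing it, so a later cache hit can resurrect it cheaply. The
 * names are illustrative, not XFS APIs; a real implementation also needs a
 * shrinker walking the LRU, as in the reclaim code later in this section.
 */
#include <pthread.h>
#include <sys/queue.h>

struct node {
	pthread_mutex_t		lock;
	int			nrefs;
	TAILQ_ENTRY(node)	lru;	/* linkage on the unused list */
};

static TAILQ_HEAD(, node) unused_lru = TAILQ_HEAD_INITIALIZER(unused_lru);
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_put(struct node *n)
{
	/* Caller holds n->lock, mirroring xfs_qm_dqput(). */
	if (--n->nrefs == 0) {
		pthread_mutex_lock(&lru_lock);
		TAILQ_INSERT_TAIL(&unused_lru, n, lru);
		pthread_mutex_unlock(&lru_lock);
	}
	pthread_mutex_unlock(&n->lock);
}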
Example No. 11
0
/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction.  Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}
Example No. 12
0
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
Example No. 13
0
/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		mutex_lock(&qi->qi_lru_lock);
		if (list_empty(&dqp->q_lru)) {
			list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
			qi->qi_lru_count++;
			XFS_STATS_INC(xs_qm_dquot_unused);
		}
		mutex_unlock(&qi->qi_lru_lock);

	}
	xfs_dqunlock(dqp);
}
Example No. 15
0
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 * Returns EDQUOT if quota is exceeded.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	int		error;
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	time_t		timer;
	xfs_qwarncnt_t	warns;
	xfs_qwarncnt_t	warnlimit;
	xfs_qcnt_t	count;
	xfs_qcnt_t	*resbcountp;
	xfs_quotainfo_t	*q = mp->m_quotainfo;

	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqlock(dqp);
	}
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = q->qi_bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = q->qi_rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
		resbcountp = &dqp->q_res_rtbcount;
	}
	error = 0;

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
			  " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			     (hardlimit <= nblks + *resbcountp)) {
				error = EDQUOT;
				goto error_return;
			}

			if (softlimit > 0ULL &&
			     (softlimit <= nblks + *resbcountp)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
		if (ninos > 0) {
			count = be64_to_cpu(dqp->q_core.d_icount);
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = q->qi_ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = q->qi_isoftlimit;
			if (hardlimit > 0ULL && count >= hardlimit) {
				error = EDQUOT;
				goto error_return;
			} else if (softlimit > 0ULL && count >= softlimit) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				     (warns != 0 && warns >= warnlimit)) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

error_return:
	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqunlock(dqp);
	}
	return (error);
}
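/*
 * The enforcement above is plain arithmetic on the reserved count. A small
 * stand-alone helper capturing that decision (hypothetical name; the warning
 * counters and the inode-count branch follow the same pattern):
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Return true if reserving 'nblks' more blocks has to be refused. */
static bool quota_would_exceed(uint64_t reserved, int64_t nblks,
			       uint64_t hardlimit, uint64_t softlimit,
			       time_t timer, unsigned int warns,
			       unsigned int warnlimit)
{
	if (nblks <= 0)
		return false;		/* only new allocations are checked */

	/* Mirrors: hardlimit <= nblks + *resbcountp above. */
	if (hardlimit && reserved + nblks >= hardlimit)
		return true;

	/*
	 * Being over the soft limit is tolerated until the grace timer
	 * expires or the warning budget is used up.
	 */
	if (softlimit && reserved + nblks >= softlimit) {
		if ((timer && time(NULL) > timer) ||
		    (warns && warns >= warnlimit))
			return true;
	}
	return false;
}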
Example No. 16
0
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
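/*
 * The "drop the hint lock before calling dqget" comment above is a lock
 * ordering rule: the inode lock ranks above any dquot lock, so a path that
 * holds a dquot lock must let it go before doing anything that may retake
 * the inode lock. A tiny pthread sketch of that back-off (lock_a and lock_b
 * are illustrative, lock_a ranking first):
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* e.g. ilock  */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* e.g. dqlock */

/* Holds B, discovers it needs the full A-then-B path: back off first. */
static void back_off_and_reacquire(void)
{
	pthread_mutex_lock(&lock_b);
	/* ... the fast path under B didn't work out ... */
	pthread_mutex_unlock(&lock_b);

	pthread_mutex_lock(&lock_a);	/* now take them in rank order */
	pthread_mutex_lock(&lock_b);
	/* ... redo the work under both locks ... */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}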
Example No. 17
0
/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return (error);
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}
Example No. 18
0
STATIC void
xfs_qm_dqreclaim_one(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list,
	struct list_head	*dispose_list)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error;

	if (!xfs_dqlock_nowait(dqp))
		goto out_move_tail;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);

		trace_xfs_dqreclaim_want(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		list_del_init(&dqp->q_lru);
		qi->qi_lru_count--;
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return;
	}

	/*
	 * Try to grab the flush lock. If this dquot is in the process of
	 * getting flushed to disk, we don't want to reclaim it.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_unlock_move_tail;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		trace_xfs_dqreclaim_dirty(dqp);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_move_tail;
		}

		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Give the dquot another try on the freelist, as the
		 * flushing will take some time.
		 */
		goto out_unlock_move_tail;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, dispose_list);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return;

	/*
	 * Move the dquot to the tail of the list so that we don't spin on it.
	 */
out_unlock_move_tail:
	xfs_dqunlock(dqp);
out_move_tail:
	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
}
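/*
 * Sketch of the reclaim walker's "trylock or requeue" idea in user space:
 * never block on a busy object, just rotate it to the tail of the LRU and
 * look at the next one. All names are illustrative; the walker is assumed
 * to hold the lock protecting lru_list.
 */
#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct node {
	pthread_mutex_t		lock;
	int			nrefs;
	TAILQ_ENTRY(node)	lru;
};

static TAILQ_HEAD(, node) lru_list = TAILQ_HEAD_INITIALIZER(lru_list);

/* Returns true if the node was taken off the LRU for disposal. */
static bool reclaim_one(struct node *n)
{
	if (pthread_mutex_trylock(&n->lock) != 0)
		goto requeue;			/* busy: don't spin on it */

	if (n->nrefs > 0) {
		/* Re-referenced while unused: it no longer belongs here. */
		pthread_mutex_unlock(&n->lock);
		TAILQ_REMOVE(&lru_list, n, lru);
		return false;
	}

	TAILQ_REMOVE(&lru_list, n, lru);	/* past the point of no return */
	pthread_mutex_unlock(&n->lock);
	/* ... dispose of n ... */
	return true;

requeue:
	TAILQ_REMOVE(&lru_list, n, lru);	/* give it another lap */
	TAILQ_INSERT_TAIL(&lru_list, n, lru);
	return false;
}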
Example No. 19
0
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}
Example No. 20
0
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
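/*
 * The miss path above is an optimistic insert: two threads can both miss,
 * both read the dquot from disk, and only one insert can win; the loser
 * destroys its copy and retries the lookup. A compact sketch of that shape
 * (cache_*, object_* are assumed helpers, not XFS APIs; refcounting and the
 * FREEING check would work as in the lookup sketch after Example No. 2):
 */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct cache;				/* opaque index, e.g. a hash table */
struct object;

struct object *cache_lookup(struct cache *c, unsigned int id);
int cache_insert(struct cache *c, unsigned int id, struct object *o);
					/* assumed to return -EEXIST on a dup */
struct object *object_read_from_disk(unsigned int id);
void object_destroy(struct object *o);

struct object *cache_get(struct cache *c, pthread_mutex_t *lock,
			 unsigned int id)
{
	struct object *o;

restart:
	pthread_mutex_lock(lock);
	o = cache_lookup(c, id);
	pthread_mutex_unlock(lock);
	if (o)
		return o;			/* cache hit */

	o = object_read_from_disk(id);		/* slow path, no locks held */
	if (!o)
		return NULL;

	pthread_mutex_lock(lock);
	if (cache_insert(c, id, o) == -EEXIST) {
		/* Lost the race: throw ours away and take the winner's. */
		pthread_mutex_unlock(lock);
		object_destroy(o);
		goto restart;
	}
	pthread_mutex_unlock(lock);
	return o;
}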
Example No. 21
0
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 * Returns EDQUOT if quota is exceeded.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	int		error;
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	time_t		btimer;
	xfs_qcnt_t	*resbcountp;

	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqlock(dqp);
	}
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT);
		softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT);
		btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT);
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = INT_GET(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT);
		softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT);
		btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT);
		resbcountp = &dqp->q_res_rtbcount;
	}
	error = 0;

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT) &&
	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
		printk("BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?\n",
			nblks, *resbcountp, hardlimit);
#endif
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			     (hardlimit <= nblks + *resbcountp)) {
				error = EDQUOT;
				goto error_return;
			}

			if (softlimit > 0ULL &&
			     (softlimit <= nblks + *resbcountp)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((btimer != 0 && CURRENT_TIME > btimer) ||
				    (!INT_ISZERO(dqp->q_core.d_bwarns, ARCH_CONVERT) &&
				     INT_GET(dqp->q_core.d_bwarns, ARCH_CONVERT) >=
				     XFS_QI_BWARNLIMIT(dqp->q_mount))) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
		if (ninos > 0) {
			if (INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT) > 0ULL &&
			    INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >=
			    INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT)) {
				error = EDQUOT;
				goto error_return;
			} else if (INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT) > 0ULL &&
				   INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >=
				   INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((!INT_ISZERO(dqp->q_core.d_itimer, ARCH_CONVERT) &&
				     CURRENT_TIME > INT_GET(dqp->q_core.d_itimer, ARCH_CONVERT)) ||
				    (!INT_ISZERO(dqp->q_core.d_iwarns, ARCH_CONVERT) &&
				     INT_GET(dqp->q_core.d_iwarns, ARCH_CONVERT) >=
				     XFS_QI_IWARNLIMIT(dqp->q_mount))) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
	ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
	ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));

error_return:
	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqunlock(dqp);
	}
	return (error);
}
Example No. 22
0
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
    xfs_trans_t	*tp,
    xfs_mount_t	*mp,
    xfs_dquot_t	*dqp,
    long		nblks,
    long		ninos,
    uint		flags)
{
    xfs_qcnt_t	hardlimit;
    xfs_qcnt_t	softlimit;
    time_t		timer;
    xfs_qwarncnt_t	warns;
    xfs_qwarncnt_t	warnlimit;
    xfs_qcnt_t	total_count;
    xfs_qcnt_t	*resbcountp;
    xfs_quotainfo_t	*q = mp->m_quotainfo;
    struct xfs_def_quota	*defq;


    xfs_dqlock(dqp);

    defq = xfs_get_defquota(dqp, q);

    if (flags & XFS_TRANS_DQ_RES_BLKS) {
        hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
        if (!hardlimit)
            hardlimit = defq->bhardlimit;
        softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
        if (!softlimit)
            softlimit = defq->bsoftlimit;
        timer = be32_to_cpu(dqp->q_core.d_btimer);
        warns = be16_to_cpu(dqp->q_core.d_bwarns);
        warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
        resbcountp = &dqp->q_res_bcount;
    } else {
        ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
        hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
        if (!hardlimit)
            hardlimit = defq->rtbhardlimit;
        softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
        if (!softlimit)
            softlimit = defq->rtbsoftlimit;
        timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
        warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
        warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
        resbcountp = &dqp->q_res_rtbcount;
    }

    if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
            dqp->q_core.d_id &&
            ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
             (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
             (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
        if (nblks > 0) {
            /*
             * dquot is locked already. See if we'd go over the
             * hardlimit or exceed the timelimit if we allocate
             * nblks.
             */
            total_count = *resbcountp + nblks;
            if (hardlimit && total_count > hardlimit) {
                xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
                goto error_return;
            }
            if (softlimit && total_count > softlimit) {
                if ((timer != 0 && get_seconds() > timer) ||
                        (warns != 0 && warns >= warnlimit)) {
                    xfs_quota_warn(mp, dqp,
                                   QUOTA_NL_BSOFTLONGWARN);
                    goto error_return;
                }

                xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
            }
        }
        if (ninos > 0) {
            total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
            timer = be32_to_cpu(dqp->q_core.d_itimer);
            warns = be16_to_cpu(dqp->q_core.d_iwarns);
            warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
            hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
            if (!hardlimit)
                hardlimit = defq->ihardlimit;
            softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
            if (!softlimit)
                softlimit = defq->isoftlimit;

            if (hardlimit && total_count > hardlimit) {
                xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
                goto error_return;
            }
            if (softlimit && total_count > softlimit) {
                if  ((timer != 0 && get_seconds() > timer) ||
                        (warns != 0 && warns >= warnlimit)) {
                    xfs_quota_warn(mp, dqp,
                                   QUOTA_NL_ISOFTLONGWARN);
                    goto error_return;
                }
                xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
            }
        }
    }

    /*
     * Change the reservation, but not the actual usage.
     * Note that q_res_bcount = q_core.d_bcount + resv
     */
    (*resbcountp) += (xfs_qcnt_t)nblks;
    if (ninos != 0)
        dqp->q_res_icount += (xfs_qcnt_t)ninos;

    /*
     * note the reservation amt in the trans struct too,
     * so that the transaction knows how much was reserved by
     * it against this particular dquot.
     * We don't do this when we are reserving for a delayed allocation,
     * because we don't have the luxury of a transaction envelope then.
     */
    if (tp) {
        ASSERT(tp->t_dqinfo);
        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
        if (nblks != 0)
            xfs_trans_mod_dquot(tp, dqp,
                                flags & XFS_QMOPT_RESBLK_MASK,
                                nblks);
        if (ninos != 0)
            xfs_trans_mod_dquot(tp, dqp,
                                XFS_TRANS_DQ_RES_INOS,
                                ninos);
    }
    ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
    ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
    ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

    xfs_dqunlock(dqp);
    return 0;

error_return:
    xfs_dqunlock(dqp);
    if (flags & XFS_QMOPT_ENOSPC)
        return -ENOSPC;
    return -EDQUOT;
}
/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
    struct xfs_mount	*mp,
    xfs_dqid_t		id,
    uint			type,
    struct qc_dqblk		*newlim)
{
    struct xfs_quotainfo	*q = mp->m_quotainfo;
    struct xfs_disk_dquot	*ddq;
    struct xfs_dquot	*dqp;
    struct xfs_trans	*tp;
    int			error;
    xfs_qcnt_t		hard, soft;

    if (newlim->d_fieldmask & ~XFS_QC_MASK)
        return EINVAL;
    if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
        return 0;

    /*
     * We don't want to race with a quotaoff so take the quotaoff lock.
     * We don't hold an inode lock, so there's nothing else to stop
     * a quotaoff from happening.
     */
    mutex_lock(&q->qi_quotaofflock);

    /*
     * Get the dquot (locked) before we start, as we need to do a
     * transaction to allocate it if it doesn't exist. Once we have the
     * dquot, unlock it so we can start the next transaction safely. We hold
     * a reference to the dquot, so it's safe to do this unlock/lock without
     * it being reclaimed in the mean time.
     */
    error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
    if (error) {
        ASSERT(error != ENOENT);
        goto out_unlock;
    }
    xfs_dqunlock(dqp);

    tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
    error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
    if (error) {
        xfs_trans_cancel(tp, 0);
        goto out_rele;
    }

    xfs_dqlock(dqp);
    xfs_trans_dqjoin(tp, dqp);
    ddq = &dqp->q_core;

    /*
     * Make sure that hardlimits are >= soft limits before changing.
     */
    hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
           be64_to_cpu(ddq->d_blk_hardlimit);
    soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
           be64_to_cpu(ddq->d_blk_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_blk_hardlimit = cpu_to_be64(hard);
        ddq->d_blk_softlimit = cpu_to_be64(soft);
        xfs_dquot_set_prealloc_limits(dqp);
        if (id == 0) {
            q->qi_bhardlimit = hard;
            q->qi_bsoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
    }
    hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
           be64_to_cpu(ddq->d_rtb_hardlimit);
    soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
           (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
           be64_to_cpu(ddq->d_rtb_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_rtb_hardlimit = cpu_to_be64(hard);
        ddq->d_rtb_softlimit = cpu_to_be64(soft);
        if (id == 0) {
            q->qi_rtbhardlimit = hard;
            q->qi_rtbsoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
    }

    hard = (newlim->d_fieldmask & QC_INO_HARD) ?
           (xfs_qcnt_t) newlim->d_ino_hardlimit :
           be64_to_cpu(ddq->d_ino_hardlimit);
    soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
           (xfs_qcnt_t) newlim->d_ino_softlimit :
           be64_to_cpu(ddq->d_ino_softlimit);
    if (hard == 0 || hard >= soft) {
        ddq->d_ino_hardlimit = cpu_to_be64(hard);
        ddq->d_ino_softlimit = cpu_to_be64(soft);
        if (id == 0) {
            q->qi_ihardlimit = hard;
            q->qi_isoftlimit = soft;
        }
    } else {
        xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
    }

    /*
     * Update warnings counter(s) if requested
     */
    if (newlim->d_fieldmask & QC_SPC_WARNS)
        ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
    if (newlim->d_fieldmask & QC_INO_WARNS)
        ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
    if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
        ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

    if (id == 0) {
        /*
         * Timelimits for the super user set the relative time
         * the other users can be over quota for this file system.
         * If it is zero a default is used.  Ditto for the default
         * soft and hard limit values (already done, above), and
         * for warnings.
         */
        if (newlim->d_fieldmask & QC_SPC_TIMER) {
            q->qi_btimelimit = newlim->d_spc_timer;
            ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
        }
        if (newlim->d_fieldmask & QC_INO_TIMER) {
            q->qi_itimelimit = newlim->d_ino_timer;
            ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
        }
        if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
            q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
            ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
        }
        if (newlim->d_fieldmask & QC_SPC_WARNS)
            q->qi_bwarnlimit = newlim->d_spc_warns;
        if (newlim->d_fieldmask & QC_INO_WARNS)
            q->qi_iwarnlimit = newlim->d_ino_warns;
        if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
            q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
    } else {
        /*
         * If the user is now over quota, start the timelimit.
         * The user will not be 'warned'.
         * Note that we keep the timers ticking, whether enforcement
         * is on or off. We don't really want to bother with iterating
         * over all ondisk dquots and turning the timers on/off.
         */
        xfs_qm_adjust_dqtimers(mp, ddq);
    }
    dqp->dq_flags |= XFS_DQ_DIRTY;
    xfs_trans_log_dquot(tp, dqp);

    error = xfs_trans_commit(tp, 0);

out_rele:
    xfs_qm_dqrele(dqp);
out_unlock:
    mutex_unlock(&q->qi_quotaofflock);
    return error;
}
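/*
 * Every limit update above applies a (hard, soft) pair only when hard is
 * zero ("no hard limit") or hard >= soft. A tiny helper expressing just
 * that rule (hypothetical name):
 */
#include <stdbool.h>
#include <stdint.h>

static bool limits_are_sane(uint64_t hard, uint64_t soft)
{
	return hard == 0 || hard >= soft;
}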
/*
 * Look up a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by the caller, and it is left locked
 * on return. The returned dquot is locked.
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	xfs_dqhash_t		*qh,
	xfs_dquot_t		**O_dqpp)
{
	xfs_dquot_t		*dqp;
	uint			flist_locked;
	xfs_dquot_t		*d;

	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));

	flist_locked = B_FALSE;

	/*
	 * Traverse the hashchain looking for a match
	 */
	for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
			/*
			 * All in core dquots must be on the dqlist of mp
			 */
			ASSERT(dqp->MPL_PREVP != NULL);

			xfs_dqlock(dqp);
			if (dqp->q_nrefs == 0) {
				ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
					xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");

					/*
					 * We may have raced with dqreclaim_one()
					 * (and lost). So, flag that we don't
					 * want the dquot to be reclaimed.
					 */
					dqp->dq_flags |= XFS_DQ_WANT;
					xfs_dqunlock(dqp);
					xfs_qm_freelist_lock(xfs_Gqm);
					xfs_dqlock(dqp);
					dqp->dq_flags &= ~(XFS_DQ_WANT);
				}
				flist_locked = B_TRUE;
			}

			/*
			 * id couldn't have changed; we had the hashlock all
			 * along
			 */
			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);

			if (flist_locked) {
				if (dqp->q_nrefs != 0) {
					xfs_qm_freelist_unlock(xfs_Gqm);
					flist_locked = B_FALSE;
				} else {
					/*
					 * take it off the freelist
					 */
					xfs_dqtrace_entry(dqp,
							"DQLOOKUP: TAKEOFF FL");
					XQM_FREELIST_REMOVE(dqp);
					/* xfs_qm_freelist_print(&(xfs_Gqm->
							qm_dqfreelist),
							"after removal"); */
				}
			}

			/*
			 * grab a reference
			 */
			XFS_DQHOLD(dqp);

			if (flist_locked)
				xfs_qm_freelist_unlock(xfs_Gqm);
			/*
			 * move the dquot to the front of the hashchain
			 */
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			if (dqp->HL_PREVP != &qh->qh_next) {
				xfs_dqtrace_entry(dqp,
						  "DQLOOKUP: HASH MOVETOFRONT");
				if ((d = dqp->HL_NEXT))
					d->HL_PREVP = dqp->HL_PREVP;
				*(dqp->HL_PREVP) = d;
				d = qh->qh_next;
				d->HL_PREVP = &dqp->HL_NEXT;
				dqp->HL_NEXT = d;
				dqp->HL_PREVP = &qh->qh_next;
				qh->qh_next = dqp;
			}
			xfs_dqtrace_entry(dqp, "LOOKUP END");
			*O_dqpp = dqp;
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			return (0);
		}
	}

	*O_dqpp = NULL;
	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
	return (1);
}
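/*
 * The HL_PREVP/HL_NEXT juggling above is a move-to-front on a singly linked
 * hash chain where each node stores the address of the pointer that points
 * at it. A stand-alone sketch of the same trick (names are illustrative):
 */
#include <stddef.h>

struct hnode {
	struct hnode	*next;
	struct hnode	**prevp;	/* address of the pointer pointing at us */
};

static void chain_move_to_front(struct hnode **head, struct hnode *n)
{
	if (n->prevp == head)
		return;				/* already at the front */

	/* Unlink: whatever pointed at us now points past us. */
	*n->prevp = n->next;
	if (n->next)
		n->next->prevp = n->prevp;

	/* Relink at the head. */
	n->next = *head;
	if (*head)
		(*head)->prevp = &n->next;
	*head = n;
	n->prevp = head;
}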
/* ARGSUSED */
int
xfs_qm_dqpurge(
	xfs_dquot_t	*dqp,
	uint		flags)
{
	xfs_dqhash_t	*thishash;
	xfs_mount_t	*mp;

	mp = dqp->q_mount;

	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));

	xfs_dqlock(dqp);
	/*
	 * We really can't afford to purge a dquot that is
	 * referenced, because these are hard refs.
	 * It shouldn't happen in general because we went thru _all_ inodes in
	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
	 * However it is possible that we have dquots with temporary
	 * references that are not attached to an inode. e.g. see xfs_setattr().
	 */
	if (dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
		return (1);
	}

	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * XXX If we're turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
		/* dqflush unlocks dqflock */
		/*
		 * Given that dqpurge is a very rare occurrence, it is OK
		 * that we're holding the hashlist and mplist locks
		 * across the disk write. But, ... XXXsup
		 *
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
		xfs_dqflock(dqp);
	}
	ASSERT(dqp->q_pincount == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	thishash = dqp->q_hash;
	XQM_HASHLIST_REMOVE(thishash, dqp);
	XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
	/*
	 * XXX Move this to the front of the freelist, if we can get the
	 * freelist lock.
	 */
	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	dqp->q_mount = NULL;
	dqp->q_hash = NULL;
	dqp->dq_flags = XFS_DQ_INACTIVE;
	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);
	XFS_DQ_HASH_UNLOCK(thishash);
	return (0);
}
/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
{
	xfs_dquot_t	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "DQPUT");

	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
		xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
		xfs_dqunlock(dqp);
		xfs_qm_freelist_lock(xfs_Gqm);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
			xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
			/*
			 * insert at end of the freelist.
			 */
			XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);

			/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
				 */
				xfs_dqlock(gdqp);
				dqp->q_gdquot = NULL;
			}

			/* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
			   "@@@@@++ Free list (after append) @@@@@+");
			   */
		}
		xfs_dqunlock(dqp);

		/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
		 */
		if (! gdqp)
			break;
		dqp = gdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
}
Example No. 27
0
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	xfs_dq_logitem_t    *qip)
{
	xfs_dquot_t	*dqp;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	dqp = qip->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * The qli_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(qip->qli_pushbuf_flag != 0);
	ASSERT(qip->qli_push_owner == current_pid());

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if (!issemalocked(&(dqp->q_flock))  ||
	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		qip->qli_pushbuf_flag = 0;
		xfs_dqunlock(dqp);
		return;
	}
	mp = dqp->q_mount;
	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
		    XFS_QI_DQCHUNKLEN(mp),
		    XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
				  issemalocked(&(dqp->q_flock)));
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);

			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				int	error;
#ifdef XFSRACEDEBUG
				delay_for_intr();
				delay(300);
#endif
				error = xfs_bawrite(mp, bp);
				if (error)
					xfs_fs_cmn_err(CE_WARN, mp,
	"xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
							error, qip, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);
			xfs_buf_relse(bp);
		}
		return;
	}

	qip->qli_pushbuf_flag = 0;
	xfs_dqunlock(dqp);
}