int
xfs_qm_scall_getquota(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	fs_disk_quota_t *out)
{
	xfs_dquot_t	*dqp;
	int		error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so
	 * we aren't passing the XFS_QMOPT_DQALLOC flag. If it doesn't
	 * exist, we'll get ENOENT back.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
		return (error);
	}

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		xfs_qm_dqput(dqp);
		return XFS_ERROR(ENOENT);
	}
	/* xfs_qm_dqprint(dqp); */
	/*
	 * Convert the disk dquot to the exportable format
	 */
	xfs_qm_export_dquot(mp, &dqp->q_core, out);
	xfs_qm_dqput(dqp);
	return 0;
}
Example #2
/*
 * Starting at @id and progressing upwards, look for an initialized incore
 * dquot, lock it, and return it.
 */
int
xfs_qm_dqget_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct xfs_dquot	**dqpp)
{
	struct xfs_dquot	*dqp;
	int			error = 0;

	*dqpp = NULL;
	for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
		error = xfs_qm_dqget(mp, id, type, false, &dqp);
		if (error == -ENOENT)
			continue;
		else if (error != 0)
			break;

		if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			*dqpp = dqp;
			return 0;
		}

		xfs_qm_dqput(dqp);
	}

	return error;
}
Example #3
/*
 * Iterate every dquot of a particular type.  The caller must ensure that the
 * particular quota type is active.  iter_fn can return negative error codes,
 * or XFS_BTREE_QUERY_RANGE_ABORT to indicate that it wants to stop iterating.
 */
int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	uint			dqtype,
	xfs_qm_dqiterate_fn	iter_fn,
	void			*priv)
{
	struct xfs_dquot	*dq;
	xfs_dqid_t		id = 0;
	int			error;

	do {
		error = xfs_qm_dqget_next(mp, id, dqtype, &dq);
		if (error == -ENOENT)
			return 0;
		if (error)
			return error;

		error = iter_fn(dq, dqtype, priv);
		id = be32_to_cpu(dq->q_core.d_id);
		xfs_qm_dqput(dq);
		id++;
	} while (error == 0 && id != 0);

	return error;
}
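
A hedged sketch of an iter_fn matching the callback shape used above (dquot, quota type, private pointer); xfs_count_busy_dquots and the caller fragment are hypothetical names, not taken from the kernel tree.

/*
 * Hypothetical callback: count dquots of this type that currently have
 * blocks accounted to them. The dquot arrives locked and referenced;
 * xfs_qm_dqiterate() drops both after we return. A nonzero return value
 * stops the walk.
 */
STATIC int
xfs_count_busy_dquots(
	struct xfs_dquot	*dq,
	uint			dqtype,
	void			*priv)
{
	unsigned long		*busy = priv;

	if (dq->q_res_bcount > 0)
		(*busy)++;
	return 0;
}

/*
 * Caller sketch:
 *	unsigned long busy = 0;
 *	error = xfs_qm_dqiterate(mp, XFS_DQ_USER, xfs_count_busy_dquots, &busy);
 */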
Example #4
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}
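
For context, a hedged sketch of the thin wrapper that lands in this function once the last reference is dropped, modeled on xfs_qm_dqput() from the same era of the code; treat it as illustrative rather than a verbatim copy.

void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	/* Only the last reference falls through to the freelist logic. */
	if (--dqp->q_nrefs > 0)
		xfs_dqunlock(dqp);
	else
		xfs_qm_dqput_final(dqp);
}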
Example #5
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
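
A hedged caller sketch, following the shape of the quotacheck walk that invokes this helper once per enabled quota type; the locals (nblks, rtblks, error) and the error0 label are assumed from the surrounding function.

	/*
	 * Sketch: account one inode against each quota type that is
	 * currently enabled.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}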
Example #6
/*
 * Directory tree accounting is implemented using project quotas, where
 * the project identifier is inherited from parent directories.
 * A statvfs (df, etc.) of a directory that is using project quota should
 * return a statvfs of the project, not the entire filesystem.
 * This makes such trees appear as if they are filesystems in themselves.
 */
void
xfs_qm_statvfs(
	xfs_inode_t		*ip,
	struct kstatfs		*statp)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_dquot_t		*dqp;

	if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) {
		xfs_fill_statvfs_from_dquot(statp, dqp);
		xfs_qm_dqput(dqp);
	}
}
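
A hedged sketch of the gating a statfs implementation performs before calling the function above; the exact flag test varies by kernel version, but only project-inherit directories on a filesystem with project quota accounting get the substitution.

	/* Sketch: apply per-project free-space accounting to df output. */
	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT))
		xfs_qm_statvfs(ip, statp);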
Example #7
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
Example #8
STATIC void
xfs_dqtest_cmp(
	xfs_dqtest_t	*d)
{
	xfs_dquot_t	*dqp;
	int		error;

	/* xfs_qm_dqtest_print(d); */
	if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0,
				 &dqp))) {
		xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error);
		return;
	}
	xfs_dqtest_cmp2(d, dqp);
	xfs_qm_dqput(dqp);
}
Example #9
/*
 * Release a dquot: drop a reference by dqput()ing it. The dquot is
 * deliberately not flushed here, even if dirty (see below).
 * The dquot must not be locked on entry.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp);
	xfs_dqtrace_entry(dqp, "DQRELE");

	xfs_dqlock(dqp);
	/*
	 * We don't flush the dquot here even if it is dirty; doing so on
	 * every release would create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim a dirty
	 * dquot. xfs_sync will also take part of the burden.
	 */
	xfs_qm_dqput(dqp);
}
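
A minimal usage sketch for xfs_qm_dqrele(): dropping the long-lived references an inode holds, in the style of a dquot-detach path. Note that both dquots are unlocked here, as the function requires.

	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}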
Example #10
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quota off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}
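
A hedged sketch of how a purge callback like this is driven, assuming the xfs_qm_dquot_walk() iterator from the same era of the code; quota-off and unmount both funnel through something of this shape.

	/* Sketch: purge all dquots of each requested type. */
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);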
Example #11
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	xfs_dquot_t	*dqp;
	xfs_dqhash_t	*h;
	uint		version;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}
	h = XFS_DQ_HASH(mp, id, type);

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			cmn_err(CE_DEBUG, "Returning error in dqget");
			return (EIO);
		}
	}
#endif

 again:

#ifdef DEBUG
	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		if (type == XFS_DQ_USER)
			ASSERT(ip->i_udquot == NULL);
		else
			ASSERT(ip->i_gdquot == NULL);
	}
#endif
	XFS_DQ_HASH_LOCK(h);

	/*
	 * Look in the cache (hashtable).
	 * The chain is kept locked during lookup.
	 */
	if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
		/*
		 * The dquot was found, moved to the front of the chain,
		 * taken off the freelist if it was on it, and locked
		 * at this point. Just unlock the hashchain and return.
		 */
		ASSERT(*O_dqpp);
		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
		XFS_DQ_HASH_UNLOCK(h);
		xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
		return (0);	/* success */
	}
	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * Save the hashchain version stamp, and unlock the chain, so that
	 * we don't keep the lock across a disk read.
	 */
	version = h->qh_version;
	XFS_DQ_HASH_UNLOCK(h);

	/*
	 * Allocate the dquot on the kernel heap, and read the ondisk
	 * portion off the disk. Also do all the necessary initialization.
	 * This can return ENOENT if the dquot didn't exist on disk and we
	 * didn't ask it to allocate; ESRCH if quotas got turned off suddenly.
	 */
	if ((error = xfs_qm_idtodq(mp, id, type,
				  flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
					   XFS_QMOPT_DOWARN),
				  &dqp))) {
		if (ip)
			xfs_ilock(ip, XFS_ILOCK_EXCL);
		return (error);
	}

	/*
	 * See if this is mount code calling to look at the overall quota limits
	 * which are stored in the id == 0 user or group's dquot.
	 * Since we may not have done a quotacheck by this point, just return
	 * the dquot without attaching it to any hashtables, lists, etc., or even
	 * taking a reference.
	 * The caller must dqdestroy this once done.
	 */
	if (flags & XFS_QMOPT_DQSUSER) {
		ASSERT(id == 0);
		ASSERT(! ip);
		goto dqret;
	}

	/*
	 * Dquot lock comes after hashlock in the lock ordering
	 */
	if (ip) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (! XFS_IS_DQTYPE_ON(mp, type)) {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (type == XFS_DQ_USER) {
			if (ip->i_udquot) {
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_udquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			if (ip->i_gdquot) {
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_gdquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		}
	}

	/*
	 * Hashlock comes after ilock in lock order
	 */
	XFS_DQ_HASH_LOCK(h);
	if (version != h->qh_version) {
		xfs_dquot_t *tmpdqp;
		/*
		 * Now, see if somebody else put the dquot in the
		 * hashtable before us. This can happen because we didn't
		 * keep the hashchain lock. We don't have to worry about
		 * lock order between the two dquots here since dqp isn't
		 * on any findable lists yet.
		 */
		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
			/*
			 * Duplicate found. Just throw away the new dquot
			 * and start over.
			 */
			xfs_qm_dqput(tmpdqp);
			XFS_DQ_HASH_UNLOCK(h);
			xfs_qm_dqdestroy(dqp);
			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
			goto again;
		}
	}

	/*
	 * Put the dquot at the beginning of the hash-chain and mp's list
	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
	 */
	ASSERT(XFS_DQ_IS_HASH_LOCKED(h));
	dqp->q_hash = h;
	XQM_HASHLIST_INSERT(h, dqp);

	/*
	 * Attach this dquot to this filesystem's list of all dquots,
	 * kept inside the mount structure in m_quotainfo field
	 */
	xfs_qm_mplist_lock(mp);

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);

	xfs_qm_mplist_unlock(mp);
	XFS_DQ_HASH_UNLOCK(h);
 dqret:
	ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip));
	xfs_dqtrace_entry(dqp, "DQGET DONE");
	*O_dqpp = dqp;
	return (0);
}
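
A minimal, hedged usage sketch for the contract above: no inode, allocation requested, and the mandatory dqput() when done. Error values are positive in this era of the code.

	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER,
			     XFS_QMOPT_DQALLOC, &dqp);
	if (error)
		return error;		/* ENOENT, ESRCH, EIO, ... */

	/* dqp is returned locked, with one reference held. */

	xfs_qm_dqput(dqp);		/* unlocks and drops the reference */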
Example #12
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so
	 * we aren't passing the XFS_QMOPT_DQALLOC flag. If it doesn't
	 * exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = XFS_ERROR(ENOENT);
		goto out_put;
	}

	memset(dst, 0, sizeof(*dst));
	dst->d_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
	dst->d_ino_count = dqp->q_res_icount;
	dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rt_spc_hardlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rt_spc_softlimit =
		XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
	dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_GQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_GROUP) ||
	    (!XFS_IS_PQUOTA_ENFORCED(mp) &&
	     dqp->q_core.d_flags == XFS_DQ_PROJ)) {
		dst->d_spc_timer = 0;
		dst->d_ino_timer = 0;
		dst->d_rt_spc_timer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
	     (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
	     (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
	    id != 0) {
		if ((dst->d_space > dst->d_spc_softlimit) &&
		    (dst->d_spc_softlimit > 0)) {
			ASSERT(dst->d_spc_timer != 0);
		}
		if ((dst->d_ino_count > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_ino_timer != 0);
		}
	}
#endif
out_put:
	xfs_qm_dqput(dqp);
	return error;
}
Example #13
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return -ESRCH;
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		/* uninit / unused quota found in radix tree, keep looking  */
		if (flags & XFS_QMOPT_DQNEXT) {
			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
				xfs_dqunlock(dqp);
				mutex_unlock(&qi->qi_tree_lock);
				error = xfs_dq_get_next_id(mp, type, &id);
				if (error)
					return error;
				goto restart;
			}
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(mp, xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(mp, xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	/* If we are asked to find the next active id, keep looking */
	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
		error = xfs_dq_get_next_id(mp, type, &id);
		if (!error)
			goto restart;
	}

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return -ESRCH;
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	/* If we are asked to find the next active id, keep looking */
	if (flags & XFS_QMOPT_DQNEXT) {
		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			xfs_qm_dqput(dqp);
			error = xfs_dq_get_next_id(mp, type, &id);
			if (error)
				return error;
			goto restart;
		}
	}

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
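
For the XFS_QMOPT_DQNEXT path, a hedged sketch of a Q_XGETNEXTQUOTA-style walk: start at an id, let dqget skip uninitialized entries, and stop when the id space is exhausted or wraps.

	struct xfs_dquot	*dqp;
	xfs_dqid_t		id = 0;
	int			error;

	for (;;) {
		error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER,
				     XFS_QMOPT_DQNEXT, &dqp);
		if (error == -ENOENT)
			break;		/* ran off the end of the quota file */
		if (error)
			return error;

		/* process dqp: it is returned locked, with a reference */

		id = be32_to_cpu(dqp->q_core.d_id) + 1;
		xfs_qm_dqput(dqp);
		if (id == 0)		/* the 32-bit id space wrapped */
			break;
	}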
Example #14
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct fs_disk_quota	*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
	if (error)
		return error;

	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = XFS_ERROR(ENOENT);
		goto out_put;
	}

	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;
	dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
	dst->d_id = be32_to_cpu(dqp->q_core.d_id);
	dst->d_blk_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
	dst->d_blk_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
	dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
	dst->d_icount = dqp->q_res_icount;
	dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
	dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
	dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
	dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
	dst->d_rtb_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
	dst->d_rtb_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
	dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
	dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
	dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);

	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
		dst->d_btimer = 0;
		dst->d_itimer = 0;
		dst->d_rtbtimer = 0;
	}

#ifdef DEBUG
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
	    dst->d_id != 0) {
		if ((dst->d_bcount > dst->d_blk_softlimit) &&
		    (dst->d_blk_softlimit > 0)) {
			ASSERT(dst->d_btimer != 0);
		}
		if ((dst->d_icount > dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_itimer != 0);
		}
	}
#endif
out_put:
	xfs_qm_dqput(dqp);
	return error;
}