/*
 * Free all memory associated with a dquot.
 *
 * The dquot must already be off the XQM freelist (asserted below);
 * callers hand us the sole remaining reference.  Tears down the
 * per-dquot lock and pin-wait primitives, releases any trace buffer,
 * returns the structure to its slab zone, and drops the global
 * dquot count.
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));

	/* Tear down the embedded synchronization objects. */
	mutex_destroy(&dqp->q_qlock);
	sv_destroy(&dqp->q_pinwait);

#ifdef XFS_DQUOT_TRACE
	/* Release the optional trace buffer, if one was ever attached. */
	if (dqp->q_trace)
		ktrace_free(dqp->q_trace);
	dqp->q_trace = NULL;
#endif

	/* Account for the dquot going away, then give it back to the zone. */
	atomic_dec(&xfs_Gqm->qm_totaldquots);
	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
}
/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; and, these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by caller, and it is left locked
 * on return. Returning dquot is locked.
 *
 * Returns 0 with *O_dqpp set and the dquot locked on a hit;
 * returns 1 with *O_dqpp == NULL on a miss.  A found dquot with
 * q_nrefs == 0 is pulled off the freelist before the reference is
 * taken, and the entry is moved to the front of its hash chain
 * (simple MRU policy).
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	xfs_dqhash_t	*qh,
	xfs_dquot_t	**O_dqpp)
{
	xfs_dquot_t	*dqp;
	uint		flist_locked;	/* B_TRUE while we hold the XQM freelist lock */
	xfs_dquot_t	*d;		/* scratch pointer for hash-chain relinking */

	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));

	flist_locked = B_FALSE;

	/*
	 * Traverse the hashchain looking for a match
	 */
	for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
			/*
			 * All in core dquots must be on the dqlist of mp
			 */
			ASSERT(dqp->MPL_PREVP != NULL);

			xfs_dqlock(dqp);
			if (dqp->q_nrefs == 0) {
				/*
				 * Unreferenced dquots live on the freelist;
				 * we must take the freelist lock before we
				 * can safely take a reference.
				 */
				ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
					xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");

					/*
					 * Trylock failed: dqreclaim_one() may
					 * hold the freelist lock and be trying
					 * to reclaim this very dquot.  Flag
					 * that we want it (so reclaim leaves
					 * it alone), then drop the dqlock to
					 * respect lock ordering (freelist lock
					 * before dqlock) and block for the
					 * freelist lock.
					 */
					dqp->dq_flags |= XFS_DQ_WANT;
					xfs_dqunlock(dqp);
					xfs_qm_freelist_lock(xfs_Gqm);
					xfs_dqlock(dqp);
					dqp->dq_flags &= ~(XFS_DQ_WANT);
				}
				flist_locked = B_TRUE;
			}

			/*
			 * id couldn't have changed; we had the hashlock all
			 * along
			 */
			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);

			if (flist_locked) {
				if (dqp->q_nrefs != 0) {
					/*
					 * Someone grabbed a reference while
					 * we had the dqlock dropped above, so
					 * the dquot is no longer on the
					 * freelist; nothing to remove.
					 */
					xfs_qm_freelist_unlock(xfs_Gqm);
					flist_locked = B_FALSE;
				} else {
					/*
					 * take it off the freelist
					 */
					xfs_dqtrace_entry(dqp,
							  "DQLOOKUP: TAKEOFF FL");
					XQM_FREELIST_REMOVE(dqp);
				}
			}

			/*
			 * grab a reference
			 */
			XFS_DQHOLD(dqp);

			if (flist_locked)
				xfs_qm_freelist_unlock(xfs_Gqm);

			/*
			 * move the dquot to the front of the hashchain
			 * (doubly-linked list surgery via the HL_PREVP
			 * back-pointer-to-pointer).
			 */
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			if (dqp->HL_PREVP != &qh->qh_next) {
				xfs_dqtrace_entry(dqp,
						  "DQLOOKUP: HASH MOVETOFRONT");
				if ((d = dqp->HL_NEXT))
					d->HL_PREVP = dqp->HL_PREVP;
				*(dqp->HL_PREVP) = d;
				d = qh->qh_next;
				d->HL_PREVP = &dqp->HL_NEXT;
				dqp->HL_NEXT = d;
				dqp->HL_PREVP = &qh->qh_next;
				qh->qh_next = dqp;
			}
			xfs_dqtrace_entry(dqp, "LOOKUP END");
			*O_dqpp = dqp;
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			return (0);
		}
	}

	/* Not found; hashlock is still held, per the contract above. */
	*O_dqpp = NULL;
	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
	return (1);
}
/*
 * Purge an unreferenced dquot from the incore lists so its memory can
 * be reclaimed (quota-off / unmount path).
 *
 * Called with the mount's dquot list lock AND the dquot's hashlist
 * lock held.  Returns 0 on success with both the hashlist lock and the
 * dquot released; returns 1 (without purging) if the dquot still has
 * references, again dropping the hashlock.  'flags' is unused (see
 * ARGSUSED) — presumably kept for a common function-pointer signature;
 * TODO confirm against callers.
 */
/* ARGSUSED */
int
xfs_qm_dqpurge(
	xfs_dquot_t	*dqp,
	uint		flags)
{
	xfs_dqhash_t	*thishash;
	xfs_mount_t	*mp;

	mp = dqp->q_mount;

	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));

	xfs_dqlock(dqp);
	/*
	 * We really can't afford to purge a dquot that is
	 * referenced, because these are hard refs.
	 * It shouldn't happen in general because we went thru _all_ inodes in
	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
	 * However it is possible that we have dquots with temporary
	 * references that are not attached to an inode. e.g. see xfs_setattr().
	 */
	if (dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
		return (1);
	}

	/* Unreferenced dquots must be sitting on the freelist. */
	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * XXXIf we're turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
		/* dqflush unlocks dqflock */
		/*
		 * Given that dqpurge is a very rare occurrence, it is OK
		 * that we're holding the hashlist and mplist locks
		 * across the disk write. But, ... XXXsup
		 *
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
		/* Re-acquire the flush lock that dqflush released. */
		xfs_dqflock(dqp);
	}

	/* After the sync flush, the dquot must be unpinned and off the AIL
	 * (unless the filesystem is shutting down). */
	ASSERT(dqp->q_pincount == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	/* Unhook from both incore lists; remember the hash bucket so we can
	 * unlock it after q_hash is cleared below. */
	thishash = dqp->q_hash;
	XQM_HASHLIST_REMOVE(thishash, dqp);
	XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
	/*
	 * XXX Move this to the front of the freelist, if we can get the
	 * freelist lock.
	 */
	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/* Scrub the dquot so a later reuse from the freelist starts clean. */
	dqp->q_mount = NULL;
	dqp->q_hash = NULL;
	dqp->dq_flags = XFS_DQ_INACTIVE;
	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);
	XFS_DQ_HASH_UNLOCK(thishash);
	return (0);
}