/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t	*tp)
{
	int		i, j;
	xfs_dquot_t	*dqp;
	xfs_dqtrx_t	*qtrx, *qa;
	boolean_t	locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	qa = tp->t_dqinfo->dqa_usrdquots;

	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = B_FALSE;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = B_TRUE;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}

			if (locked)
				xfs_dqunlock(dqp);
		}
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id,
	struct xfs_dquot	*dqp)
{
	int			error;

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		/* Duplicate found!  Caller must try again. */
		WARN_ON(error != -EEXIST);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		return error;
	}

	/* Return a locked dquot to the caller, with a reference taken. */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	return 0;
}
/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
	struct xfs_mount	*mp,
	struct xfs_quotainfo	*qi,
	struct radix_tree_root	*tree,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (!dqp) {
		mutex_unlock(&qi->qi_tree_lock);
		XFS_STATS_INC(mp, xs_qm_dqcachemisses);
		return NULL;
	}

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_freeing(dqp);
		delay(1);
		goto restart;
	}

	dqp->q_nrefs++;
	mutex_unlock(&qi->qi_tree_lock);

	trace_xfs_dqget_hit(dqp);
	XFS_STATS_INC(mp, xs_qm_dqcachehits);
	return dqp;
}
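Taken together, the insert and lookup helpers above form an optimistic cache-fill loop: look up under the tree lock; on a miss, build the new object with no locks held, then try to insert it, and if another thread won the race, throw the new copy away and retry. The following user-space sketch illustrates that retry discipline under stated assumptions: the names (cache_get, struct obj) are hypothetical, a direct-mapped array plus one pthread mutex stands in for the radix tree and qi_tree_lock, and ids are assumed smaller than the table size. It shows the pattern, not the XFS API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 64	/* assume ids < TABLE_SIZE: one slot per id */

/* Hypothetical stand-in for an in-core dquot: an id plus a refcount. */
struct obj {
	unsigned int	id;
	int		nrefs;
};

static struct obj	*table[TABLE_SIZE];	/* stands in for the radix tree */
static pthread_mutex_t	table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lookup under the table lock; take a reference on a hit. */
static struct obj *
cache_lookup(unsigned int id)
{
	struct obj	*o;

	pthread_mutex_lock(&table_lock);
	o = table[id];
	if (o)
		o->nrefs++;		/* cache hit: reference taken */
	pthread_mutex_unlock(&table_lock);
	return o;
}

/* Insert a freshly built object; fail if someone beat us to it. */
static int
cache_insert(struct obj *o)
{
	int	error = 0;

	pthread_mutex_lock(&table_lock);
	if (table[o->id]) {
		error = -1;		/* duplicate: caller must retry */
	} else {
		o->nrefs = 1;		/* returned held, like dqget */
		table[o->id] = o;
	}
	pthread_mutex_unlock(&table_lock);
	return error;
}

/* The dqget-style loop: lookup, else build and insert, retry on a race. */
static struct obj *
cache_get(unsigned int id)
{
	struct obj	*o;

restart:
	o = cache_lookup(id);
	if (o)
		return o;

	/* "Read from disk" with no locks held, then try to insert. */
	o = calloc(1, sizeof(*o));
	o->id = id;
	if (cache_insert(o)) {
		free(o);		/* lost the race: start over */
		goto restart;
	}
	return o;
}

int
main(void)
{
	struct obj	*o = cache_get(42);

	printf("id=%u nrefs=%d\n", o->id, o->nrefs);
	return 0;
}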
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}

	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.  We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 3 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	xfs_trans_t	*tp,
	xfs_dqtrx_t	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}
/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp);
	xfs_dqtrace_entry(dqp, "DQRELE");

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}
/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (INT_GET(d1->q_core.d_id, ARCH_CONVERT) >
		    INT_GET(d2->q_core.d_id, ARCH_CONVERT)) {
			xfs_dqlock(d2);
			xfs_dqlock(d1);
		} else {
			xfs_dqlock(d1);
			xfs_dqlock(d2);
		}
	} else {
		if (d1) {
			xfs_dqlock(d1);
		} else if (d2) {
			xfs_dqlock(d2);
		}
	}
}
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			xfs_dqlock(d2);
			xfs_dqlock(d1);
		} else {
			xfs_dqlock(d1);
			xfs_dqlock(d2);
		}
	} else {
		if (d1) {
			xfs_dqlock(d1);
		} else if (d2) {
			xfs_dqlock(d2);
		}
	}
}
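Both generations of xfs_dqlock2 encode the same rule: when two dquots must be held at once, acquire them in a single global order (by on-disk id, lowest first), so two tasks locking the same pair from opposite argument orders can never ABBA-deadlock. A minimal user-space sketch of that rule, assuming hypothetical names and pthread mutexes in place of the dquot lock, and assuming distinct objects as the ASSERT requires:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical object carrying an id and its own lock. */
struct dq {
	unsigned int	id;
	pthread_mutex_t	lock;
};

/*
 * Lock two objects in one global order - lowest id first, as in
 * xfs_dqlock2 - so concurrent callers passing the same pair in either
 * argument order always acquire the locks in the same sequence.
 */
static void
lock2(struct dq *d1, struct dq *d2)
{
	if (d1 && d2) {
		/* assumes d1 != d2, like the ASSERT in xfs_dqlock2 */
		if (d1->id > d2->id) {
			pthread_mutex_lock(&d2->lock);
			pthread_mutex_lock(&d1->lock);
		} else {
			pthread_mutex_lock(&d1->lock);
			pthread_mutex_lock(&d2->lock);
		}
	} else if (d1) {
		pthread_mutex_lock(&d1->lock);
	} else if (d2) {
		pthread_mutex_lock(&d2->lock);
	}
}

int
main(void)
{
	struct dq a = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct dq b = { 2, PTHREAD_MUTEX_INITIALIZER };

	/* Both calls take a.lock (id 1) before b.lock (id 2). */
	lock2(&a, &b);
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);

	lock2(&b, &a);
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);

	printf("same order either way: no ABBA deadlock\n");
	return 0;
}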
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 * Returns EDQUOT if quota is exceeded.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	int		error;
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	time_t		timer;
	xfs_qwarncnt_t	warns;
	xfs_qwarncnt_t	warnlimit;
	xfs_qcnt_t	count;
	xfs_qcnt_t	*resbcountp;
	xfs_quotainfo_t	*q = mp->m_quotainfo;

	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqlock(dqp);
	}
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = q->qi_bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = q->qi_rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
		resbcountp = &dqp->q_res_rtbcount;
	}
	error = 0;

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
		cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
			  " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			    (hardlimit <= nblks + *resbcountp)) {
				error = EDQUOT;
				goto error_return;
			}

			if (softlimit > 0ULL &&
			    (softlimit <= nblks + *resbcountp)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
		if (ninos > 0) {
			count = be64_to_cpu(dqp->q_core.d_icount);
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = q->qi_ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = q->qi_isoftlimit;
			if (hardlimit > 0ULL && count >= hardlimit) {
				error = EDQUOT;
				goto error_return;
			} else if (softlimit > 0ULL && count >= softlimit) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

error_return:
	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqunlock(dqp);
	}
	return (error);
}
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct xfs_dquot	*gdqp = NULL;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/*
	 * If this quota has a group hint attached, prepare for releasing it
	 * now.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);

	if (gdqp)
		xfs_qm_dqput(gdqp);
	return 0;
}
/*
 * Return the dquot for a given inode and type.  If @can_alloc is true, then
 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 * already have a dquot of the given type attached.
 */
int
xfs_qm_dqget_inode(
	struct xfs_inode	*ip,
	uint			type,
	bool			can_alloc,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id;
	int			error;

	error = xfs_qm_dqget_checks(mp, type);
	if (error)
		return error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_inode_dquot(ip, type) == NULL);

	id = xfs_qm_id_for_quotatype(ip, type);

restart:
	dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
	if (dqp) {
		*O_dqpp = dqp;
		return 0;
	}

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * A dquot could be attached to this inode by now, since we had
	 * dropped the ilock.
	 */
	if (xfs_this_quota_on(mp, type)) {
		struct xfs_dquot	*dqp1;

		dqp1 = xfs_inode_dquot(ip, type);
		if (dqp1) {
			xfs_qm_dqdestroy(dqp);
			dqp = dqp1;
			xfs_dqlock(dqp);
			goto dqret;
		}
	} else {
		/* inode stays locked on return */
		xfs_qm_dqdestroy(dqp);
		return -ESRCH;
	}

	error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
	if (error) {
		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(mp, xs_qm_dquot_dups);
		goto restart;
	}

dqret:
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
/*
 * This reserves disk blocks and inodes against a dquot.
 * The dquot is locked here; flags indicate if the blk reservation
 * is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	xfs_qcnt_t		hardlimit;
	xfs_qcnt_t		softlimit;
	time_t			timer;
	xfs_qwarncnt_t		warns;
	xfs_qwarncnt_t		warnlimit;
	xfs_qcnt_t		total_count;
	xfs_qcnt_t		*resbcountp;
	xfs_quotainfo_t		*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(dqp, q);

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = defq->bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = defq->bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = defq->rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = defq->rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) ||
	     (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) {
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			total_count = *resbcountp + nblks;
			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_BSOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
			}
		}
		if (ninos > 0) {
			total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = defq->ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = defq->isoftlimit;

			if (hardlimit && total_count > hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
				goto error_return;
			}
			if (softlimit && total_count > softlimit) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_ISOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (flags & XFS_QMOPT_ENOSPC)
		return -ENOSPC;
	return -EDQUOT;
}
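The enforcement branch above reduces to one rule per resource: refuse outright once the reservation would cross the hard limit; past the soft limit, keep allowing (with a warning) until the grace timer has expired or the warning budget is spent. Below is a self-contained condensation of the block case, with a hypothetical name (may_reserve) and plain time() in place of get_seconds(); it mirrors the checks, not the XFS interfaces.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/*
 * Hypothetical condensation of the nblks > 0 branch of xfs_trans_dqresv:
 * reject past the hard limit; past the soft limit, reject only once the
 * grace timer has expired or the warning budget is used up.
 */
static bool
may_reserve(
	unsigned long long	resbcount,	/* current reservation */
	long			nblks,		/* additional blocks wanted */
	unsigned long long	hardlimit,	/* 0 means no limit */
	unsigned long long	softlimit,	/* 0 means no limit */
	time_t			timer,		/* grace expiry, 0 if unset */
	unsigned int		warns,
	unsigned int		warnlimit)
{
	unsigned long long	total = resbcount + nblks;

	if (hardlimit && total > hardlimit)
		return false;			/* EDQUOT */
	if (softlimit && total > softlimit) {
		if ((timer != 0 && time(NULL) > timer) ||
		    (warns != 0 && warns >= warnlimit))
			return false;		/* grace over: EDQUOT */
		/* over the soft limit but within grace: allow and warn */
	}
	return true;
}

int
main(void)
{
	/* 90 blocks reserved, soft limit 100, hard limit 120 */
	printf("%d\n", may_reserve(90, 20, 120, 100, 0, 0, 5)); /* 1: grace */
	printf("%d\n", may_reserve(90, 40, 120, 100, 0, 0, 5)); /* 0: hard */
	return 0;
}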
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 * Returns EDQUOT if quota is exceeded.
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	int		error;
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	time_t		btimer;
	xfs_qcnt_t	*resbcountp;

	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqlock(dqp);
	}
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT);
		softlimit = INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT);
		btimer = INT_GET(dqp->q_core.d_btimer, ARCH_CONVERT);
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = INT_GET(dqp->q_core.d_rtb_hardlimit, ARCH_CONVERT);
		softlimit = INT_GET(dqp->q_core.d_rtb_softlimit, ARCH_CONVERT);
		btimer = INT_GET(dqp->q_core.d_rtbtimer, ARCH_CONVERT);
		resbcountp = &dqp->q_res_rtbcount;
	}
	error = 0;

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    !INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT) &&
	    XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
		printk("BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?\n",
			nblks, *resbcountp, hardlimit);
#endif
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			    (hardlimit <= nblks + *resbcountp)) {
				error = EDQUOT;
				goto error_return;
			}

			if (softlimit > 0ULL &&
			    (softlimit <= nblks + *resbcountp)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((btimer != 0 && CURRENT_TIME > btimer) ||
				    (!INT_ISZERO(dqp->q_core.d_bwarns,
						 ARCH_CONVERT) &&
				     INT_GET(dqp->q_core.d_bwarns,
					     ARCH_CONVERT) >=
				     XFS_QI_BWARNLIMIT(dqp->q_mount))) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
		if (ninos > 0) {
			if (INT_GET(dqp->q_core.d_ino_hardlimit,
				    ARCH_CONVERT) > 0ULL &&
			    INT_GET(dqp->q_core.d_icount, ARCH_CONVERT) >=
			    INT_GET(dqp->q_core.d_ino_hardlimit,
				    ARCH_CONVERT)) {
				error = EDQUOT;
				goto error_return;
			} else if (INT_GET(dqp->q_core.d_ino_softlimit,
					   ARCH_CONVERT) > 0ULL &&
				   INT_GET(dqp->q_core.d_icount,
					   ARCH_CONVERT) >=
				   INT_GET(dqp->q_core.d_ino_softlimit,
					   ARCH_CONVERT)) {
				/*
				 * If timer or warnings has expired,
				 * return EDQUOT
				 */
				if ((!INT_ISZERO(dqp->q_core.d_itimer,
						 ARCH_CONVERT) &&
				     CURRENT_TIME >
				     INT_GET(dqp->q_core.d_itimer,
					     ARCH_CONVERT)) ||
				    (!INT_ISZERO(dqp->q_core.d_iwarns,
						 ARCH_CONVERT) &&
				     INT_GET(dqp->q_core.d_iwarns,
					     ARCH_CONVERT) >=
				     XFS_QI_IWARNLIMIT(dqp->q_mount))) {
					error = EDQUOT;
					goto error_return;
				}
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	ASSERT(dqp->q_res_bcount >=
	       INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
	ASSERT(dqp->q_res_rtbcount >=
	       INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
	ASSERT(dqp->q_res_icount >=
	       INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));

error_return:
	if (! (flags & XFS_QMOPT_DQLOCK)) {
		xfs_dqunlock(dqp);
	}
	return (error);
}
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	xfs_dquot_t	*dqp;
	xfs_dqhash_t	*h;
	uint		version;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}
	h = XFS_DQ_HASH(mp, id, type);

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			cmn_err(CE_DEBUG, "Returning error in dqget");
			return (EIO);
		}
	}
#endif

again:

#ifdef DEBUG
	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		if (type == XFS_DQ_USER)
			ASSERT(ip->i_udquot == NULL);
		else
			ASSERT(ip->i_gdquot == NULL);
	}
#endif
	XFS_DQ_HASH_LOCK(h);

	/*
	 * Look in the cache (hashtable).
	 * The chain is kept locked during lookup.
	 */
	if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
		/*
		 * The dquot was found, moved to the front of the chain,
		 * taken off the freelist if it was on it, and locked
		 * at this point. Just unlock the hashchain and return.
		 */
		ASSERT(*O_dqpp);
		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
		XFS_DQ_HASH_UNLOCK(h);
		xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
		return (0);	/* success */
	}
	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Save the hashchain version stamp, and unlock the chain, so that
	 * we don't keep the lock across a disk read
	 */
	version = h->qh_version;
	XFS_DQ_HASH_UNLOCK(h);

	/*
	 * Allocate the dquot on the kernel heap, and read the ondisk
	 * portion off the disk. Also, do all the necessary initialization
	 * This can return ENOENT if dquot didn't exist on disk and we didn't
	 * ask it to allocate; ESRCH if quotas got turned off suddenly.
	 */
	if ((error = xfs_qm_idtodq(mp, id, type,
				   flags & (XFS_QMOPT_DQALLOC |
					    XFS_QMOPT_DQREPAIR |
					    XFS_QMOPT_DOWARN),
				   &dqp))) {
		if (ip)
			xfs_ilock(ip, XFS_ILOCK_EXCL);
		return (error);
	}

	/*
	 * See if this is mount code calling to look at the overall quota
	 * limits which are stored in the id == 0 user or group's dquot.
	 * Since we may not have done a quotacheck by this point, just return
	 * the dquot without attaching it to any hashtables, lists, etc, or
	 * even taking a reference.
	 * The caller must dqdestroy this once done.
	 */
	if (flags & XFS_QMOPT_DQSUSER) {
		ASSERT(id == 0);
		ASSERT(! ip);
		goto dqret;
	}

	/*
	 * Dquot lock comes after hashlock in the lock ordering
	 */
	if (ip) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (! XFS_IS_DQTYPE_ON(mp, type)) {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (type == XFS_DQ_USER) {
			if (ip->i_udquot) {
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_udquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			if (ip->i_gdquot) {
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_gdquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		}
	}

	/*
	 * Hashlock comes after ilock in lock order
	 */
	XFS_DQ_HASH_LOCK(h);
	if (version != h->qh_version) {
		xfs_dquot_t *tmpdqp;
		/*
		 * Now, see if somebody else put the dquot in the
		 * hashtable before us. This can happen because we didn't
		 * keep the hashchain lock. We don't have to worry about
		 * lock order between the two dquots here since dqp isn't
		 * on any findable lists yet.
		 */
		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
			/*
			 * Duplicate found. Just throw away the new dquot
			 * and start over.
			 */
			xfs_qm_dqput(tmpdqp);
			XFS_DQ_HASH_UNLOCK(h);
			xfs_qm_dqdestroy(dqp);
			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
			goto again;
		}
	}

	/*
	 * Put the dquot at the beginning of the hash-chain and mp's list
	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
	 */
	ASSERT(XFS_DQ_IS_HASH_LOCKED(h));
	dqp->q_hash = h;
	XQM_HASHLIST_INSERT(h, dqp);

	/*
	 * Attach this dquot to this filesystem's list of all dquots,
	 * kept inside the mount structure in m_quotainfo field
	 */
	xfs_qm_mplist_lock(mp);

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	XQM_MPLIST_INSERT(&(XFS_QI_MPL_LIST(mp)), dqp);

	xfs_qm_mplist_unlock(mp);
	XFS_DQ_HASH_UNLOCK(h);
dqret:
	ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip));
	xfs_dqtrace_entry(dqp, "DQGET DONE");
	*O_dqpp = dqp;
	return (0);
}
/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; and, these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by caller, and it is left locked
 * on return. Returning dquot is locked.
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	xfs_dqhash_t	*qh,
	xfs_dquot_t	**O_dqpp)
{
	xfs_dquot_t	*dqp;
	uint		flist_locked;
	xfs_dquot_t	*d;

	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));

	flist_locked = B_FALSE;

	/*
	 * Traverse the hashchain looking for a match
	 */
	for (dqp = qh->qh_next; dqp != NULL; dqp = dqp->HL_NEXT) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (be32_to_cpu(dqp->q_core.d_id) == id &&
		    dqp->q_mount == mp) {
			xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
			/*
			 * All in core dquots must be on the dqlist of mp
			 */
			ASSERT(dqp->MPL_PREVP != NULL);

			xfs_dqlock(dqp);
			if (dqp->q_nrefs == 0) {
				ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
				if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
					xfs_dqtrace_entry(dqp,
							"DQLOOKUP: WANT");

					/*
					 * We may have raced with
					 * dqreclaim_one() (and lost). So,
					 * flag that we don't want the dquot
					 * to be reclaimed.
					 */
					dqp->dq_flags |= XFS_DQ_WANT;
					xfs_dqunlock(dqp);
					xfs_qm_freelist_lock(xfs_Gqm);
					xfs_dqlock(dqp);
					dqp->dq_flags &= ~(XFS_DQ_WANT);
				}
				flist_locked = B_TRUE;
			}

			/*
			 * id couldn't have changed; we had the hashlock all
			 * along
			 */
			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);

			if (flist_locked) {
				if (dqp->q_nrefs != 0) {
					xfs_qm_freelist_unlock(xfs_Gqm);
					flist_locked = B_FALSE;
				} else {
					/*
					 * take it off the freelist
					 */
					xfs_dqtrace_entry(dqp,
						"DQLOOKUP: TAKEOFF FL");
					XQM_FREELIST_REMOVE(dqp);
					/* xfs_qm_freelist_print(&(xfs_Gqm->
							qm_dqfreelist),
							"after removal"); */
				}
			}

			/*
			 * grab a reference
			 */
			XFS_DQHOLD(dqp);

			if (flist_locked)
				xfs_qm_freelist_unlock(xfs_Gqm);
			/*
			 * move the dquot to the front of the hashchain
			 */
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			if (dqp->HL_PREVP != &qh->qh_next) {
				xfs_dqtrace_entry(dqp,
						"DQLOOKUP: HASH MOVETOFRONT");
				if ((d = dqp->HL_NEXT))
					d->HL_PREVP = dqp->HL_PREVP;
				*(dqp->HL_PREVP) = d;
				d = qh->qh_next;
				d->HL_PREVP = &dqp->HL_NEXT;
				dqp->HL_NEXT = d;
				dqp->HL_PREVP = &qh->qh_next;
				qh->qh_next = dqp;
			}
			xfs_dqtrace_entry(dqp, "LOOKUP END");
			*O_dqpp = dqp;
			ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
			return (0);
		}
	}

	*O_dqpp = NULL;
	ASSERT(XFS_DQ_IS_HASH_LOCKED(qh));
	return (1);
}
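The move-to-front block in the middle of this lookup uses the classic prev-pointer-pointer idiom: each node stores the address of whatever pointer points at it (HL_PREVP), so unlinking never needs a walk back to the list head. Here is a stand-alone sketch of that idiom with hypothetical names (struct node, move_to_front); the XFS hash chain works the same way, just with dquots and the hash lock held:

#include <stdio.h>

/*
 * Each node keeps next plus the address of the pointer that points at
 * it, so removal is O(1) from anywhere in the chain.
 */
struct node {
	int		id;
	struct node	*next;
	struct node	**prevp;	/* address of the pointer to us */
};

static void
push_front(struct node **head, struct node *n)
{
	n->next = *head;
	if (n->next)
		n->next->prevp = &n->next;
	n->prevp = head;
	*head = n;
}

static void
move_to_front(struct node **head, struct node *n)
{
	if (n->prevp == head)
		return;			/* already first */
	/* unlink: make whatever pointed at us point past us */
	*n->prevp = n->next;
	if (n->next)
		n->next->prevp = n->prevp;
	push_front(head, n);
}

int
main(void)
{
	struct node	a = { 1 }, b = { 2 }, c = { 3 };
	struct node	*head = NULL, *p;

	push_front(&head, &c);
	push_front(&head, &b);
	push_front(&head, &a);		/* list: 1 2 3 */
	move_to_front(&head, &c);	/* list: 3 1 2 */

	for (p = head; p; p = p->next)
		printf("%d ", p->id);
	printf("\n");
	return 0;
}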
/* ARGSUSED */
int
xfs_qm_dqpurge(
	xfs_dquot_t	*dqp,
	uint		flags)
{
	xfs_dqhash_t	*thishash;
	xfs_mount_t	*mp;

	mp = dqp->q_mount;

	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	ASSERT(XFS_DQ_IS_HASH_LOCKED(dqp->q_hash));

	xfs_dqlock(dqp);
	/*
	 * We really can't afford to purge a dquot that is
	 * referenced, because these are hard refs.
	 * It shouldn't happen in general because we went thru _all_ inodes in
	 * dqrele_all_inodes before calling this and didn't let the mountlock
	 * go. However it is possible that we have dquots with temporary
	 * references that are not attached to an inode. e.g. see
	 * xfs_setattr().
	 */
	if (dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
		return (1);
	}

	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (! xfs_qm_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * XXXIf we're turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
		/* dqflush unlocks dqflock */
		/*
		 * Given that dqpurge is a very rare occurrence, it is OK
		 * that we're holding the hashlist and mplist locks
		 * across the disk write. But, ... XXXsup
		 *
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		(void) xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
		xfs_dqflock(dqp);
	}
	ASSERT(dqp->q_pincount == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	thishash = dqp->q_hash;
	XQM_HASHLIST_REMOVE(thishash, dqp);
	XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(mp)), dqp);
	/*
	 * XXX Move this to the front of the freelist, if we can get the
	 * freelist lock.
	 */
	ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));

	dqp->q_mount = NULL;
	dqp->q_hash = NULL;
	dqp->dq_flags = XFS_DQ_INACTIVE;
	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);
	XFS_DQ_HASH_UNLOCK(thishash);
	return (0);
}
/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
{
	xfs_dquot_t	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "DQPUT");

	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
		xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
		xfs_dqunlock(dqp);
		xfs_qm_freelist_lock(xfs_Gqm);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
			xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
			/*
			 * insert at end of the freelist.
			 */
			XQM_FREELIST_INSERT(&(xfs_Gqm->qm_dqfreelist), dqp);

			/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
				 */
				xfs_dqlock(gdqp);
				dqp->q_gdquot = NULL;
			}

			/* xfs_qm_freelist_print(&(xfs_Gqm->qm_dqfreelist),
			   "@@@@@++ Free list (after append) @@@@@+"); */
		}
		xfs_dqunlock(dqp);

		/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
		 */
		if (! gdqp)
			break;
		dqp = gdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);
}
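The while loop here is deliberate recursion avoidance: when dropping the last reference also releases the cached gdquot hint, the function does not call itself on the hint; it hands the hint back to the top of the loop and keeps iterating. A user-space sketch of the same shape, with hypothetical types and a print standing in for the freelist insert:

#include <stdio.h>

/*
 * Hypothetical refcounted object that may hold a reference (hint) on
 * another object of the same kind.
 */
struct obj {
	const char	*name;
	int		nrefs;
	struct obj	*hint;		/* reference we hold on another obj */
};

static void
put(struct obj *o)
{
	while (o) {
		struct obj	*next = NULL;

		if (--o->nrefs == 0) {
			/* last ref: also drop the hint reference we held */
			next = o->hint;
			o->hint = NULL;
			printf("%s now unused\n", o->name);
		}
		o = next;	/* iterate instead of recursing */
	}
}

int
main(void)
{
	struct obj	g = { "gdquot", 2, NULL };	/* 2nd ref held by u */
	struct obj	u = { "udquot", 1, &g };

	put(&u);	/* drops u, which in turn drops u's ref on g */
	put(&g);	/* drop the caller's own ref on g */
	return 0;
}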
/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We
	 * hold a reference to the dquot, so it's safe to do this unlock/lock
	 * without it being reclaimed in the mean time.
	 */
	error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp);
	if (error) {
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_dqunlock(dqp);

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_rele;
	}

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 */
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		xfs_dquot_set_prealloc_limits(dqp);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
	}
	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
	}

	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_INO_WARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used. Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & QC_SPC_TIMER) {
			q->qi_btimelimit = newlim->d_spc_timer;
			ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
		}
		if (newlim->d_fieldmask & QC_INO_TIMER) {
			q->qi_itimelimit = newlim->d_ino_timer;
			ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
		}
		if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
			q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
		}
		if (newlim->d_fieldmask & QC_SPC_WARNS)
			q->qi_bwarnlimit = newlim->d_spc_warns;
		if (newlim->d_fieldmask & QC_INO_WARNS)
			q->qi_iwarnlimit = newlim->d_ino_warns;
		if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
			q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}