/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 *
 * On every exit path the routine clears qli_pushbuf_flag (releasing the
 * "someone is already pushing this buffer" claim taken by our caller) and
 * drops the dquot lock via xfs_dqunlock().
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	xfs_dq_logitem_t	*qip)
{
	xfs_dquot_t	*dqp;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	dqp = qip->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	/*
	 * The qli_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(qip->qli_pushbuf_flag != 0);
	ASSERT(qip->qli_push_owner == current_pid());

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if (!issemalocked(&(dqp->q_flock)) ||
	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		qip->qli_pushbuf_flag = 0;
		xfs_dqunlock(dqp);
		return;
	}
	mp = dqp->q_mount;
	/*
	 * Trylock-probe the buffer cache for the dquot buffer; a NULL
	 * return means it is either not incore or someone else holds it.
	 */
	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
		    XFS_QI_DQCHUNKLEN(mp),
		    XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			/*
			 * Snapshot "still worth pushing" BEFORE dropping the
			 * dquot lock: once we unlock, the item may be flushed
			 * or removed from the AIL by someone else.
			 */
			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
				  issemalocked(&(dqp->q_flock)));
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);

			/*
			 * A pinned buffer cannot be written until the log is
			 * forced, so kick the log first.
			 */
			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				int	error;
#ifdef XFSRACEDEBUG
				delay_for_intr();
				delay(300);
#endif
				/*
				 * Async write; xfs_bawrite releases the
				 * buffer, so no xfs_buf_relse() here.
				 */
				error = xfs_bawrite(mp, bp);
				if (error)
					xfs_fs_cmn_err(CE_WARN, mp,
	"xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
					error, qip, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			/* Not delayed-write: nothing for us to push. */
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);
			xfs_buf_relse(bp);
		}
		return;
	}

	/* Buffer not incore (or trylock failed): give up the claim. */
	qip->qli_pushbuf_flag = 0;
	xfs_dqunlock(dqp);
}
/*
 * xfs_trans_push_ail
 *
 * This routine is called to move the tail of the AIL
 * forward. It does this by trying to flush items in the AIL
 * whose lsns are below the given threshold_lsn.
 *
 * The routine returns the lsn of the tail of the log.
 */
xfs_lsn_t
xfs_trans_push_ail(
	xfs_mount_t		*mp,
	xfs_lsn_t		threshold_lsn)
{
	xfs_lsn_t		lsn;
	xfs_log_item_t		*lip;
	int			gen;
	int			restarts;
	int			lock_result;
	int			flush_log;
	SPLDECL(s);

#define	XFS_TRANS_PUSH_AIL_RESTARTS	10

	AIL_LOCK(mp,s);
	lip = xfs_trans_first_ail(mp, &gen);
	if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * Just return if the AIL is empty.
		 */
		AIL_UNLOCK(mp, s);
		return (xfs_lsn_t)0;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out. Make sure to limit the number of times
	 * we allow xfs_trans_next_ail() to restart scanning from the
	 * beginning of the list. We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold. However, we may give up before that if
	 * we realize that we've been holding the AIL_LOCK for 'too long',
	 * blocking interrupts. Currently, too long is < 500us roughly.
	 */
	flush_log = 0;
	restarts = 0;
	while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) &&
		(XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) {
		/*
		 * If we can lock the item without sleeping, unlock
		 * the AIL lock and flush the item. Then re-grab the
		 * AIL lock so we can look for the next item on the
		 * AIL. Since we unlock the AIL while we flush the
		 * item, the next routine may start over again at the
		 * beginning of the list if anything has changed.
		 * That is what the generation count is for.
		 *
		 * If we can't lock the item, either its holder will flush
		 * it or it is already being flushed or it is being relogged.
		 * In any of these cases it is being taken care of and we
		 * can just skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		switch (lock_result) {
		      case XFS_ITEM_SUCCESS:
			AIL_UNLOCK(mp, s);
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			AIL_LOCK(mp,s);
			break;

		      case XFS_ITEM_PUSHBUF:
			AIL_UNLOCK(mp, s);
			XFS_STATS_INC(xs_push_ail_pushbuf);
#ifdef XFSRACEDEBUG
			delay_for_intr();
			delay(300);
#endif
			/*
			 * Check the item pointer before dereferencing it for
			 * the ops-vector assertion; the previous ordering
			 * dereferenced lip before asserting it was non-NULL.
			 */
			ASSERT(lip);
			ASSERT(lip->li_ops->iop_pushbuf);

			IOP_PUSHBUF(lip);
			AIL_LOCK(mp,s);
			break;

		      case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			/* Can't write it until the log is forced. */
			flush_log = 1;
			break;

		      case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			break;

		      case XFS_ITEM_FLUSHING:
			XFS_STATS_INC(xs_push_ail_flushing);
			break;

		      default:
			ASSERT(0);
			break;
		}

		lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
		if (lip == NULL) {
			break;
		}
		if (XFS_FORCED_SHUTDOWN(mp)) {
			/*
			 * Just return if we shut down during the last try.
			 */
			AIL_UNLOCK(mp, s);
			return (xfs_lsn_t)0;
		}

	}

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		AIL_UNLOCK(mp, s);
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		AIL_LOCK(mp, s);
	}

	/*
	 * Report the current log tail: the lsn of the minimum item still
	 * in the AIL, or 0 if the AIL has been emptied.
	 */
	lip = xfs_ail_min(&(mp->m_ail));
	if (lip == NULL) {
		lsn = (xfs_lsn_t)0;
	} else {
		lsn = lip->li_lsn;
	}

	AIL_UNLOCK(mp, s);
	return lsn;
}	/* xfs_trans_push_ail */