示例#1
0
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 * Just call bunpin() on the buffer to do this.
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 */
void
xfs_buf_item_unpin(
	xfs_buf_log_item_t	*bip,
	int			stale)
{
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	int		freed;
	SPLDECL(s);		/* saved interrupt state for AIL_LOCK below */

	bp = bip->bli_buf;
	ASSERT(bp != NULL);
	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	xfs_buf_item_trace("UNPIN", bip);
	xfs_buftrace("XFS_UNPIN", bp);

	/*
	 * Drop this transaction's reference to the item and remember
	 * whether that was the last one, then unpin the buffer itself.
	 * Grab the mount pointer first: once the item is freed below we
	 * must not touch bip again.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);
	mp = bip->bli_item.li_mountp;
	xfs_bunpin(bp);
	if (freed && stale) {
		/* Last reference to a stale item: tear it down here. */
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		xfs_buf_item_trace("UNPIN STALE", bip);
		xfs_buftrace("XFS_UNPIN STALE", bp);
		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_delete_ail()
		 * will take care of that situation.
		 * xfs_trans_delete_ail() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			/*
			 * Stale inode buffer: run the attached iodone
			 * callbacks, then detach the log item chain and
			 * the iodone function from the buffer.
			 */
			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			/*
			 * Pull the item off the AIL (this drops the AIL
			 * lock) and release the buf log item; the buffer
			 * must no longer point at it afterwards.
			 */
			AIL_LOCK(mp,s);
			xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		/* Drop the stale buffer's lock/hold taken when it was staled. */
		xfs_buf_relse(bp);
	}
示例#2
0
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	/* Sample the stale flag before the refcount drop below. */
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	/* Drop this transaction's item reference; was it the last one? */
	freed = atomic_dec_and_test(&bip->bli_refcount);

	/* Unpin the buffer and wake anyone waiting for the pin count. */
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		/* Last reference to a stale item: tear it down here. */
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			XFS_BUF_SET_FSPRIVATE2(bp, NULL);
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			/*
			 * Stale inode buffer: run the attached iodone
			 * callbacks, then detach the log item chain and
			 * the iodone function from the buffer.
			 */
			xfs_buf_do_callbacks(bp);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			/*
			 * Pull the item off the AIL (xfs_trans_ail_delete()
			 * drops ailp->xa_lock) and release the buf log item.
			 */
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		/* Drop the stale buffer's lock/hold taken when it was staled. */
		xfs_buf_relse(bp);
	}
示例#3
0
/*
 * This is the inode flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the inode is
 * flushed to disk.  It is responsible for removing the inode item
 * from the AIL if it has not been re-logged, and unlocking the inode's
 * flush lock.
 *
 * To reduce AIL lock traffic as much as possible, we scan the buffer log item
 * list for other inodes that will run this function. We remove them from the
 * buffer list so we can process all the inode IO completions in one AIL lock
 * traversal.
 */
void
xfs_iflush_done(
    struct xfs_buf		*bp,
    struct xfs_log_item	*lip)
{
    struct xfs_inode_log_item *iip;
    struct xfs_log_item	*blip;
    struct xfs_log_item	*next;
    struct xfs_log_item	*prev;
    struct xfs_ail		*ailp = lip->li_ailp;
    int			need_ail = 0;	/* items that may need AIL removal */

    /*
     * Scan the buffer IO completions for other inodes being completed and
     * attach them to the current inode log item.
     */
    blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
    prev = NULL;
    while (blip != NULL) {
        /*
         * Only steal items whose completion callback is this function.
         * We must test the list item (blip), not the passed-in item
         * (lip) -- lip's callback is xfs_iflush_done by definition, so
         * testing lip would make this condition always false and every
         * item on the buffer would be pulled onto our list and later
         * treated as an inode log item, regardless of its real type.
         */
        if (blip->li_cb != xfs_iflush_done) {
            prev = blip;
            blip = blip->li_bio_list;
            continue;
        }

        /* remove from the buffer's bio list */
        next = blip->li_bio_list;
        if (!prev) {
            XFS_BUF_SET_FSPRIVATE(bp, next);
        } else {
            prev->li_bio_list = next;
        }

        /* add to the current inode item's list */
        blip->li_bio_list = lip->li_bio_list;
        lip->li_bio_list = blip;

        /*
         * while we have the item, do the unlocked check for needing
         * the AIL lock.
         */
        iip = INODE_ITEM(blip);
        if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
            need_ail++;

        blip = next;
    }

    /* make sure we capture the state of the initial inode. */
    iip = INODE_ITEM(lip);
    if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
        need_ail++;

    /*
     * We only want to pull the item from the AIL if it is
     * actually there and its location in the log has not
     * changed since we started the flush.  Thus, we only bother
     * if the ili_logged flag is set and the inode's lsn has not
     * changed.  First we check the lsn outside
     * the lock since it's cheaper, and then we recheck while
     * holding the lock before removing the inode from the AIL.
     */
    if (need_ail) {
        struct xfs_log_item *log_items[need_ail];
        int i = 0;

        spin_lock(&ailp->xa_lock);
        /* Recheck each candidate under the lock before bulk removal. */
        for (blip = lip; blip; blip = blip->li_bio_list) {
            iip = INODE_ITEM(blip);
            if (iip->ili_logged &&
                    blip->li_lsn == iip->ili_flush_lsn) {
                log_items[i++] = blip;
            }
            ASSERT(i <= need_ail);
        }
        /* xfs_trans_ail_delete_bulk() drops the AIL lock. */
        xfs_trans_ail_delete_bulk(ailp, log_items, i);
    }

    /*
     * clean up and unlock the flush lock now we are done. We can clear the
     * ili_last_fields bits now that we know that the data corresponding to
     * them is safely on disk.
     */
    for (blip = lip; blip; blip = next) {
        next = blip->li_bio_list;
        blip->li_bio_list = NULL;

        iip = INODE_ITEM(blip);
        iip->ili_logged = 0;
        iip->ili_last_fields = 0;
        xfs_ifunlock(iip->ili_inode);
    }
}