Code Example #1
/*
 * Give the buffer a little push if it is incore and
 * wait on the flush lock.
 */
void
xfs_dqflock_pushbuf_wait(
	xfs_dquot_t	*dqp)
{
	xfs_mount_t	*mp = dqp->q_mount;
	xfs_buf_t	*bp;

	/*
	 * Check to see if the dquot has been flushed delayed
	 * write.  If so, grab its buffer and send it
	 * out immediately.  We'll be able to acquire
	 * the flush lock when the I/O completes.
	 */
	bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	if (!bp)
		goto out_lock;

	if (XFS_BUF_ISDELAYWRITE(bp)) {
		if (xfs_buf_ispinned(bp))
			xfs_log_force(mp, 0);
		xfs_buf_delwri_promote(bp);
		wake_up_process(bp->b_target->bt_task);
	}
	xfs_buf_relse(bp);
out_lock:
	xfs_dqflock(dqp);
}
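
The shape above — opportunistically trylock the cached buffer, expedite it if it is queued for delayed write, then block on the flush lock — is not XFS-specific. A minimal, self-contained sketch of the same control flow with POSIX threads (all names here are hypothetical stand-ins, not kernel APIs):

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the buffer and its locks. */
struct cached_buf {
	pthread_mutex_t lock;    /* the buffer lock (XBF_TRYLOCK analogue) */
	bool            delwri;  /* queued for delayed write?              */
};

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

/* Same shape as xfs_dqflock_pushbuf_wait(): nudge the buffer toward
 * disk if we can get at it cheaply, then wait for the flush lock. */
static void pushbuf_wait(struct cached_buf *bp)
{
	if (pthread_mutex_trylock(&bp->lock) == 0) {
		if (bp->delwri) {
			/* XFS forces the log here if the buffer is pinned,
			 * then promotes it to the head of the delwri queue;
			 * this sketch just marks the write as issued. */
			bp->delwri = false;
		}
		pthread_mutex_unlock(&bp->lock);   /* cf. xfs_buf_relse() */
	}
	pthread_mutex_lock(&flush_lock);           /* cf. xfs_dqflock()   */
}
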
Code Example #2
File: xfs_inode_item.c Project: GerardGarcia/linux
/*
 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
 * failed to get the inode flush lock but did get the inode locked SHARED.
 * Here we're trying to see if the inode buffer is incore, and if so whether it's
 * marked delayed write. If that's the case, we'll promote it and that will
 * allow the caller to write the buffer by triggering the xfsbufd to run.
 */
STATIC bool
xfs_inode_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp;
	bool			ret = true;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (completion_done(&ip->i_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return true;
	}

	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
			iip->ili_format.ilf_len, XBF_TRYLOCK);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!bp)
		return true;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	if (xfs_buf_ispinned(bp))
		ret = false;
	xfs_buf_relse(bp);
	return ret;
}
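
The bool returned above feeds the AIL pusher's retry logic: true means the caller may move on, false means the backing buffer is pinned in the log and a log force is needed before the write can happen. A hedged, generic sketch of that state machine (hypothetical types and helpers, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the log item and its backing buffer. */
struct item { bool flushing; bool in_ail; };
struct buf  { bool delwri; bool pinned; };

/* Stub helpers standing in for xfs_incore()/delwri-queue operations. */
static struct buf *buf_trylock_incore(struct item *it) { (void)it; return NULL; }
static void buf_promote(struct buf *bp) { bp->delwri = false; }
static void buf_release(struct buf *bp) { (void)bp; }

/* Generic shape of the *_pushbuf helpers: bail out if the flush has
 * already completed or the item left the AIL, otherwise promote the
 * delayed-write buffer and report whether it is pinned. */
static bool item_pushbuf(struct item *it)
{
	struct buf *bp;
	bool ret = true;

	if (!it->flushing || !it->in_ail)
		return true;            /* nothing left for us to do */

	bp = buf_trylock_incore(it);    /* may fail: cache miss or contended */
	if (!bp)
		return true;
	if (bp->delwri)
		buf_promote(bp);        /* expedite the pending write */
	if (bp->pinned)
		ret = false;            /* caller should force the log */
	buf_release(bp);
	return ret;
}
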
Code Example #3
/*
 * Give the buffer a little push if it is incore and
 * wait on the flush lock.
 */
void
xfs_qm_dqflock_pushbuf_wait(
	xfs_dquot_t	*dqp)
{
	xfs_buf_t	*bp;

	/*
	 * Check to see if the dquot has been flushed delayed
	 * write.  If so, grab its buffer and send it
	 * out immediately.  We'll be able to acquire
	 * the flush lock when the I/O completes.
	 */
	bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
		    XFS_QI_DQCHUNKLEN(dqp->q_mount),
		    XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(dqp->q_mount,
					      (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			xfs_bawrite(dqp->q_mount, bp);
		} else {
			xfs_buf_relse(bp);
		}
	}
	xfs_dqflock(dqp);
}
Code Example #4
File: xfs_dquot_item.c Project: 3sOx/asuswrt-merlin
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * inode flush completed and the inode was taken off the AIL.
	 * So, just get out.
	 */
	if (completion_done(&dqp->q_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_dqunlock(dqp);
		return;
	}

	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	xfs_dqunlock(dqp);
	if (!bp)
		return;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
}
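
The completion_done(&dqp->q_flush) test above is a non-blocking query: has the flush completion already been signalled? A self-contained approximation of that primitive with POSIX threads (simplified to a boolean; the kernel's struct completion also carries a count and a wait queue):

#include <pthread.h>
#include <stdbool.h>

/* Simplified completion: a done flag guarded by a mutex. */
struct completion {
	pthread_mutex_t lock;
	bool            done;
};

/* Signal the completion, cf. complete() in the kernel. */
static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_mutex_unlock(&c->lock);
}

/* Non-blocking query, analogous to the kernel's completion_done(). */
static bool completion_done(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	bool done = c->done;
	pthread_mutex_unlock(&c->lock);
	return done;
}
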
Code Example #5
STATIC bool
xfs_inode_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp;
	bool			ret = true;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (!xfs_isiflocked(ip) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return true;
	}

	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
			iip->ili_format.ilf_len, XBF_TRYLOCK);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!bp)
		return true;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	if (xfs_buf_ispinned(bp))
		ret = false;
	xfs_buf_relse(bp);
	return ret;
}
Code Example #6
File: xfs_trans_buf.c Project: AhmadTux/freebsd
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * Use the fast path function xfs_trans_buf_item_match() or the buffer
 * cache routine incore_match() to find the buffer
 * if it is already owned by this transaction.
 *
 * If we don't already own the buffer, use get_buf() to get it.
 * If it doesn't yet have an associated xfs_buf_log_item structure,
 * then allocate one and add the item to this transaction.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
xfs_buf_t *
xfs_trans_get_buf(xfs_trans_t	*tp,
		  xfs_buftarg_t	*target_dev,
		  xfs_daddr_t	blkno,
		  int		len,
		  uint		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_get_flags(target_dev, blkno, len,
							flags | BUF_BUSY);
		return(bp);
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	if (tp->t_items.lic_next == NULL) {
		bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
	} else {
		bp  = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
	}
	if (bp != NULL) {
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buftrace("TRANS GET RECUR SHUT", bp);
			XFS_BUF_SUPER_STALE(bp);
		}
		/*
		 * If the buffer is stale then it was binval'ed
		 * since last read.  This doesn't matter since the
		 * caller isn't allowed to use the data anyway.
		 */
		else if (XFS_BUF_ISSTALE(bp)) {
			xfs_buftrace("TRANS GET RECUR STALE", bp);
			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
		}
		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		xfs_buftrace("TRANS GET RECUR", bp);
		xfs_buf_item_trace("GET RECUR", bip);
		return (bp);
	}

	/*
	 * The remainder of the function -- getting the buffer with
	 * get_buf() and attaching a new buf log item when this
	 * transaction does not already own it -- is truncated in
	 * this excerpt.
	 */
}
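
The recursion counter above (bli_recur) is what lets one transaction "lock" the same buffer several times without deadlocking against itself: only the first acquisition takes the real lock. A stripped-down sketch of the idea (hypothetical types, single-threaded for brevity):

#include <assert.h>
#include <stddef.h>

struct txn;                      /* opaque transaction handle         */
struct tbuf {
	struct txn *owner;       /* cf. the b_fsprivate2 back-pointer */
	int         recur;       /* cf. bli_recur                     */
};

/* Re-acquisition within the owning transaction only bumps the
 * recursion count; taking the real lock again would self-deadlock. */
static void txn_get_buf(struct txn *tp, struct tbuf *bp)
{
	if (bp->owner == tp) {
		bp->recur++;
		return;
	}
	assert(bp->owner == NULL);   /* take the real buffer lock here */
	bp->owner = tp;
	bp->recur = 0;
}

static void txn_release_buf(struct txn *tp, struct tbuf *bp)
{
	assert(bp->owner == tp);
	if (bp->recur > 0)
		bp->recur--;             /* inner release: just unwind   */
	else
		bp->owner = NULL;        /* outermost release: drop lock */
}
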
Code Example #7
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 * Just call bunpin() on the buffer to do this.
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 */
void
xfs_buf_item_unpin(
	xfs_buf_log_item_t	*bip,
	int			stale)
{
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	int		freed;
	SPLDECL(s);

	bp = bip->bli_buf;
	ASSERT(bp != NULL);
	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	xfs_buf_item_trace("UNPIN", bip);
	xfs_buftrace("XFS_UNPIN", bp);

	freed = atomic_dec_and_test(&bip->bli_refcount);
	mp = bip->bli_item.li_mountp;
	xfs_bunpin(bp);
	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		xfs_buf_item_trace("UNPIN STALE", bip);
		xfs_buftrace("XFS_UNPIN STALE", bp);
		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_delete_ail()
		 * will take care of that situation.
		 * xfs_trans_delete_ail() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			AIL_LOCK(mp,s);
			xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		xfs_buf_relse(bp);
	}
}
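
freed = atomic_dec_and_test(&bip->bli_refcount) is the classic "whoever drops the count to zero cleans up" idiom. The same thing in portable C11 atomics (hypothetical refcounted object, not the kernel's type):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

/* Returns true for exactly one caller: the one whose decrement takes
 * the count from 1 to 0, mirroring atomic_dec_and_test(). */
static bool obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		free(o);   /* last reference: tear down, cf. the stale path */
		return true;
	}
	return false;
}
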
Code Example #8
File: xfs_dquot_item.c Project: maraz/linux-2.6
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	xfs_dq_logitem_t    *qip)
{
	xfs_dquot_t	*dqp;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	dqp = qip->qli_dquot;
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * The qli_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(qip->qli_pushbuf_flag != 0);
	ASSERT(qip->qli_push_owner == current_pid());

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * inode flush completed and the inode was taken off the AIL.
	 * So, just get out.
	 */
	if (!issemalocked(&(dqp->q_flock))  ||
	    ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		qip->qli_pushbuf_flag = 0;
		xfs_dqunlock(dqp);
		return;
	}
	mp = dqp->q_mount;
	bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
		    XFS_QI_DQCHUNKLEN(mp),
		    XFS_INCORE_TRYLOCK);
	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
				  issemalocked(&(dqp->q_flock)));
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);

			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				int	error;
#ifdef XFSRACEDEBUG
				delay_for_intr();
				delay(300);
#endif
				error = xfs_bawrite(mp, bp);
				if (error)
					xfs_fs_cmn_err(CE_WARN, mp,
	"xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
							error, qip, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			qip->qli_pushbuf_flag = 0;
			xfs_dqunlock(dqp);
			xfs_buf_relse(bp);
		}
		return;
	}

	qip->qli_pushbuf_flag = 0;
	xfs_dqunlock(dqp);
}
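
The qli_pushbuf_flag / qli_push_owner pair is a hand-rolled "at most one pusher" latch; examples #4 and #5 show that later kernels dropped it entirely. The same exclusion expressed with a C11 atomic flag (illustrative sketch only):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag pushbuf_busy = ATOMIC_FLAG_INIT;

/* Try to become the single pusher; false means someone else already
 * claimed the job, so we skip duplicating their effort. */
static bool pushbuf_enter(void)
{
	return !atomic_flag_test_and_set(&pushbuf_busy);
}

/* Release the latch once the push attempt is finished. */
static void pushbuf_exit(void)
{
	atomic_flag_clear(&pushbuf_busy);
}
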
Code Example #9
File: xfs_buf_item.c Project: ANFS/ANFS-kernel
/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			XFS_BUF_SET_FSPRIVATE2(bp, NULL);
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			XFS_BUF_SET_FSPRIVATE(bp, NULL);
			XFS_BUF_CLR_IODONE_FUNC(bp);
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
			xfs_buf_item_relse(bp);
			ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
		}
		xfs_buf_relse(bp);
	}
}
Code Example #10
/*
 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
 * failed to get the inode flush lock but did get the inode locked SHARED.
 * Here we're trying to see if the inode buffer is incore, and if so whether it's
 * marked delayed write. If that's the case, we'll initiate a bawrite on that
 * buffer to expedite the process.
 *
 * We aren't holding the AIL lock (or the flush lock) when this gets called,
 * so it is inherently race-y.
 */
STATIC void
xfs_inode_item_pushbuf(
	xfs_inode_log_item_t	*iip)
{
	xfs_inode_t	*ip;
	xfs_mount_t	*mp;
	xfs_buf_t	*bp;
	uint		dopush;

	ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * The ili_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(iip->ili_pushbuf_flag != 0);
	ASSERT(iip->ili_push_owner == current_pid());

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (completion_done(&ip->i_flush) ||
	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		iip->ili_pushbuf_flag = 0;
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return;
	}

	mp = ip->i_mount;
	bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
		    iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);

	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			/*
			 * We were racing with iflush because we don't hold
			 * the AIL lock or the flush lock. However, at this point,
			 * we have the buffer, and we know that it's dirty.
			 * So, it's possible that iflush raced with us, and
			 * this item is already taken off the AIL.
			 * If not, we can flush it async.
			 */
			dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
				  !completion_done(&ip->i_flush));
			iip->ili_pushbuf_flag = 0;
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			xfs_buftrace("INODE ITEM PUSH", bp);
			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				int	error;
				error = xfs_bawrite(mp, bp);
				if (error)
					xfs_fs_cmn_err(CE_WARN, mp,
		"xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p",
							error, iip, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			iip->ili_pushbuf_flag = 0;
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			xfs_buf_relse(bp);
		}
		return;
	}
	/*
	 * We have to be careful about resetting pushbuf flag too early (above).
	 * Even though in theory we can do it as soon as we have the buflock,
	 * we don't want others to be doing work needlessly. They'll come to
	 * this function thinking that pushing the buffer is their
	 * responsibility only to find that the buffer is still locked by
	 * another doing the same thing.
	 */
	iip->ili_pushbuf_flag = 0;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return;
}
Code Example #11
File: xfs_inode_item.c Project: crossmeta/sgi
/*
 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
 * failed to get the inode flush lock but did get the inode locked SHARED.
 * Here we're trying to see if the inode buffer is incore, and if so whether it's
 * marked delayed write. If that's the case, we'll initiate a bawrite on that
 * buffer to expedite the process.
 *
 * We aren't holding the AIL_LOCK (or the flush lock) when this gets called,
 * so it is inherently race-y.
 */
STATIC void
xfs_inode_item_pushbuf(
	xfs_inode_log_item_t	*iip)
{
	xfs_inode_t	*ip;
	xfs_mount_t 	*mp;
	xfs_buf_t		*bp;
	uint		dopush;

	ip = iip->ili_inode;
	
	ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));

	/*
	 * The ili_pushbuf_flag keeps others from
	 * trying to duplicate our effort.
	 */
	ASSERT(iip->ili_pushbuf_flag != 0);
	ASSERT(iip->ili_push_owner == get_thread_id());

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * inode flush completed and the inode was taken off the AIL.
	 * So, just get out.
	 */
	if ((valusema(&(ip->i_flock)) > 0)  ||
	    ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
		iip->ili_pushbuf_flag = 0;
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	    	return;
	}
	
	mp = ip->i_mount;
	bp = xfs_incore(mp->m_ddev_targ, iip->ili_format.ilf_blkno,
		    iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);

	if (bp != NULL) {
		if (XFS_BUF_ISDELAYWRITE(bp)) {
			/*
			 * We were racing with iflush because we don't hold
			 * the AIL_LOCK or the flush lock. However, at this point,
			 * we have the buffer, and we know that it's dirty.
			 * So, it's possible that iflush raced with us, and
			 * this item is already taken off the AIL.
			 * If not, we can flush it async.
			 */
			dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) && 
				  (valusema(&(ip->i_flock)) <= 0));
			iip->ili_pushbuf_flag = 0;
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			xfs_buftrace("INODE ITEM PUSH", bp);
			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0,
					      XFS_LOG_FORCE);
			}
			if (dopush) {
				xfs_bawrite(mp, bp);
			} else {
				xfs_buf_relse(bp);
			}
		} else {
			iip->ili_pushbuf_flag = 0;
			xfs_iunlock(ip, XFS_ILOCK_SHARED); 
			xfs_buf_relse(bp);
		}
		return;
	}
	/*
	 * We have to be careful about resetting pushbuf flag too early (above).
	 * Even though in theory we can do it as soon as we have the buflock,
	 * we don't want others to be doing work needlessly. They'll come to
	 * this function thinking that pushing the buffer is their responsibility
	 * only to find that the buffer is still locked by another doing the
	 * same thing. XXX
	 */
	iip->ili_pushbuf_flag = 0;
	xfs_iunlock(ip, XFS_ILOCK_SHARED); 
	return;
}