/*
 * Insert the given log item into the AIL.
 * We almost always insert at the end of the list, so on inserts
 * we search from the end of the list to find where the
 * new item belongs.
 */
STATIC void
xfs_ail_insert(
	xfs_ail_entry_t	*base,
	xfs_log_item_t	*lip)
/* ARGSUSED */
{
	xfs_log_item_t	*next_lip;

	/*
	 * If the list is empty, just insert the item.
	 */
	if (base->ail_back == (xfs_log_item_t*)base) {
		base->ail_forw = lip;
		base->ail_back = lip;
		lip->li_ail.ail_forw = (xfs_log_item_t*)base;
		lip->li_ail.ail_back = (xfs_log_item_t*)base;
		return;
	}

	next_lip = base->ail_back;
	while ((next_lip != (xfs_log_item_t*)base) &&
	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) > 0)) {
		next_lip = next_lip->li_ail.ail_back;
	}
	ASSERT((next_lip == (xfs_log_item_t*)base) ||
	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
	lip->li_ail.ail_forw = next_lip->li_ail.ail_forw;
	lip->li_ail.ail_back = next_lip;
	next_lip->li_ail.ail_forw = lip;
	lip->li_ail.ail_forw->li_ail.ail_back = lip;

	xfs_ail_check(base);
	return;
}
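
The tail-first search above generalizes to any sorted doubly linked list with a sentinel head. The following user-space sketch (hypothetical names, not part of XFS) mirrors the same splice logic and compiles standalone:

#include <stdio.h>

/* Hypothetical stand-ins for xfs_log_item_t and its embedded links. */
struct item {
	struct item	*forw;
	struct item	*back;
	long		lsn;
};

/* Sentinel head: an empty list points back at itself, as the AIL does. */
static void
list_init(struct item *head)
{
	head->forw = head;
	head->back = head;
}

/*
 * Search from the tail, since most inserts land at or near the end,
 * then splice the new item in after the first entry whose lsn is
 * less than or equal to the new item's lsn.
 */
static void
sorted_insert(struct item *head, struct item *lip)
{
	struct item	*next = head->back;

	while (next != head && next->lsn > lip->lsn)
		next = next->back;

	lip->forw = next->forw;
	lip->back = next;
	next->forw = lip;
	lip->forw->back = lip;
}

int
main(void)
{
	struct item	head, a = { .lsn = 10 }, b = { .lsn = 30 }, c = { .lsn = 20 };
	struct item	*p;

	list_init(&head);
	sorted_insert(&head, &a);
	sorted_insert(&head, &b);
	sorted_insert(&head, &c);	/* lands between a and b */

	for (p = head.forw; p != &head; p = p->forw)
		printf("%ld\n", p->lsn);	/* prints 10 20 30 */
	return 0;
}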
/*
 * xfs_trans_push_ail
 *
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously in a separate thread, so we return the tail
 * of the log right now instead of the tail after the push. This means we will
 * either continue right away, or we will sleep waiting on the async thread to
 * do its work.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_trans_push_ail(
	xfs_mount_t		*mp,
	xfs_lsn_t		threshold_lsn)
{
	xfs_log_item_t		*lip;

	lip = xfs_ail_min(&mp->m_ail.xa_ail);
	if (lip && !XFS_FORCED_SHUTDOWN(mp)) {
		if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0)
			xfsaild_wakeup(mp, threshold_lsn);
	}
}
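
How xfsaild_wakeup hands the new target to the push thread is not shown here; the sketch below is a minimal user-space analogue of that producer/worker wakeup pattern using a pthread condition variable. All names (struct pusher, push_request, push_wait) are hypothetical illustrations, not XFS APIs.

#include <pthread.h>

/* Hypothetical analogue of the AIL target plus its wakeup machinery. */
struct pusher {
	pthread_mutex_t	lock;
	pthread_cond_t	wake;
	long		target;	/* highest threshold requested so far */
};

/*
 * Like xfs_trans_push_ail() above: only raise the target and wake the
 * worker if the caller asks for more than is already being pushed.
 */
static void
push_request(struct pusher *p, long threshold)
{
	pthread_mutex_lock(&p->lock);
	if (threshold > p->target) {
		p->target = threshold;
		pthread_cond_signal(&p->wake);
	}
	pthread_mutex_unlock(&p->lock);
}

/* Worker side: sleep until someone raises the target past last_seen. */
static long
push_wait(struct pusher *p, long last_seen)
{
	pthread_mutex_lock(&p->lock);
	while (p->target <= last_seen)
		pthread_cond_wait(&p->wake, &p->lock);
	last_seen = p->target;
	pthread_mutex_unlock(&p->lock);
	return last_seen;
}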
/*
 * Return the first item in the AIL with an lsn greater than or equal
 * to the given lsn.  Also return the current generation number for
 * use in calls to xfs_trans_next_ail().
 */
STATIC xfs_log_item_t *
xfs_trans_first_push_ail(
	xfs_mount_t	*mp,
	int		*gen,
	xfs_lsn_t	lsn)
{
	xfs_log_item_t	*lip;

	lip = xfs_ail_min(&(mp->m_ail.xa_ail));
	*gen = (int)mp->m_ail.xa_gen;
	if (lsn == 0)
		return lip;

	while (lip && (XFS_LSN_CMP(lip->li_lsn, lsn) < 0))
		lip = lip->li_ail.ail_forw;

	return lip;
}
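The generation number is what lets a caller detect that the AIL changed while it ran unlocked, forcing a restart from the head of the list. A compact sketch of that pattern, with hypothetical names:

/* Hypothetical list node and list head carrying a generation counter. */
struct item {
	struct item	*forw;
	long		lsn;
};

struct gen_list {
	struct item	*head;	/* first (lowest-lsn) item, or NULL */
	unsigned int	gen;	/* bumped on every insert or delete */
};

/*
 * Resume a traversal after the lock was dropped: if the generation
 * moved, the cached cursor may point at a freed or relocated entry,
 * so restart from the head and charge the caller's restart budget.
 */
static struct item *
next_or_restart(struct gen_list *l, struct item *cur,
		unsigned int *saved_gen, int *restarts)
{
	if (l->gen != *saved_gen) {
		*saved_gen = l->gen;
		(*restarts)++;
		return l->head;
	}
	return cur->forw;
}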
/*
 * Check that the list is sorted as it should be.
 */
STATIC void
xfs_ail_check(
	xfs_ail_entry_t *base)
{
	xfs_log_item_t	*lip;
	xfs_log_item_t	*prev_lip;

	lip = base->ail_forw;
	if (lip == (xfs_log_item_t*)base) {
		/*
		 * Make sure the pointers are correct when the list
		 * is empty.
		 */
		ASSERT(base->ail_back == (xfs_log_item_t*)base);
		return;
	}

	/*
	 * Walk the list checking forward and backward pointers,
	 * lsn ordering, and that every entry has the XFS_LI_IN_AIL
	 * flag set.
	 */
	prev_lip = (xfs_log_item_t*)base;
	while (lip != (xfs_log_item_t*)base) {
		if (prev_lip != (xfs_log_item_t*)base) {
			ASSERT(prev_lip->li_ail.ail_forw == lip);
			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
		}
		ASSERT(lip->li_ail.ail_back == prev_lip);
		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
		prev_lip = lip;
		lip = lip->li_ail.ail_forw;
	}
	ASSERT(lip == (xfs_log_item_t*)base);
	ASSERT(base->ail_back == prev_lip);
}
/*
 * This is called to perform the commit processing for each
 * item described by the given chunk.
 *
 * The commit processing consists of unlocking items which were
 * held locked with the SYNC_UNLOCK attribute, calling the committed
 * routine of each logged item, updating the item's position in the AIL
 * if necessary, and unpinning each item.  If the committed routine
 * returns -1, then do nothing further with the item because it
 * may have been freed.
 *
 * Since items are unlocked when they are copied to the incore
 * log, it is possible for two transactions to be completing
 * and manipulating the same item simultaneously.  The AIL lock
 * will protect the lsn field of each item.  The value of this
 * field can never go backwards.
 *
 * We unpin the items after repositioning them in the AIL, because
 * otherwise they could be immediately flushed and we'd have to race
 * with the flusher trying to pull the item from the AIL as we add it.
 */
STATIC void
xfs_trans_chunk_committed(
	xfs_log_item_chunk_t	*licp,
	xfs_lsn_t		lsn,
	int			aborted)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	xfs_lsn_t		item_lsn;
	struct xfs_mount	*mp;
	int			i;
	SPLDECL(s);

	lidp = licp->lic_descs;
	for (i = 0; i < licp->lic_unused; i++, lidp++) {
		if (XFS_LIC_ISFREE(licp, i)) {
			continue;
		}

		lip = lidp->lid_item;
		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;

		/*
		 * Send in the ABORTED flag to the COMMITTED routine
		 * so that it knows whether the transaction was aborted
		 * or not.
		 */
		item_lsn = IOP_COMMITTED(lip, lsn);

		/*
		 * If the committed routine returns -1, make
		 * no more references to the item.
		 */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
			continue;
		}

		/*
		 * If the returned lsn is greater than what it
		 * contained before, update the location of the
		 * item in the AIL.  If it is not, then do nothing.
		 * Items can never move backwards in the AIL.
		 *
		 * While the new lsn should usually be greater, it
		 * is possible that a later transaction completing
		 * simultaneously with an earlier one using the
		 * same item could complete first with a higher lsn.
		 * This would cause the earlier transaction to fail
		 * the test below.
		 */
		mp = lip->li_mountp;
		AIL_LOCK(mp,s);
		if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
			/*
			 * This will set the item's lsn to item_lsn
			 * and update the position of the item in
			 * the AIL.
			 *
			 * xfs_trans_update_ail() drops the AIL lock.
			 */
			xfs_trans_update_ail(mp, lip, item_lsn, s);
		} else {
			AIL_UNLOCK(mp, s);
		}

		/*
		 * Now that we've repositioned the item in the AIL,
		 * unpin it so it can be flushed. Pass information
		 * about buffer stale state down from the log item
		 * flags, if anyone else stales the buffer we do not
		 * want to pay any attention to it.
		 */
		IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
	}
}
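The rule that an item's lsn can never move backwards boils down to a compare-before-assign done under the AIL lock. A standalone sketch of the idiom, with a pthread mutex standing in for AIL_LOCK (names hypothetical):

#include <pthread.h>

static pthread_mutex_t	ail_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Raise an item's lsn only if the new value is larger; two racing
 * commits may reach here in either order, and the higher lsn wins
 * regardless of which transaction finishes first.
 */
static void
update_lsn_monotonic(long *item_lsn, long new_lsn)
{
	pthread_mutex_lock(&ail_lock);
	if (new_lsn > *item_lsn)
		*item_lsn = new_lsn;	/* and reposition in the AIL here */
	pthread_mutex_unlock(&ail_lock);
}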
/*
 * xfs_trans_push_ail
 *
 * This routine is called to move the tail of the AIL
 * forward.  It does this by trying to flush items in the AIL
 * whose lsns are below the given threshold_lsn.
 *
 * The routine returns the lsn of the tail of the log.
 */
xfs_lsn_t
xfs_trans_push_ail(
	xfs_mount_t		*mp,
	xfs_lsn_t		threshold_lsn)
{
	xfs_lsn_t		lsn;
	xfs_log_item_t		*lip;
	int			gen;
	int			restarts;
	int			lock_result;
	int			flush_log;
	SPLDECL(s);

#define	XFS_TRANS_PUSH_AIL_RESTARTS	10

	AIL_LOCK(mp,s);
	lip = xfs_trans_first_ail(mp, &gen);
	if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * Just return if the AIL is empty.
		 */
		AIL_UNLOCK(mp, s);
		return (xfs_lsn_t)0;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out.  Make sure to limit the number of times
	 * we allow xfs_trans_next_ail() to restart scanning from the
	 * beginning of the list.  We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold. However, we may give up before that if
	 * we realize that we've been holding the AIL_LOCK for 'too long',
	 * blocking interrupts. Currently, 'too long' is roughly 500us.
	 */
	flush_log = 0;
	restarts = 0;
	while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) &&
		(XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) {
		/*
		 * If we can lock the item without sleeping, unlock
		 * the AIL lock and flush the item.  Then re-grab the
		 * AIL lock so we can look for the next item on the
		 * AIL.  Since we unlock the AIL while we flush the
		 * item, the next routine may start over again at the
		 * beginning of the list if anything has changed.
		 * That is what the generation count is for.
		 *
		 * If we can't lock the item, either its holder will flush
		 * it or it is already being flushed or it is being relogged.
		 * In any of these cases it is being taken care of and we
		 * can just skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		switch (lock_result) {
		      case XFS_ITEM_SUCCESS:
			AIL_UNLOCK(mp, s);
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			AIL_LOCK(mp,s);
			break;

		      case XFS_ITEM_PUSHBUF:
			AIL_UNLOCK(mp, s);
			XFS_STATS_INC(xs_push_ail_pushbuf);
#ifdef XFSRACEDEBUG
			delay_for_intr();
			delay(300);
#endif
			ASSERT(lip);
			ASSERT(lip->li_ops->iop_pushbuf);
			IOP_PUSHBUF(lip);
			AIL_LOCK(mp,s);
			break;

		      case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			flush_log = 1;
			break;

		      case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			break;

		      case XFS_ITEM_FLUSHING:
			XFS_STATS_INC(xs_push_ail_flushing);
			break;

		      default:
			ASSERT(0);
			break;
		}

		lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
		if (lip == NULL) {
			break;
		}
		if (XFS_FORCED_SHUTDOWN(mp)) {
			/*
			 * Just return if we shut down during the last try.
			 */
			AIL_UNLOCK(mp, s);
			return (xfs_lsn_t)0;
		}

	}

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		AIL_UNLOCK(mp, s);
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		AIL_LOCK(mp, s);
	}

	lip = xfs_ail_min(&(mp->m_ail));
	if (lip == NULL) {
		lsn = (xfs_lsn_t)0;
	} else {
		lsn = lip->li_lsn;
	}

	AIL_UNLOCK(mp, s);
	return lsn;
}	/* xfs_trans_push_ail */
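
The switch above implements a trylock-and-skip discipline: the pusher never sleeps on an item's lock, and only a pinned item leaves work behind (a log force after the scan). A condensed sketch of that dispatch, with hypothetical names:

/* Hypothetical result codes mirroring the XFS_ITEM_* values above. */
enum try_result {
	TRY_SUCCESS,
	TRY_PUSHBUF,
	TRY_PINNED,
	TRY_LOCKED,
	TRY_FLUSHING
};

/*
 * Handle one item's trylock result.  Returns 1 if the caller should
 * force the log after the scan (something pushable was pinned),
 * 0 otherwise.  Items we cannot lock are skipped: whoever holds
 * them is already responsible for flushing them.
 */
static int
dispatch_one(enum try_result r)
{
	switch (r) {
	case TRY_SUCCESS:
	case TRY_PUSHBUF:
		/* drop the list lock, flush or push the item, retake it */
		return 0;
	case TRY_PINNED:
		return 1;	/* force the log once, after the scan */
	case TRY_LOCKED:
	case TRY_FLUSHING:
		return 0;	/* in progress elsewhere; just move on */
	}
	return 0;
}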
/*
 * Function that does the work of pushing on the AIL
 */
long
xfsaild_push(
	xfs_mount_t	*mp,
	xfs_lsn_t	*last_lsn)
{
	long		tout = 1000; /* milliseconds */
	xfs_lsn_t	last_pushed_lsn = *last_lsn;
	xfs_lsn_t	target =  mp->m_ail.xa_target;
	xfs_lsn_t	lsn;
	xfs_log_item_t	*lip;
	int		gen;
	int		restarts;
	int		flush_log, count, stuck;

#define	XFS_TRANS_PUSH_AIL_RESTARTS	10

	spin_lock(&mp->m_ail_lock);
	lip = xfs_trans_first_push_ail(mp, &gen, *last_lsn);
	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
		/*
		 * AIL is empty or our push has reached the end.
		 */
		spin_unlock(&mp->m_ail_lock);
		last_pushed_lsn = 0;
		goto out;
	}

	XFS_STATS_INC(xs_push_ail);

	/*
	 * While the item we are looking at is below the given threshold
	 * try to flush it out. We'd like not to stop until we've at least
	 * tried to push on everything in the AIL with an LSN less than
	 * the given threshold.
	 *
	 * However, we will stop after a certain number of pushes and wait
	 * for a reduced timeout to fire before pushing further. This
	 * prevents us from spinning when we can't do anything or there is
	 * lots of contention on the AIL lists.
	 */
	tout = 10;
	lsn = lip->li_lsn;
	flush_log = stuck = count = restarts = 0;
	while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
		int	lock_result;
		/*
		 * If we can lock the item without sleeping, unlock the AIL
		 * lock and flush the item.  Then re-grab the AIL lock so we
		 * can look for the next item on the AIL. List changes are
		 * handled by the AIL lookup functions internally.
		 *
		 * If we can't lock the item, either its holder will flush it
		 * or it is already being flushed or it is being relogged.  In
		 * any of these cases it is being taken care of and we can just
		 * skip to the next item in the list.
		 */
		lock_result = IOP_TRYLOCK(lip);
		spin_unlock(&mp->m_ail_lock);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(xs_push_ail_success);
			IOP_PUSH(lip);
			last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PUSHBUF:
			XFS_STATS_INC(xs_push_ail_pushbuf);
			IOP_PUSHBUF(lip);
			last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(xs_push_ail_pinned);
			stuck++;
			flush_log = 1;
			break;

		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(xs_push_ail_locked);
			last_pushed_lsn = lsn;
			stuck++;
			break;

		case XFS_ITEM_FLUSHING:
			XFS_STATS_INC(xs_push_ail_flushing);
			last_pushed_lsn = lsn;
			stuck++;
			break;

		default:
			ASSERT(0);
			break;
		}

		spin_lock(&mp->m_ail_lock);
		/* should we bother continuing? */
		if (XFS_FORCED_SHUTDOWN(mp))
			break;
		ASSERT(mp->m_log);

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
		if (lip == NULL)
			break;
		if (restarts > XFS_TRANS_PUSH_AIL_RESTARTS)
			break;
		lsn = lip->li_lsn;
	}
	spin_unlock(&mp->m_ail_lock);

	if (flush_log) {
		/*
		 * If something we need to push out was pinned, then
		 * push out the log so it will become unpinned and
		 * move forward in the AIL.
		 */
		XFS_STATS_INC(xs_push_ail_flush);
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
	}

	if (!count) {
		/* We're past our target or empty, so idle */
		tout = 1000;
	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target so wait a bit longer for I/O to
		 * complete and remove pushed items from the AIL before we
		 * start the next scan from the start of the AIL.
		 */
		tout += 20;
		last_pushed_lsn = 0;
	} else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) ||
		   ((stuck * 100) / count > 90)) {
		/*
		 * Either there is a lot of contention on the AIL or we
		 * are stuck due to operations in progress. "Stuck" in this
		 * case is defined as >90% of the items we tried to push
		 * were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * continuing from where we were.
		 */
		tout += 10;
	}
out:
	*last_lsn = last_pushed_lsn;
	return tout;
}	/* xfsaild_push */
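
The timeout tuning at the end is effectively a pure function of the scan statistics. Extracted as a sketch for clarity (constants taken from the code above, names hypothetical):

/*
 * Mirror of the tail-end heuristic: idle when nothing was scanned,
 * rest a little longer once the target is reached, and back off when
 * more than 90% of the attempted pushes were stuck or the scan kept
 * restarting from the head of the AIL.
 */
static long
next_timeout_ms(int count, int stuck, int restarts,
		int reached_target, int max_restarts)
{
	long	tout = 10;	/* base delay between scans */

	if (count == 0)
		return 1000;	/* AIL empty or past target: idle */
	if (reached_target)
		return tout + 20;	/* let I/O drain, rescan from 0 */
	if (restarts > max_restarts || (stuck * 100) / count > 90)
		return tout + 10;	/* heavy contention: ease off */
	return tout;
}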