Example #1
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
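
The XFS_ITEM_* codes returned here drive the AIL push loop. As a rough illustration, a caller might dispatch on them like the sketch below (example_push_one is a made-up name; the real logic is xfsaild_push() in xfs_trans_ail.c, which also tracks stuck counts, push targets, and backoff):

/*
 * Hypothetical sketch (not from the source) of how a push loop such as
 * xfsaild might act on the XFS_ITEM_* codes returned above.
 */
static void example_push_one(struct xfs_log_item *lip,
			     struct list_head *buffer_list, bool *force_log)
{
	switch (lip->li_ops->iop_push(lip, buffer_list)) {
	case XFS_ITEM_SUCCESS:
		/* queued for delayed write; keep walking the AIL */
		break;
	case XFS_ITEM_PINNED:
		/* still pinned in the log: ask for a log force */
		*force_log = true;
		break;
	case XFS_ITEM_LOCKED:
		/* held by someone else; try again on a later pass */
		break;
	case XFS_ITEM_FLUSHING:
		/* write already in flight; nothing to do but wait */
		break;
	}
}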
Example #2
File: xfs_sync.c  Project: AllenDou/linux
/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* flush all pending changes from the AIL */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);

	/*
	 * At this point we might have modified the superblock again and
	 * thus added an item to the AIL; flush it again.
	 */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * The superblock buffer is uncached and xfsaild_push() will lock and
	 * set the XBF_ASYNC flag on the buffer. We cannot do xfs_buf_iowait()
	 * here but a lock on the superblock buffer will block until iodone()
	 * has completed.
	 */
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);
}
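
The closing lock/unlock pair is the point of interest: since I/O completion (iodone) is what releases the superblock buffer's lock, taking and immediately dropping the lock acts as a wait-for-I/O barrier. A minimal sketch of the pattern, with a helper name invented for illustration:

/* Hypothetical helper (name invented) showing the barrier pattern:
 * xfs_buf_lock() blocks until iodone() releases the lock, so the
 * lock/unlock cycle waits for in-flight I/O without starting any. */
static void wait_for_buf_iodone(struct xfs_buf *bp)
{
	xfs_buf_lock(bp);
	xfs_buf_unlock(bp);
}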
Example #3
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
		xfs_warn(bp->b_target->bt_mount,
"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
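
The new piece in this revision is the XBF_WRITE_FAIL warning; ___ratelimit() throttles it so repeated retries do not flood the log. The state it consumes is an ordinary struct ratelimit_state; as a sketch, it could be declared like so (the 30-second interval and burst of 10 are illustrative assumptions, not values from this example's source):

#include <linux/ratelimit.h>

/* Illustrative declaration only; interval (30s) and burst (10)
 * are assumed values, not taken from this example's source. */
static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state,
			      30 * HZ, 10);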
Example #4
STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
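
Compared with Examples #1 and #3, this version returns XFS_ITEM_LOCKED as soon as the trylock fails, without re-checking the pin state, so it appears to predate the pinned-stale race handling shown above.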
Example #5
/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	xfs_mount_t		*mp = ip->i_mount;
	int			nimap;
	int			error = 0;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp,
				  BTOBB(mp->m_sb.sb_blocksize), 0);
	if (!bp)
		return XFS_ERROR(ENOMEM);

	xfs_buf_unlock(bp);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;
		if (imap.br_startblock == HOLESTARTBLOCK)
			continue;
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		if (imap.br_state == XFS_EXT_UNWRITTEN)
			continue;
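		/* reset the buffer's state and read in this block's current contents */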
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNWRITE(bp);
		XFS_BUF_READ(bp);
		XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(read)");
			break;
		}
		memset(bp->b_addr +
			(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		      0, lastoffset - offset + 1);
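		/* switch the buffer from read to write and push the zeroed block out */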
		XFS_BUF_UNDONE(bp);
		XFS_BUF_UNREAD(bp);
		XFS_BUF_WRITE(bp);

		if (XFS_FORCED_SHUTDOWN(mp)) {
			error = XFS_ERROR(EIO);
			break;
		}
		xfs_buf_iorequest(bp);
		error = xfs_buf_iowait(bp);
		if (error) {
			xfs_buf_ioerror_alert(bp,
					"xfs_zero_remaining_bytes(write)");
			break;
		}
	}
	xfs_buf_free(bp);
	return error;
}
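
The header comment describes rounding the freed range's start up and its end down to block boundaries, then zeroing the partial blocks the rounding dropped. A small standalone sketch of that arithmetic (the 4096-byte block size and the byte offsets are made-up values):

#include <stdio.h>

/* Illustrates the boundary rounding described in the header comment:
 * whole blocks inside the rounded range are freed; the partial-block
 * leftovers on either side are what xfs_zero_remaining_bytes() zeroes. */
int main(void)
{
	unsigned long long bsize = 4096;		/* made-up block size */
	unsigned long long start = 1000, end = 9000;	/* bytes to free */
	unsigned long long first = (start + bsize - 1) / bsize * bsize;
	unsigned long long last  = end / bsize * bsize;

	/* blocks covering [first, last) are freed; the rest is zeroed */
	printf("zero %llu..%llu and %llu..%llu\n",
	       start, first - 1, last, end);
	return 0;
}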