Example 1
/*
 * Trim the passed in imap to the next shared/unshared extent boundary, and
 * if imap->br_startoff points to a shared extent, reserve space for it in the
 * COW fork.  In that case *shared is set to true, else to false.
 *
 * Note that imap will always contain the block numbers for the existing blocks
 * in the data fork, as the upper layers need them for read-modify-write
 * operations.
 */
int
xfs_reflink_reserve_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	got;
	int			error = 0;
	bool			eof = false, trimmed;
	xfs_extnum_t		idx;

	/*
	 * Search the COW fork extent list first.  This serves two purposes:
	 * first, it implements speculative preallocation using the cowextsize
	 * hint, so that we also unshare blocks adjacent to shared blocks
	 * instead of just the shared blocks themselves.  Second, a lookup in
	 * the extent list is generally faster than going out to the shared
	 * extent tree.
	 */

	if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &idx, &got))
		eof = true;
	if (!eof && got.br_startoff <= imap->br_startoff) {
		trace_xfs_reflink_cow_found(ip, imap);
		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);

		*shared = true;
		return 0;
	}

	/* Trim the mapping to the nearest shared extent boundary. */
	error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
	if (error)
		return error;

	/* Not shared?  Just report the (potentially capped) extent. */
	if (!*shared)
		return 0;

	/*
	 * Fork all the shared blocks from our write offset until the end of
	 * the extent.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return error;

	error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
			imap->br_blockcount, 0, &got, &idx, eof);
	if (error == -ENOSPC || error == -EDQUOT)
		trace_xfs_reflink_cow_enospc(ip, imap);
	if (error)
		return error;

	trace_xfs_reflink_cow_alloc(ip, &got);
	return 0;
}
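
As context for how this helper is meant to be used: a minimal caller sketch, loosely modeled on the write-side iomap path of the same era. The function name and surrounding plumbing here are hypothetical; only the xfs_reflink_reserve_cow() call itself comes from the example above.

/*
 * Hypothetical write-path caller: after looking up the data fork
 * mapping, let xfs_reflink_reserve_cow() trim it to a shared/unshared
 * boundary and reserve COW fork blocks if it turned out to be shared.
 */
static int
example_write_iomap_begin(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap)
{
	bool			shared = false;
	int			error;

	if (xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_reserve_cow(ip, imap, &shared);
		if (error)
			return error;
	}

	/*
	 * imap may now be shorter than requested, so the caller has to
	 * loop for the remainder; if shared is set, writeback for this
	 * range must go through the COW fork.
	 */
	return 0;
}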
Example 2
/*
 * Find the extent that maps the given range in the COW fork. Even if the extent
 * is not shared, we might have a preallocation for it in the COW fork.  If so,
 * we use it rather than trigger a new allocation.
 */
static int
xfs_find_trim_cow_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	bool			*found)
{
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;

	*found = false;

	/*
	 * If we don't find an overlapping extent, trim the range we need to
	 * allocate to fit the hole we found.
	 */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		got.br_startoff = offset_fsb + count_fsb;
	if (got.br_startoff > offset_fsb) {
		xfs_trim_extent(imap, imap->br_startoff,
				got.br_startoff - imap->br_startoff);
		return xfs_inode_need_cow(ip, imap, shared);
	}

	*shared = true;
	if (isnullstartblock(got.br_startblock)) {
		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
		return 0;
	}

	/* real extent found - no need to allocate */
	xfs_trim_extent(&got, offset_fsb, count_fsb);
	*imap = got;
	*found = true;
	return 0;
}
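
The two out-parameters drive the caller's control flow; a minimal sketch of that dispatch, mirroring the logic of Example 11 below trimmed down to the decision itself:

	bool		shared, found;
	int		error;

	/* Trim imap and classify it against the COW fork. */
	error = xfs_find_trim_cow_extent(ip, imap, &shared, &found);
	if (error || !shared)
		return error;		/* nothing to COW in this range */
	if (found)
		goto convert;		/* reuse the existing COW mapping */
	/* ...otherwise allocate fresh COW fork blocks... */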
Example 3
/* Convert part of an unwritten CoW extent to a real one. */
STATIC int
xfs_reflink_convert_cow_extent(
	struct xfs_inode		*ip,
	struct xfs_bmbt_irec		*imap,
	xfs_fileoff_t			offset_fsb,
	xfs_filblks_t			count_fsb,
	struct xfs_defer_ops		*dfops)
{
	xfs_fsblock_t			first_block = NULLFSBLOCK;
	int				nimaps = 1;

	if (imap->br_state == XFS_EXT_NORM)
		return 0;

	xfs_trim_extent(imap, offset_fsb, count_fsb);
	trace_xfs_reflink_convert_cow(ip, imap);
	if (imap->br_blockcount == 0)
		return 0;
	return xfs_bmapi_write(NULL, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, &first_block,
			0, imap, &nimaps, dfops);
}
Example 4
static int
xfs_reflink_convert_cow_locked(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	struct xfs_btree_cur	*dummy_cur = NULL;
	int			dummy_logflags;
	int			error = 0;

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		return 0;

	do {
		if (got.br_startoff >= offset_fsb + count_fsb)
			break;
		if (got.br_state == XFS_EXT_NORM)
			continue;
		if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
			return -EIO;

		xfs_trim_extent(&got, offset_fsb, count_fsb);
		if (!got.br_blockcount)
			continue;

		got.br_state = XFS_EXT_NORM;
		error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
				XFS_COW_FORK, &icur, &dummy_cur, &got,
				&dummy_logflags);
		if (error)
			return error;
	} while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));

	return error;
}
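
This helper expects the ILOCK to be held; the byte-range wrapper around it is a thin lock/convert/unlock shell. A sketch of that wrapper, assuming it matches the kernels of this vintage (details may differ between versions):

/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	int			error;

	ASSERT(count != 0);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}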
Example 5
/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode		*ip,
	xfs_off_t			offset,
	xfs_off_t			count)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_trans		*tp;
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	xfs_fsblock_t			firstfsb;
	struct xfs_defer_ops		dfops;
	int				error;
	unsigned int			resblks;
	xfs_filblks_t			rlen;
	xfs_extnum_t			idx;

	trace_xfs_reflink_end_cow(ip, offset, count);

	/* No COW extents?  That's easy! */
	if (ifp->if_bytes == 0)
		return 0;

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Start a rolling transaction to switch the mappings.  We're
	 * unlikely ever to have to remap 16T worth of single-block
	 * extents, so just cap the worst case extent count to 2^32-1.
	 * Stick a warning in just in case, and avoid 64-bit division.
	 */
	BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
	if (end_fsb - offset_fsb > UINT_MAX) {
		error = -EFSCORRUPTED;
		xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
		ASSERT(0);
		goto out;
	}
	resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
			(unsigned int)(end_fsb - offset_fsb),
			XFS_DATA_FORK);
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			resblks, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* If there is a hole at end_fsb - 1, go to the previous extent. */
	if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
	    got.br_startoff > end_fsb) {
		/*
		 * In the case of racing, overlapping AIO writes, no COW
		 * extents might be left by the time I/O completes for the
		 * loser of the race.  In that case we are done.
		 */
		if (idx <= 0)
			goto out_cancel;
		xfs_iext_get_extent(ifp, --idx, &got);
	}

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped idx forward */
		if (!del.br_blockcount) {
			idx--;
			goto next_extent;
		}

		ASSERT(!isnullstartblock(got.br_startblock));

		/*
		 * Don't remap unwritten extents; these are
		 * speculatively preallocated CoW extents that have been
		 * allocated but have not yet been involved in a write.
		 */
		if (got.br_state == XFS_EXT_UNWRITTEN) {
			idx--;
			goto next_extent;
		}

		/* Unmap the old blocks in the data fork. */
		xfs_defer_init(&dfops, &firstfsb);
		rlen = del.br_blockcount;
		error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
				&firstfsb, &dfops);
		if (error)
			goto out_defer;

		/* Trim the extent to whatever got unmapped. */
		if (rlen) {
			xfs_trim_extent(&del, del.br_startoff + rlen,
				del.br_blockcount - rlen);
		}
		trace_xfs_reflink_cow_remap(ip, &del);

		/* Free the CoW orphan record. */
		error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
				del.br_startblock, del.br_blockcount);
		if (error)
			goto out_defer;

		/* Map the new blocks into the data fork. */
		error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
		if (error)
			goto out_defer;

		/* Remove the mapping from the CoW fork. */
		xfs_bmap_del_extent_cow(ip, &idx, &got, &del);

		xfs_defer_ijoin(&dfops, ip);
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_defer;
next_extent:
		if (!xfs_iext_get_extent(ifp, idx, &got))
			break;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto out;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
	return error;
}
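
xfs_reflink_end_cow() runs from I/O completion, once the COW staging blocks hold the new data. A minimal sketch of such a completion hook; the function name is hypothetical and error handling is simplified, while the real handler of this era lives in the xfs_end_io() ioend processing:

/*
 * Hypothetical completion hook: remap the COW staging blocks into the
 * data fork on success, or tear the reservation down on I/O error.
 */
static void
example_end_cow_io(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		size,
	int			io_error)
{
	if (io_error) {
		xfs_reflink_cancel_cow_range(ip, offset, size, true);
		return;
	}
	xfs_reflink_end_cow(ip, offset, size);
}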
Example 6
/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	xfs_extnum_t			idx;
	xfs_fsblock_t			firstfsb;
	struct xfs_defer_ops		dfops;
	int				error = 0;

	if (!xfs_is_reflink_inode(ip))
		return 0;
	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
		return 0;

	while (got.br_startoff < end_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&idx, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			xfs_trans_ijoin(*tpp, ip, 0);
			xfs_defer_init(&dfops, &firstfsb);

			/* Free the CoW orphan record. */
			error = xfs_refcount_free_cow_extent(ip->i_mount,
					&dfops, del.br_startblock,
					del.br_blockcount);
			if (error)
				break;

			xfs_bmap_add_free(ip->i_mount, &dfops,
					del.br_startblock, del.br_blockcount,
					NULL);

			/* Update quota accounting */
			xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
					-(long)del.br_blockcount);

			/* Roll the transaction */
			xfs_defer_ijoin(&dfops, ip);
			error = xfs_defer_finish(tpp, &dfops);
			if (error) {
				xfs_defer_cancel(&dfops);
				break;
			}

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
		}

		if (!xfs_iext_get_extent(ifp, ++idx, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);

	return error;
}
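
This function assumes the caller owns a rolling transaction and holds the ILOCK. A sketch of the byte-range wrapper that sets both up, abbreviated from the xfs_reflink_cancel_cow_range() of this era (tracing elided; details may differ between versions):

/* Cancel CoW reservations for a byte range of an inode. */
int
xfs_reflink_cancel_cow_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			cancel_real)
{
	struct xfs_trans	*tp;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (count == NULLFILEOFF)
		end_fsb = NULLFILEOFF;
	else
		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/* Start a rolling transaction to remove the mappings. */
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Scrape out the old CoW reservations. */
	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
			cancel_real);
	if (error) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}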
Example 7
/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	uint			*lockmode)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_bmbt_irec	got;
	struct xfs_defer_ops	dfops;
	struct xfs_trans	*tp = NULL;
	xfs_fsblock_t		first_block;
	int			nimaps, error = 0;
	bool			trimmed;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;
	xfs_extnum_t		idx;

retry:
	ASSERT(xfs_is_reflink_inode(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));

	/*
	 * Even if the extent is not shared we might have a preallocation for
	 * it in the COW fork.  If so use it.
	 */
	if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &idx, &got) &&
	    got.br_startoff <= offset_fsb) {
		*shared = true;

		/* If we have a real allocation in the COW fork we're done. */
		if (!isnullstartblock(got.br_startblock)) {
			xfs_trim_extent(&got, offset_fsb, count_fsb);
			*imap = got;
			goto convert;
		}

		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
	} else {
		error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
		if (error || !*shared)
			goto out;
	}

	if (!tp) {
		resaligned = xfs_aligned_fsb_count(imap->br_startoff,
			imap->br_blockcount, xfs_get_cowextsz_hint(ip));
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

		xfs_iunlock(ip, *lockmode);
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
		*lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, *lockmode);

		if (error)
			return error;

		error = xfs_qm_dqattach_locked(ip, 0);
		if (error)
			goto out;
		goto retry;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &first_block);
	nimaps = 1;

	/* Allocate the entire reservation as unwritten blocks. */
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
			resblks, imap, &nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/* Finish up. */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		return error;
convert:
	return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
			&dfops);
out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out:
	if (tp)
		xfs_trans_cancel(tp);
	return error;
}
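
A sketch of how a write path might drive this version: take the ILOCK, let the function allocate (it may cycle the lock internally), and unlock with whatever mode it left behind. The function name here is hypothetical; the real caller is the iomap begin path:

static int
example_map_write_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap)
{
	uint			lockmode = XFS_ILOCK_EXCL;
	bool			shared = false;
	int			error;

	xfs_ilock(ip, lockmode);
	error = xfs_reflink_allocate_cow(ip, imap, &shared, &lockmode);
	/* lockmode may have changed while the lock was cycled. */
	xfs_iunlock(ip, lockmode);
	return error;
}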
Example 8
/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	xfs_fsblock_t			firstfsb;
	struct xfs_defer_ops		dfops;
	int				error = 0;

	if (!xfs_is_reflink_inode(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&icur, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			xfs_trans_ijoin(*tpp, ip, 0);
			xfs_defer_init(&dfops, &firstfsb);

			/* Free the CoW orphan record. */
			error = xfs_refcount_free_cow_extent(ip->i_mount,
					&dfops, del.br_startblock,
					del.br_blockcount);
			if (error)
				break;

			xfs_bmap_add_free(ip->i_mount, &dfops,
					del.br_startblock, del.br_blockcount,
					NULL);

			/* Update quota accounting */
			xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
					-(long)del.br_blockcount);

			/* Roll the transaction */
			xfs_defer_ijoin(&dfops, ip);
			error = xfs_defer_finish(tpp, &dfops);
			if (error) {
				xfs_defer_cancel(&dfops);
				break;
			}

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);

	return error;
}
Example 9
/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the end of the
 * range) and update @end_fsb appropriately.  Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		*end_fsb)
{
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	xfs_filblks_t		rlen;
	unsigned int		resblks;
	int			error;

	/* No COW extents?  That's easy! */
	if (ifp->if_bytes == 0) {
		*end_fsb = offset_fsb;
		return 0;
	}

	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
			XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	/*
	 * Lock the inode.  We have to ijoin without automatic unlock because
	 * the lead transaction is the refcountbt record deletion; the data
	 * fork update follows as a deferred log item.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * In the case of racing, overlapping AIO writes, no COW extents might
	 * be left by the time I/O completes for the loser of the race.  In
	 * that case we are done.
	 */
	if (!xfs_iext_lookup_extent_before(ip, ifp, end_fsb, &icur, &got) ||
	    got.br_startoff + got.br_blockcount <= offset_fsb) {
		*end_fsb = offset_fsb;
		goto out_cancel;
	}

	/*
	 * Structure copy @got into @del, then trim @del to the range that we
	 * were asked to remap.  We preserve @got for the eventual CoW fork
	 * deletion; from now on @del represents the mapping that we're
	 * actually remapping.
	 */
	del = got;
	xfs_trim_extent(&del, offset_fsb, *end_fsb - offset_fsb);

	ASSERT(del.br_blockcount > 0);

	/*
	 * Only remap real extents that contain data.  With AIO, speculative
	 * preallocations can leak into the range we are called upon, and we
	 * need to skip them.
	 */
	if (!xfs_bmap_is_real_extent(&got)) {
		*end_fsb = del.br_startoff;
		goto out_cancel;
	}

	/* Unmap the old blocks in the data fork. */
	rlen = del.br_blockcount;
	error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
	if (error)
		goto out_cancel;

	/* Trim the extent to whatever got unmapped. */
	xfs_trim_extent(&del, del.br_startoff + rlen, del.br_blockcount - rlen);
	trace_xfs_reflink_cow_remap(ip, &del);

	/* Free the CoW orphan record. */
	error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
			del.br_blockcount);
	if (error)
		goto out_cancel;

	/* Map the new blocks into the data fork. */
	error = xfs_bmap_map_extent(tp, ip, &del);
	if (error)
		goto out_cancel;

	/* Charge this new data fork mapping to the on-disk quota. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
			(long)del.br_blockcount);

	/* Remove the mapping from the CoW fork. */
	xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/* Update the caller about how much progress we made. */
	*end_fsb = del.br_startoff;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
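
Each call remaps only the highest extent in the range and pulls *end_fsb backwards, so the natural driver is a small loop. A sketch of that driver, assuming it matches the xfs_reflink_end_cow() that accompanies this helper (tracing elided):

/* Remap all CoW staging blocks for a byte range after a successful CoW. */
int
xfs_reflink_end_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Walk backwards: every iteration remaps the last extent in the
	 * range and shrinks end_fsb, until the whole range is done.
	 */
	while (end_fsb > offset_fsb && !error)
		error = xfs_reflink_end_cow_extent(ip, offset_fsb, &end_fsb);

	return error;
}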
Example 10
/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	if (!xfs_inode_has_cow_data(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&icur, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);

			/* Free the CoW orphan record. */
			error = xfs_refcount_free_cow_extent(*tpp,
					del.br_startblock, del.br_blockcount);
			if (error)
				break;

			xfs_bmap_add_free(*tpp, del.br_startblock,
					  del.br_blockcount, NULL);

			/* Roll the transaction */
			error = xfs_defer_finish(tpp);
			if (error)
				break;

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

			/* Remove the quota reservation */
			error = xfs_trans_reserve_quota_nblks(NULL, ip,
					-(long)del.br_blockcount, 0,
					XFS_QMOPT_RES_REGBLKS);
			if (error)
				break;
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);
	return error;
}
Example 11
/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_trans	*tp;
	int			nimaps, error = 0;
	bool			found;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		return error;
	if (found)
		goto convert;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
		imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	xfs_iunlock(ip, *lockmode);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	*lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, *lockmode);

	if (error)
		return error;

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_trans_cancel;

	/*
	 * Check for an overlapping extent again now that we dropped the ilock.
	 */
	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;
	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
			resblks, imap, &nimaps);
	if (error)
		goto out_unreserve;

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/*
	 * Allocation succeeded but the requested range was not even partially
	 * satisfied?  Bail out!
	 */
	if (nimaps == 0)
		return -ENOSPC;
convert:
	xfs_trim_extent(imap, offset_fsb, count_fsb);
	/*
	 * COW fork extents are supposed to remain unwritten until we're ready
	 * to initiate a disk write.  For direct I/O we are going to write the
	 * data and need the conversion, but for buffered writes we're done.
	 */
	if (!convert_now || imap->br_state == XFS_EXT_NORM)
		return 0;
	trace_xfs_reflink_convert_cow(ip, imap);
	return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);

out_unreserve:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
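
The convert_now flag is what separates the two write paths: buffered writes leave the staged COW blocks unwritten until writeback converts them, while direct writes need real extents before the bio is issued. A hypothetical caller showing the distinction (the real call sites are the buffered and direct iomap begin paths):

static int
example_map_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			direct_io)
{
	uint			lockmode = XFS_ILOCK_EXCL;
	bool			shared = false;
	int			error;

	xfs_ilock(ip, lockmode);
	/* Direct I/O converts immediately; buffered writes defer it. */
	error = xfs_reflink_allocate_cow(ip, imap, &shared, &lockmode,
			direct_io /* convert_now */);
	xfs_iunlock(ip, lockmode);
	return error;
}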