Example #1
/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	uint			*lockmode)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_bmbt_irec	got;
	struct xfs_defer_ops	dfops;
	struct xfs_trans	*tp = NULL;
	xfs_fsblock_t		first_block;
	int			nimaps, error = 0;
	bool			trimmed;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;
	struct xfs_iext_cursor	icur;

retry:
	ASSERT(xfs_is_reflink_inode(ip));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));

	/*
	 * Even if the extent is not shared we might have a preallocation for
	 * it in the COW fork.  If so use it.
	 */
	if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
	    got.br_startoff <= offset_fsb) {
		*shared = true;

		/* If we have a real allocation in the COW fork we're done. */
		if (!isnullstartblock(got.br_startblock)) {
			xfs_trim_extent(&got, offset_fsb, count_fsb);
			*imap = got;
			goto convert;
		}

		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
	} else {
		error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
		if (error || !*shared)
			goto out;
	}

	/*
	 * No transaction yet: this is the first pass.  Size the reservation,
	 * drop the ilock to allocate a transaction (which may block waiting
	 * for log space), retake the lock exclusively, and retry the COW
	 * fork lookup above in case it changed while the lock was dropped.
	 */
	if (!tp) {
		resaligned = xfs_aligned_fsb_count(imap->br_startoff,
			imap->br_blockcount, xfs_get_cowextsz_hint(ip));
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

		xfs_iunlock(ip, *lockmode);
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
		*lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, *lockmode);

		if (error)
			return error;

		error = xfs_qm_dqattach_locked(ip, 0);
		if (error)
			goto out;
		goto retry;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &first_block);
	nimaps = 1;

	/* Allocate the entire reservation as unwritten blocks. */
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
			resblks, imap, &nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	xfs_inode_set_cowblocks_tag(ip);

	/* Finish up. */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		return error;
convert:
	return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
			&dfops);
out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out:
	if (tp)
		xfs_trans_cancel(tp);
	return error;
}
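
Below is a minimal, hypothetical caller sketch for the Example #1 signature, written only to illustrate the locking contract rather than to reproduce the real fs/xfs/xfs_iomap.c caller: the inode must already be a reflink inode, the ILOCK must be held at *lockmode on entry, and because the function may drop and retake the lock, the caller must unlock with whatever mode *lockmode holds on return. The example_prepare_cow_write name is invented for illustration.

/* Hypothetical caller, for illustration only (not kernel code). */
static int
example_prepare_cow_write(
	struct xfs_inode	*ip,	/* must be a reflink inode */
	struct xfs_bmbt_irec	*imap)	/* data fork mapping of the write range */
{
	bool			shared = false;
	uint			lockmode = XFS_ILOCK_SHARED;
	int			error;

	xfs_ilock(ip, lockmode);
	error = xfs_reflink_allocate_cow(ip, imap, &shared, &lockmode);
	/* lockmode may have been upgraded to XFS_ILOCK_EXCL; unlock with it. */
	xfs_iunlock(ip, lockmode);

	/* On success, imap describes the COW fork mapping when shared. */
	return error;
}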
Example #2
/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_trans	*tp;
	int			nimaps, error = 0;
	bool			found;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		return error;
	if (found)
		goto convert;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
		imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	/*
	 * Drop the ilock while allocating the transaction, which may block
	 * waiting for log space, then retake it exclusively before
	 * continuing.
	 */
	xfs_iunlock(ip, *lockmode);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	*lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, *lockmode);

	if (error)
		return error;

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_trans_cancel;

	/*
	 * Check for an overlapping extent again now that we dropped the ilock.
	 */
	error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;
	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
			resblks, imap, &nimaps);
	if (error)
		goto out_unreserve;

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/*
	 * Allocation succeeded but the requested range was not even partially
	 * satisfied?  Bail out!
	 */
	if (nimaps == 0)
		return -ENOSPC;
convert:
	xfs_trim_extent(imap, offset_fsb, count_fsb);
	/*
	 * COW fork extents are supposed to remain unwritten until we're ready
	 * to initiate a disk write.  For direct I/O we are going to write the
	 * data and need the conversion, but for buffered writes we're done.
	 */
	if (!convert_now || imap->br_state == XFS_EXT_NORM)
		return 0;
	trace_xfs_reflink_convert_cow(ip, imap);
	return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);

out_unreserve:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
			XFS_QMOPT_RES_REGBLKS);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
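
And a matching hypothetical caller sketch for the Example #2 signature, again purely illustrative: here the ILOCK must be held exclusively on entry, and the new convert_now argument encodes the comment above the convert label, namely that buffered writes leave the COW reservation unwritten while direct I/O requests immediate conversion because the data goes straight to disk. The example_allocate_cow_range name is invented.

/* Hypothetical caller, for illustration only (not kernel code). */
static int
example_allocate_cow_range(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,		/* data fork mapping of the write range */
	bool			direct_io)	/* true for direct I/O writes */
{
	bool			shared = false;
	uint			lockmode = XFS_ILOCK_EXCL;
	int			error;

	xfs_ilock(ip, lockmode);
	error = xfs_reflink_allocate_cow(ip, imap, &shared, &lockmode,
			direct_io /* convert_now */);
	xfs_iunlock(ip, lockmode);
	return error;
}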