/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
int
xfs_bui_recover(
	struct xfs_mount		*mp,
	struct xfs_bui_log_item		*buip,
	struct xfs_defer_ops		*dfops)
{
	int				error = 0;
	unsigned int			bui_type;
	struct xfs_map_extent		*bmap;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	bool				op_ok;
	struct xfs_bud_log_item		*budp;
	enum xfs_bmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_bmbt_irec		irec;

	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	/*
	 * First check the validity of the extent described by the
	 * BUI.  If anything is bad, then toss the BUI.
	 */
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		op_ok = true;
		break;
	default:
		op_ok = false;
		break;
	}
	if (!op_ok || startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) {
		/*
		 * This will pull the BUI from the AIL and
		 * free the memory associated with it.
		 */
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		return error;
	budp = xfs_trans_get_bud(tp, buip);

	/* Grab the inode. */
	error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		goto err_inode;

	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Process deferred bmap item. */
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		type = bui_type;
		break;
	default:
		error = -EFSCORRUPTED;
		goto err_inode;
	}
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
			ip, whichfork, bmap->me_startoff,
			bmap->me_startblock, &count, state);
	if (error)
		goto err_inode;

	if (count > 0) {
		ASSERT(type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
		if (error)
			goto err_inode;
	}

	set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);

	return error;

err_inode:
	xfs_trans_cancel(tp);
	if (ip) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		IRELE(ip);
	}
	return error;
}
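The sanity check in the middle of xfs_bui_recover() is easy to misread as one long boolean, so here is a minimal standalone sketch of the same bounds logic. Everything in it is hypothetical (plain integer types, a made-up bui_extent_looks_ok() helper, filesystem limits passed in explicitly); it simply restates the condition under which the recovered extent is accepted rather than tossed:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical distillation of the recovery-time sanity check: the
 * extent is accepted only if the opcode was recognized, the start
 * block and owner inode both land inside the filesystem, the length
 * fits inside one allocation group, and no unknown flag bits are set.
 */
static bool
bui_extent_looks_ok(bool op_ok, uint64_t startblock_fsb,
		    uint64_t inode_fsb, uint64_t len,
		    uint32_t unknown_flags, uint64_t dblocks,
		    uint64_t agblocks)
{
	return op_ok &&
	       startblock_fsb != 0 && startblock_fsb < dblocks &&
	       inode_fsb != 0 && inode_fsb < dblocks &&
	       len != 0 && len < agblocks &&
	       unknown_flags == 0;
}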
/*
 * Unmap a range of blocks from a file, then map other blocks into the hole.
 * The range to unmap is (destoff : irec->br_startoff + irec->br_blockcount).
 * The extent irec is mapped into dest at irec->br_startoff.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	xfs_fileoff_t		destoff,
	xfs_off_t		new_isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			real_extent = xfs_bmap_is_real_extent(irec);
	struct xfs_trans	*tp;
	xfs_fsblock_t		firstfsb;
	unsigned int		resblks;
	struct xfs_defer_ops	dfops;
	struct xfs_bmbt_irec	uirec;
	xfs_filblks_t		rlen;
	xfs_filblks_t		unmap_len;
	xfs_off_t		newlen;
	int			error;

	unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
	trace_xfs_reflink_punch_range(ip, destoff, unmap_len);

	/* No reflinking if we're low on space */
	if (real_extent) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, irec->br_startblock));
		if (error)
			goto out;
	}

	/* Start a rolling transaction to switch the mappings */
	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* If we're not just clearing space, then do we have enough quota? */
	if (real_extent) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_cancel;
	}

	trace_xfs_reflink_remap(ip, irec->br_startoff,
				irec->br_blockcount, irec->br_startblock);

	/* Unmap the old blocks in the data fork. */
	rlen = unmap_len;
	while (rlen) {
		xfs_defer_init(&dfops, &firstfsb);
		error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1,
				&firstfsb, &dfops);
		if (error)
			goto out_defer;

		/*
		 * Trim the extent to whatever got unmapped.
		 * Remember, bunmapi works backwards.
		 */
		uirec.br_startblock = irec->br_startblock + rlen;
		uirec.br_startoff = irec->br_startoff + rlen;
		uirec.br_blockcount = unmap_len - rlen;
		unmap_len = rlen;

		/* If this isn't a real mapping, we're done. */
		if (!real_extent || uirec.br_blockcount == 0)
			goto next_extent;

		trace_xfs_reflink_remap(ip, uirec.br_startoff,
				uirec.br_blockcount, uirec.br_startblock);

		/* Update the refcount tree */
		error = xfs_refcount_increase_extent(mp, &dfops, &uirec);
		if (error)
			goto out_defer;

		/* Map the new blocks into the data fork. */
		error = xfs_bmap_map_extent(mp, &dfops, ip, &uirec);
		if (error)
			goto out_defer;

		/* Update quota accounting. */
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
				uirec.br_blockcount);

		/* Update dest isize if needed. */
		newlen = XFS_FSB_TO_B(mp,
				uirec.br_startoff + uirec.br_blockcount);
		newlen = min_t(xfs_off_t, newlen, new_isize);
		if (newlen > i_size_read(VFS_I(ip))) {
			trace_xfs_reflink_update_inode_size(ip, newlen);
			i_size_write(VFS_I(ip), newlen);
			ip->i_d.di_size = newlen;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

next_extent:
		/* Process all the deferred stuff. */
		xfs_defer_ijoin(&dfops, ip);
		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_defer;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		goto out;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
	return error;
}
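The trim step in the unmap loop above relies on a detail the comment only hints at: __xfs_bunmapi() removes blocks from the end of the range, so after a partial unmap, rlen counts the blocks still mapped at the front and the freshly unmapped piece is the tail. A minimal sketch of that bookkeeping, using a hypothetical plain-integer extent type rather than the kernel's xfs_bmbt_irec:

#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t startoff;	/* file offset, in blocks */
	uint64_t startblock;	/* disk block */
	uint64_t blockcount;	/* length, in blocks */
};

/*
 * A bunmapi-style partial unmap removes blocks from the END of the
 * range and leaves "rlen" blocks still mapped at the front.  The
 * tail piece [rlen, unmap_len) is what was just unmapped, and is
 * therefore the piece we may now remap.
 */
static struct extent
trim_to_unmapped_tail(const struct extent *src, uint64_t unmap_len,
		      uint64_t rlen)
{
	struct extent tail = {
		.startoff   = src->startoff + rlen,
		.startblock = src->startblock + rlen,
		.blockcount = unmap_len - rlen,
	};
	return tail;
}

int main(void)
{
	struct extent src = { .startoff = 100, .startblock = 5000,
			      .blockcount = 8 };
	/* Pretend bunmapi unmapped the last 3 of 8 blocks: rlen = 5. */
	struct extent tail = trim_to_unmapped_tail(&src, 8, 5);

	printf("remap offset %llu block %llu count %llu\n",
	       (unsigned long long)tail.startoff,
	       (unsigned long long)tail.startblock,
	       (unsigned long long)tail.blockcount);
	return 0;
}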
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*map,
	int		*retmap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_bmbt_irec_t	imap;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	*retmap = 0;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = map->br_blockcount;
	map_start_fsb = map->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
			xfs_trans_ihold(tp, ip);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while.  We have to be careful about truncates or
			 * hole punches - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is that the
			 * pages for the range we are being asked to convert
			 * are locked, and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this, as we are forced
			 * to check each map for overlap with the desired
			 * range and abort as soon as we find it.  Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback.  Because
			 * they are about to be tossed, we don't need to
			 * write them back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, ip->i_size);
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/* Go get the actual blocks */
			error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
					XFS_BMAPI_WRITE, &first_block, 1,
					&imap, &nimaps, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp,
					XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_cmn_err_fsblock_zero(ip, &imap);

		if ((offset_fsb >= imap.br_startoff) &&
		    (offset_fsb < (imap.br_startoff +
				   imap.br_blockcount))) {
			*map = imap;
			*retmap = 1;
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap.br_blockcount;
		map_start_fsb = imap.br_startoff + imap.br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
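The outer loop's exit logic is worth restating: each pass either converts a mapping that covers the block the caller asked about (success), or converts only delalloc space in front of it, in which case we step past what we got and retry with the remainder. A toy model of that convergence, with a fake_allocate() stand-in for xfs_bmapi() (all names hypothetical):

#include <stdint.h>
#include <stdio.h>

struct map { uint64_t startoff; uint64_t blockcount; };

/* Stand-in for the allocator: converts at most "chunk" blocks. */
static struct map
fake_allocate(uint64_t start, uint64_t count, uint64_t chunk)
{
	struct map m = { start, count < chunk ? count : chunk };
	return m;
}

int main(void)
{
	uint64_t offset_fsb = 10;	/* block the caller wants */
	uint64_t start = 4, count = 12;	/* surrounding delalloc extent */

	while (count != 0) {
		struct map m = fake_allocate(start, count, 3);

		if (offset_fsb >= m.startoff &&
		    offset_fsb < m.startoff + m.blockcount) {
			printf("mapped block %llu in [%llu,%llu)\n",
			       (unsigned long long)offset_fsb,
			       (unsigned long long)m.startoff,
			       (unsigned long long)(m.startoff +
						    m.blockcount));
			return 0;
		}
		/* Only surrounding data was converted; try again. */
		count -= m.blockcount;
		start = m.startoff + m.blockcount;
	}
	return 1;
}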
/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the end of the
 * range) and update @end_fsb appropriately.  Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		*end_fsb)
{
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	xfs_filblks_t		rlen;
	unsigned int		resblks;
	int			error;

	/* No COW extents?  That's easy! */
	if (ifp->if_bytes == 0) {
		*end_fsb = offset_fsb;
		return 0;
	}

	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
			XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	/*
	 * Lock the inode.  We have to ijoin without automatic unlock because
	 * the lead transaction is the refcountbt record deletion; the data
	 * fork update follows as a deferred log item.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * In case of racing, overlapping AIO writes, no COW extents might be
	 * left by the time I/O completes for the loser of the race.  In that
	 * case we are done.
	 */
	if (!xfs_iext_lookup_extent_before(ip, ifp, end_fsb, &icur, &got) ||
	    got.br_startoff + got.br_blockcount <= offset_fsb) {
		*end_fsb = offset_fsb;
		goto out_cancel;
	}

	/*
	 * Structure copy @got into @del, then trim @del to the range that we
	 * were asked to remap.  We preserve @got for the eventual CoW fork
	 * deletion; from now on @del represents the mapping that we're
	 * actually remapping.
	 */
	del = got;
	xfs_trim_extent(&del, offset_fsb, *end_fsb - offset_fsb);

	ASSERT(del.br_blockcount > 0);

	/*
	 * Only remap real extents that contain data.  With AIO, speculative
	 * preallocations can leak into the range we are called upon, and we
	 * need to skip them.
	 */
	if (!xfs_bmap_is_real_extent(&got)) {
		*end_fsb = del.br_startoff;
		goto out_cancel;
	}

	/* Unmap the old blocks in the data fork. */
	rlen = del.br_blockcount;
	error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
	if (error)
		goto out_cancel;

	/* Trim the extent to whatever got unmapped. */
	xfs_trim_extent(&del, del.br_startoff + rlen,
			del.br_blockcount - rlen);
	trace_xfs_reflink_cow_remap(ip, &del);

	/* Free the CoW orphan record. */
	error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
			del.br_blockcount);
	if (error)
		goto out_cancel;

	/* Map the new blocks into the data fork. */
	error = xfs_bmap_map_extent(tp, ip, &del);
	if (error)
		goto out_cancel;

	/* Charge this new data fork mapping to the on-disk quota. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
			(long)del.br_blockcount);

	/* Remove the mapping from the CoW fork. */
	xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/* Update the caller about how much progress we made. */
	*end_fsb = del.br_startoff;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
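Both xfs_trim_extent() calls above perform the same clamping operation against different windows: the first narrows @del to the range the caller asked to remap, and the second, after a partial unmap that leaves rlen blocks still mapped at the front, keeps only the tail that actually got unmapped. A sketch of that clamp with a hypothetical plain-C extent type (the kernel helper is believed to behave this way on xfs_bmbt_irec, but treat this as an illustration, not the kernel source):

#include <stdint.h>

struct extent {
	uint64_t startoff;
	uint64_t startblock;
	uint64_t blockcount;
};

/*
 * Clamp an extent to the window [bno, bno + len), advancing the disk
 * block to match whatever is cut off the front.  If the extent falls
 * entirely outside the window, it ends up with a zero blockcount.
 */
static void
trim_extent(struct extent *e, uint64_t bno, uint64_t len)
{
	uint64_t end = bno + len;

	if (e->startoff + e->blockcount <= bno || end <= e->startoff) {
		e->blockcount = 0;
		return;
	}
	if (e->startoff < bno) {
		uint64_t skip = bno - e->startoff;

		e->startoff += skip;
		e->startblock += skip;
		e->blockcount -= skip;
	}
	if (e->startoff + e->blockcount > end)
		e->blockcount = end - e->startoff;
}

Used as above: after "del = got", trim_extent(&del, offset_fsb, end_fsb - offset_fsb) narrows to the requested window, and trim_extent(&del, del.startoff + rlen, del.blockcount - rlen) keeps only the freshly unmapped tail.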
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating callers request.
 *
 * Called without a lock on the inode.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*map,
	int		*retmap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_iocore_t	*io = &ip->i_iocore;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_bmbt_irec_t	imap[XFS_STRAT_WRITE_IMAPS];
	xfs_trans_t	*tp;
	int		i, nimaps, committed;
	int		error = 0;
	int		nres;

	*retmap = 0;

	/*
	 * Make sure that the dquots are there.
	 */
	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = map->br_blockcount;
	map_start_fsb = map->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error == ENOSPC) {
				error = xfs_trans_reserve(tp, 0,
						XFS_WRITE_LOG_RES(mp),
						0,
						XFS_TRANS_PERM_LOG_RES,
						XFS_WRITE_LOG_COUNT);
			}
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
			xfs_trans_ihold(tp, ip);

			XFS_BMAP_INIT(&free_list, &first_block);

			nimaps = XFS_STRAT_WRITE_IMAPS;
			/*
			 * Ensure we don't go beyond eof - it is possible
			 * the extents changed since we did the read call,
			 * we dropped the ilock in the interim.
			 */
			end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
			xfs_bmap_last_offset(NULL, ip, &last_block,
				XFS_DATA_FORK);
			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/* Go get the actual blocks */
			error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
					XFS_BMAPI_WRITE, &first_block, 1,
					imap, &nimaps, &free_list, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list,
					first_block, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp,
					XFS_TRANS_RELEASE_LOG_RES, NULL);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		for (i = 0; i < nimaps; i++) {
			if (unlikely(!imap[i].br_startblock &&
				     !(io->io_flags & XFS_IOCORE_RT)))
				return xfs_cmn_err_fsblock_zero(ip, &imap[i]);
			if ((offset_fsb >= imap[i].br_startoff) &&
			    (offset_fsb < (imap[i].br_startoff +
					   imap[i].br_blockcount))) {
				*map = imap[i];
				*retmap = 1;
				XFS_STATS_INC(xs_xstrat_quick);
				return 0;
			}
			count_fsb -= imap[i].br_blockcount;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		nimaps--;
		map_start_fsb = imap[nimaps].br_startoff +
				imap[nimaps].br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}
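Unlike the single-map version earlier in this section, this variant asks the allocator for up to XFS_STRAT_WRITE_IMAPS mappings and must scan the returned array for one that overlaps the requested offset, otherwise retrying past the last mapping it did get. A sketch of that scan with hypothetical types (the caller is assumed to guarantee nimaps >= 1, as the inner while loop above does):

#include <stdbool.h>
#include <stdint.h>

struct map { uint64_t startoff; uint64_t blockcount; };

/*
 * Scan the returned mappings for one covering offset_fsb.  On a
 * miss, shrink the remaining length by everything we consumed and
 * report where the retry should resume: just past the last mapping.
 */
static bool
find_covering_map(const struct map *imap, int nimaps,
		  uint64_t offset_fsb, struct map *out,
		  uint64_t *next_start, uint64_t *count_fsb)
{
	int i;

	for (i = 0; i < nimaps; i++) {
		if (offset_fsb >= imap[i].startoff &&
		    offset_fsb < imap[i].startoff + imap[i].blockcount) {
			*out = imap[i];
			return true;
		}
		*count_fsb -= imap[i].blockcount;
	}
	/* Resume after the last mapping we did get (nimaps >= 1). */
	*next_start = imap[nimaps - 1].startoff +
		      imap[nimaps - 1].blockcount;
	return false;
}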