/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = error;
	xfs_destroy_ioend(ioend);
}
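/*
 * For context: a minimal sketch of how the io_work item consumed above is
 * typically wired up when an ioend is allocated. This is an editor's
 * illustration, not the verbatim helper; the _sketch name, the mempool
 * name and the exact field set are assumptions modelled on this era of
 * xfs_aops.c.
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend_sketch(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/* one reference held by the submission path itself */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_error = 0;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_offset = 0;
	ioend->io_size = 0;

	/* completion runs xfs_end_io() from a workqueue */
	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}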
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = ioend->io_bio->bi_error;

	/*
	 * Set an error if the mount has shut down and proceed with end I/O
	 * processing so it can perform whatever cleanups are necessary.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		error = -EIO;

	/*
	 * For a CoW extent, we need to move the mapping from the CoW fork
	 * to the data fork.  If instead an error happened, just dump the
	 * new blocks.
	 */
	if (ioend->io_type == XFS_IO_COW) {
		if (error)
			goto done;
		if (ioend->io_bio->bi_error) {
			error = xfs_reflink_cancel_cow_range(ip,
					ioend->io_offset, ioend->io_size);
			goto done;
		}
		error = xfs_reflink_end_cow(ip, ioend->io_offset,
				ioend->io_size);
		if (error)
			goto done;
	}

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		if (error)
			goto done;
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend, error);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend) ||
		       ioend->io_type == XFS_IO_COW);
	}

done:
	xfs_destroy_ioend(ioend, error);
}
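/*
 * Editor's sketch, not the verbatim helper: in this version the error is
 * handed to xfs_destroy_ioend() rather than stored in an io_error field,
 * because the ioend owns its bio. A plausible minimal shape of that
 * handoff, assuming the bi_error-era block layer and an embedded bio:
 */
STATIC void
xfs_destroy_ioend_sketch(
	struct xfs_ioend	*ioend,
	int			error)
{
	/* propagate a completion-side failure to the embedded bio ... */
	if (error && !ioend->io_bio->bi_error)
		ioend->io_bio->bi_error = error;

	/* ... and complete it, ending writeback on the attached pages */
	bio_endio(ioend->io_bio);
}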
/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(xfsconvertd_workqueue, &ioend->io_work);
		else if (xfs_ioend_is_append(ioend))
			queue_work(xfsdatad_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
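/*
 * For context: the io_remaining count dropped above is raised once per
 * bio submitted against the ioend, so completion work cannot run until
 * the last bio finishes. A hypothetical submission helper pairing with
 * xfs_finish_ioend() might look like this; the _sketch name is an
 * editor's assumption modelled on the xfsdatad-era code, including the
 * old two-argument submit_bio() signature.
 */
STATIC void
xfs_submit_ioend_bio_sketch(
	struct xfs_ioend	*ioend,
	struct bio		*bio)
{
	/* bias the count so completion can't fire before submission ends */
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	submit_bio(WRITE, bio);
}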
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN) {
		/*
		 * For buffered I/O we never preallocate a transaction when
		 * doing the unwritten extent conversion, but for direct I/O
		 * we do not know if we are converting an unwritten extent
		 * or not at the point where we preallocate the transaction.
		 */
		if (ioend->io_append_trans) {
			ASSERT(ioend->io_isdirect);

			current_set_flags_nested(
				&ioend->io_append_trans->t_pflags, PF_FSTRANS);
			xfs_trans_cancel(ioend->io_append_trans, 0);
		}

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error) {
			ioend->io_error = -error;
			goto done;
		}
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
		if (error)
			ioend->io_error = -error;
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	xfs_destroy_ioend(ioend);
}
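/*
 * The PF_FSTRANS dance above pairs with the allocation side: the thread
 * that preallocates the append transaction clears PF_FSTRANS before
 * handing the transaction to the completion worker, so the worker must
 * restore it before cancelling. A sketch of that allocation-side helper;
 * the reservation constants and exact signatures are assumptions modelled
 * on this era's transaction API, not a verbatim copy.
 */
STATIC int
xfs_setfilesize_trans_alloc_sketch(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We hand the transaction off to the completion thread now, so
	 * clear PF_FSTRANS here; the worker sets it again before it
	 * commits or cancels the transaction.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}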
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error) {
			ioend->io_error = -error;
			goto done;
		}
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	error = xfs_setfilesize(ioend);
	ASSERT(!error || error == EAGAIN);

done:
	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later. Otherwise destroy
	 * it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else {
		xfs_destroy_ioend(ioend);
	}
}
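/*
 * Why EAGAIN is tolerated above: in this era xfs_setfilesize() used a
 * trylock on the inode so that I/O completion never blocked on the
 * ilock, and the caller requeued the ioend instead. A minimal sketch of
 * that pattern, assuming this era's positive-errno convention; the
 * _sketch name marks it as an editor's illustration, not the verbatim
 * function.
 */
STATIC int
xfs_setfilesize_sketch(
	xfs_ioend_t		*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	/* back off and let the caller requeue rather than block here */
	if (unlikely(!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)))
		return EAGAIN;

	/* only extend the on-disk size; racing truncates win otherwise */
	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}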
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, bio->bi_error);
}
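/*
 * For context: xfs_end_bio() above is the bio completion hook, called
 * once when the ioend's bio chain finishes. A hypothetical submission
 * path (the _sketch name and the one-argument submit_bio() are
 * assumptions modelled on the embedded-bio era) wires it up roughly
 * like this:
 */
STATIC void
xfs_submit_ioend_sketch(
	struct xfs_ioend	*ioend)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;

	/* completion, and thus xfs_end_bio(), fires exactly once per ioend */
	submit_bio(ioend->io_bio);
}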
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans)
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
		/*
		 * For direct I/O we do not know if we need to allocate blocks
		 * or not, so we can't preallocate an append transaction, as
		 * that results in nested reservations and log space deadlocks.
		 * Hence allocate the transaction here. While this is
		 * sub-optimal and can block IO completion for some time, we're
		 * stuck with doing it this way until we can pass the ioend to
		 * the direct IO allocation callbacks and avoid nesting that
		 * way.
		 */
		error = xfs_setfilesize_trans_alloc(ioend);
		if (error)
			goto done;
		error = xfs_setfilesize(ioend);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = -error;
	xfs_destroy_ioend(ioend);
}
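/*
 * For reference: the append check used throughout these completion
 * handlers reduces to "does this I/O extend past the on-disk file
 * size?". A sketch of the helper as it plausibly reads in this era;
 * the on-disk size field is an assumption based on the surrounding
 * code, not a verbatim copy.
 */
static inline bool
xfs_ioend_is_append(
	struct xfs_ioend	*ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}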
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	/*
	 * Set an error if the mount has shut down and proceed with end I/O
	 * processing so it can perform whatever cleanups are necessary.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		ioend->io_error = -EIO;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		if (ioend->io_error)
			goto done;
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = error;
	xfs_destroy_ioend(ioend);
}