/*
 * Work on a completed AIO/DIO request: convert the unwritten extents
 * covered by this io_end to written extents (done under i_mutex via
 * ext4_end_io_nolock()), then free the io_end.
 *
 * Runs from the per-sb dio_unwritten_wq workqueue.  If the io_end is
 * currently owned by fsync (EXT4_IO_END_IN_FSYNC) or i_mutex cannot be
 * taken without blocking, the work is requeued instead of waiting.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	struct inode *inode = io->inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	/*
	 * NOTE(review): flag name suggests fsync is processing this
	 * io_end right now — requeue and let it finish (confirm against
	 * the fsync path that sets EXT4_IO_END_IN_FSYNC).
	 */
	if (io->flag & EXT4_IO_END_IN_FSYNC)
		goto requeue;
	if (list_empty(&io->list)) {
		/*
		 * No longer on i_completed_io_list: presumably someone
		 * else already did the conversion, so only the free is
		 * left to do.
		 */
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		goto free;
	}

	if (!mutex_trylock(&inode->i_mutex)) {
		bool was_queued;
requeue:
		/* Jumped to with i_completed_io_lock still held. */
		was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
		io->flag |= EXT4_IO_END_QUEUED;
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		/*
		 * Requeue the work instead of waiting so that the work
		 * items queued after this one can be processed.
		 */
		queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq,
			   &io->work);
		/*
		 * If this io_end had already been requeued once
		 * (EXT4_IO_END_QUEUED was set), yield the CPU so the
		 * worker thread does not spin requeueing it.
		 */
		if (was_queued)
			yield();
		return;
	}
	list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	/* Return value deliberately ignored on this path. */
	(void) ext4_end_io_nolock(io);
	mutex_unlock(&inode->i_mutex);
free:
	ext4_free_io_end(io);
}
/*
 * Work on completed AIO/DIO IO: convert unwritten extents to written
 * extents via ext4_end_io_nolock() (which requires i_mutex), then free
 * the io_end.  Requeues itself when the io_end is busy (in fsync) or
 * i_mutex is contended.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	struct inode *inode = io->inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	/* fsync currently owns this io_end — back off and requeue. */
	if (io->flag & EXT4_IO_END_IN_FSYNC)
		goto requeue;
	if (list_empty(&io->list)) {
		/* Already removed from the list: nothing left but the free. */
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		goto free;
	}

	if (!mutex_trylock(&inode->i_mutex)) {
		bool was_queued;
requeue:
		/* Reached with i_completed_io_lock still held. */
		was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
		io->flag |= EXT4_IO_END_QUEUED;
		spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
		/*
		 * Requeue the work instead of waiting so that the work
		 * items queued after this can be processed.
		 */
		queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq,
			   &io->work);
		/*
		 * To prevent the ext4-dio-unwritten thread from keeping
		 * requeueing end_io requests and occupying cpu for too long,
		 * yield the cpu if it sees an end_io request that has already
		 * been requeued.
		 */
		if (was_queued)
			yield();
		return;
	}
	list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	/* Conversion result intentionally ignored on the work path. */
	(void) ext4_end_io_nolock(io);
	mutex_unlock(&inode->i_mutex);
free:
	ext4_free_io_end(io);
}
/*
 * Work on completed AIO/DIO IO: convert unwritten extents to written
 * extents.  This variant blocks on i_mutex rather than requeueing.
 */
static void ext4_end_io_work(struct work_struct *work)
{
	ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
	struct inode *inode = io->inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned long flags;
	int ret;

	mutex_lock(&inode->i_mutex);
	ret = ext4_end_io_nolock(io);
	if (ret < 0) {
		/*
		 * Conversion failed: return without freeing the io_end
		 * or touching i_completed_io_list.  NOTE(review):
		 * presumably a later pass (e.g. fsync) retries it —
		 * confirm against the callers that walk the list.
		 */
		mutex_unlock(&inode->i_mutex);
		return;
	}

	/* Unlink from i_completed_io_list, if still on it. */
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (!list_empty(&io->list))
		list_del_init(&io->list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
	mutex_unlock(&inode->i_mutex);
	ext4_free_io_end(io);
}