static void ext4_finish_bio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct page *data_page = NULL;
#endif
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			fscrypt_pullback_bio_page(&page, false);
		}
#endif

		if (bio->bi_error) {
			SetPageError(page);
			mapping_set_error(page->mapping, -EIO);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
			if (data_page)
				fscrypt_restore_control_page(data_page);
#endif
			end_page_writeback(page);
		}
	}
}
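/*
 * Illustration only, not kernel code: a minimal userspace sketch of the
 * bounce-page pullback at the top of the encrypted path above.  fscrypt
 * submits writes through bounce pages whose ->mapping is NULL, so
 * ext4_finish_bio() uses that to detect them and swaps the original
 * pagecache page back in before touching its buffers.  The struct and
 * field names below are invented stand-ins, not the fscrypt API.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_page {
	void		 *mapping;	/* NULL marks a bounce page */
	struct demo_page *original;	/* page the bounce page shadows */
};

/* Rough analogue of fscrypt_pullback_bio_page(&page, false). */
static void demo_pullback(struct demo_page **page)
{
	if (!(*page)->mapping)
		*page = (*page)->original;
}

int main(void)
{
	struct demo_page orig = { .mapping = (void *)0x1, .original = NULL };
	struct demo_page bounce = { .mapping = NULL, .original = &orig };
	struct demo_page *page = &bounce;

	demo_pullback(&page);
	printf("%s\n", page == &orig ? "original page restored" : "bounce page");
	return 0;
}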
static void ext4_finish_bio(struct bio *bio)
{
	int i;
	int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io)
			end_page_writeback(page);
	}
}
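/*
 * Illustration only, not kernel code: a self-contained sketch of the
 * do/while ring walk shared by both ext4_finish_bio() variants above.
 * Per-page buffer_heads form a circular list via b_this_page; only
 * buffers whose byte range falls entirely inside [bio_start, bio_end)
 * belong to this bio, and the rest may still be under I/O by another
 * bio.  Note that "continue" in a do/while still evaluates the loop
 * condition, so the ring pointer always advances.  All names below
 * (struct buf, finish_range) are invented for this demo.
 */
#include <stdio.h>

struct buf {
	unsigned	offset;		/* bh_offset(bh): byte offset in page */
	unsigned	size;		/* bh->b_size */
	int		async_write;	/* BH_Async_Write flag */
	struct buf	*next;		/* b_this_page, circular */
};

/* Clear our buffers' async-write flag; count buffers owned by other bios. */
static unsigned finish_range(struct buf *head, unsigned bio_start,
			     unsigned bio_end)
{
	struct buf *b = head;
	unsigned under_io = 0;

	do {
		if (b->offset < bio_start ||
		    b->offset + b->size > bio_end) {
			if (b->async_write)
				under_io++;
			continue;
		}
		b->async_write = 0;	/* clear_buffer_async_write() */
	} while ((b = b->next) != head);
	return under_io;
}

int main(void)
{
	/* A 4096-byte page split into four 1024-byte buffers. */
	struct buf b[4];
	for (int i = 0; i < 4; i++)
		b[i] = (struct buf){ .offset = i * 1024u, .size = 1024u,
				     .async_write = 1, .next = &b[(i + 1) % 4] };

	/* A bio covering only the first half of the page. */
	printf("still under io: %u\n", finish_range(&b[0], 0, 2048));
	return 0;
}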
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		loff_t offset;
		loff_t io_end_offset;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			head = page_buffers(page);
			BUG_ON(!head);

			io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset))
					buffer_io_error(bh);

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
		ext4_free_io_end(io_end);
		return;
	}

	/* Add the io_end to per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}
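/*
 * Illustration only, not kernel code: a sketch of the unit conversion
 * in the ext4_warning() call above.  bi_sector counts 512-byte sectors
 * (2^9 bytes) and i_blkbits is log2 of the filesystem block size, so
 * shifting right by (i_blkbits - 9) turns a sector number into the
 * logical block number.  Note the function above also saves bi_sector
 * *before* bio_put(), because the bio may be freed by that call.
 * Values below are made up for the example.
 */
#include <stdio.h>

static unsigned long long sector_to_block(unsigned long long sector,
					  unsigned blkbits)
{
	return sector >> (blkbits - 9);
}

int main(void)
{
	/* 4096-byte blocks (blkbits = 12): 8 sectors per block. */
	printf("block %llu\n", sector_to_block(24, 12));	/* -> block 3 */
	return 0;
}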
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct workqueue_struct *wq;
	struct inode *inode;
	unsigned long flags;
	int i;
	/* Save the starting sector: the bio may be freed by bio_put() below. */
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	bio_put(bio);

	for (i = 0; i < io_end->num_io_pages; i++) {
		struct page *page = io_end->pages[i]->p_page;
		struct buffer_head *bh, *head;
		int partial_write = 0;

		head = page_buffers(page);
		if (error)
			SetPageError(page);
		BUG_ON(!head);
		if (head->b_size == PAGE_CACHE_SIZE)
			clear_buffer_dirty(head);
		else {
			loff_t offset;
			loff_t io_end_offset = io_end->offset + io_end->size;

			offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
			bh = head;
			do {
				if ((offset >= io_end->offset) &&
				    (offset+bh->b_size <= io_end_offset)) {
					if (error)
						buffer_io_error(bh);

					clear_buffer_dirty(bh);
				}

				if (buffer_delay(bh))
					partial_write = 1;
				else if (!buffer_mapped(bh))
					clear_buffer_dirty(bh);
				else if (buffer_dirty(bh))
					partial_write = 1;

				offset += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);
		}

		/*
		 * If this is a partial write which happened to make
		 * all buffers uptodate then we can optimize away a
		 * bogus readpage() for the next read(). Here we
		 * 'discover' whether the page went uptodate as a
		 * result of this (potentially partial) write.
		 */
		if (!partial_write)
			SetPageUptodate(page);

		put_io_page(io_end->pages[i]);
	}
	io_end->num_io_pages = 0;
	inode = io_end->inode;

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	/* Add the io_end to per-inode completed io list */
	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
	list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
	spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);

	wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
	/* queue the work to convert unwritten extents to written */
	queue_work(wq, &io_end->work);
}