/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio;
        int ret = 0, rw = WRITE;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
#ifdef CONFIG_FRONTSWAP
        if (frontswap_store(page) == 0) {
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
                goto out;
        }
#endif
        bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= REQ_SYNC;
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio;
        int ret = 0, rw = WRITE;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        if (frontswap_put_page(page) == 0) {
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
                goto out;
        }
        bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}
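Both swap_writepage() variants above complete writeback without issuing any block I/O when frontswap accepts the page: they set the writeback bit, unlock the page, and immediately end writeback so the page is left clean for the VM. A minimal sketch of that idiom factored into a helper; the helper name is hypothetical, and the kernel snippets above deliberately open-code the three calls:

/* Hypothetical helper: mark a locked page as "written back" without
 * doing any I/O, mirroring the frontswap success path above. */
static void fake_page_writeback(struct page *page)
{
        /* Claim the page for writeback... */
        set_page_writeback(page);
        /* ...the writeback bit now protects the page, so drop the lock... */
        unlock_page(page);
        /* ...and complete immediately, since no bio was ever submitted. */
        end_page_writeback(page);
}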
static int rawfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode;
        unsigned long end_index;
        char *buffer;
        int n_written = 0;
        unsigned n_bytes;
        loff_t i_size;

        if (!mapping)
                BUG();
        inode = mapping->host;
        if (!inode)
                BUG();
        i_size = i_size_read(inode);

        end_index = i_size >> PAGE_CACHE_SHIFT;

        if (page->index < end_index)
                n_bytes = PAGE_CACHE_SIZE;
        else {
                n_bytes = i_size & (PAGE_CACHE_SIZE - 1);

                if (page->index > end_index || !n_bytes) {
                        /* Page lies wholly beyond EOF: pretend it was
                         * written so the VM can reclaim it. */
                        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                        set_page_writeback(page);
                        unlock_page(page);
                        end_page_writeback(page);
                        return 0;
                }
        }

        if (n_bytes != PAGE_CACHE_SIZE)
                zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);

        get_page(page);

        buffer = kmap(page);

        /* n_written = yaffs_wr_file(obj, buffer,
                                     page->index << PAGE_CACHE_SHIFT,
                                     n_bytes, 0); */
        n_written = rawfs_write_file(inode, buffer, n_bytes,
                                     page->index << PAGE_CACHE_SHIFT);

        kunmap(page);
        set_page_writeback(page);
        unlock_page(page);
        end_page_writeback(page);
        put_page(page);

        return (n_written == n_bytes) ? 0 : -ENOSPC;
}
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio;
        int ret = 0, rw = WRITE;

        if (try_to_free_swap(page)) {
                set_swapped_bit(page_private(page), 0);
                unlock_page(page);
                goto out;
        }
        bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= REQ_SYNC;
        /* Swap out the encrypted copy rather than the page itself. */
        bio->bi_io_vec[0].bv_page = priv_encrypt_page(page);
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}
static int gfs2_aspace_writepage(struct page *page,
                                 struct writeback_control *wbc)
{
        int err;
        struct buffer_head *bh, *head;
        int nr_underway = 0;
        int write_op = (1 << BIO_RW_META) |
                ((wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE));

        BUG_ON(!PageLocked(page));
        BUG_ON(!page_has_buffers(page));

        head = page_buffers(page);
        bh = head;

        do {
                if (!buffer_mapped(bh))
                        continue;
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
                 * potentially cause a busy-wait loop from pdflush and kswapd
                 * activity, but those code paths have their own higher-level
                 * throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
                        lock_buffer(bh);
                } else if (!trylock_buffer(bh)) {
                        redirty_page_for_writepage(wbc, page);
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
                        mark_buffer_async_write(bh);
                } else {
                        unlock_buffer(bh);
                }
        } while ((bh = bh->b_this_page) != head);

        /*
         * The page and its buffers are protected by PageWriteback(), so we can
         * drop the bh refcounts early.
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);

        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(write_op, bh);
                        nr_underway++;
                }
                bh = next;
        } while (bh != head);
        unlock_page(page);

        err = 0;
        if (nr_underway == 0)
                end_page_writeback(page);

        return err;
}
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        struct ext4_io_page *io_page;
        struct buffer_head *bh, *head;
        int ret = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(PageWriteback(page));

        /* Allocate the tracking structure before setting PageWriteback,
         * so the ENOMEM path does not leave the writeback bit stuck. */
        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        io_page->p_page = page;
        atomic_set(&io_page->p_count, 1);
        get_page(page);
        set_page_writeback(page);
        ClearPageError(page);

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        set_page_dirty(page);
                        break;
                }
        }
        unlock_page(page);
        /*
         * If the page was truncated before we could do the writeback,
         * or we had a memory allocation error while trying to write
         * the first buffer head, we won't have submitted any pages for
         * I/O.  In that case we need to make sure we've cleared the
         * PageWriteback bit from the page to prevent the system from
         * wedging later on.
         */
        put_io_page(io_page);
        return ret;
}
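The closing comment above leans on put_io_page() to clear PageWriteback once the last reference to the tracking structure is dropped. The helper itself is not shown in this section; a plausible sketch consistent with the p_page/p_count fields used above (treat the exact body as an assumption):

/* Assumed shape of put_io_page(): the submission path and each
 * in-flight bio hold a reference; the final put ends writeback and
 * releases the page. */
static void put_io_page(struct ext4_io_page *io_page)
{
        if (atomic_dec_and_test(&io_page->p_count)) {
                end_page_writeback(io_page->p_page);
                put_page(io_page->p_page);
                kmem_cache_free(io_page_cachep, io_page);
        }
}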
int do_write_data_page(struct f2fs_io_info *fio)
{
        struct page *page = fio->page;
        struct inode *inode = page->mapping->host;
        struct dnode_of_data dn;
        int err = 0;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
        if (err)
                return err;

        fio->blk_addr = dn.data_blkaddr;

        /* This page is already truncated */
        if (fio->blk_addr == NULL_ADDR) {
                ClearPageUptodate(page);
                goto out_writepage;
        }

        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
                fio->encrypted_page = f2fs_encrypt(inode, fio->page);
                if (IS_ERR(fio->encrypted_page)) {
                        err = PTR_ERR(fio->encrypted_page);
                        goto out_writepage;
                }
        }

        set_page_writeback(page);

        /*
         * If current allocation needs SSR,
         * it had better in-place writes for updated data.
         */
        if (unlikely(fio->blk_addr != NEW_ADDR &&
                        !is_cold_data(page) &&
                        need_inplace_update(inode))) {
                rewrite_data_page(fio);
                set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
                trace_f2fs_do_write_data_page(page, IPU);
        } else {
                write_data_page(&dn, fio);
                set_data_blkaddr(&dn);
                f2fs_update_extent_cache(&dn);
                trace_f2fs_do_write_data_page(page, OPU);
                set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
                if (page->index == 0)
                        set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
        }
out_writepage:
        f2fs_put_dnode(&dn);
        return err;
}
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        struct ext4_io_page *io_page;
        struct buffer_head *bh, *head;
        int ret = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        io_page->p_page = page;
        atomic_set(&io_page->p_count, 1);
        get_page(page);
        set_page_writeback(page);
        ClearPageError(page);

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_start >= len) {
                        zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                clear_buffer_dirty(bh);
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        set_page_dirty(page);
                        break;
                }
        }
        unlock_page(page);
        put_io_page(io_page);
        return ret;
}
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
                size_t nr_pages)
{
        struct logfs_super *super = logfs_super(sb);
        struct address_space *mapping = super->s_mapping_inode->i_mapping;
        struct bio *bio;
        struct page *page;
        struct request_queue *q = bdev_get_queue(sb->s_bdev);
        unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
        int i;

        if (max_pages > BIO_MAX_PAGES)
                max_pages = BIO_MAX_PAGES;
        bio = bio_alloc(GFP_NOFS, max_pages);
        BUG_ON(!bio);

        for (i = 0; i < nr_pages; i++) {
                if (i >= max_pages) {
                        /* Block layer cannot split bios :( */
                        bio->bi_vcnt = i;
                        bio->bi_idx = 0;
                        bio->bi_size = i * PAGE_SIZE;
                        bio->bi_bdev = super->s_bdev;
                        bio->bi_sector = ofs >> 9;
                        bio->bi_private = sb;
                        bio->bi_end_io = writeseg_end_io;
                        atomic_inc(&super->s_pending_writes);
                        submit_bio(WRITE, bio);

                        ofs += i * PAGE_SIZE;
                        index += i;
                        nr_pages -= i;
                        i = 0;

                        bio = bio_alloc(GFP_NOFS, max_pages);
                        BUG_ON(!bio);
                }
                page = find_lock_page(mapping, index + i);
                BUG_ON(!page);
                bio->bi_io_vec[i].bv_page = page;
                bio->bi_io_vec[i].bv_len = PAGE_SIZE;
                bio->bi_io_vec[i].bv_offset = 0;

                BUG_ON(PageWriteback(page));
                set_page_writeback(page);
                unlock_page(page);
        }
        /* Submit the final, possibly partial, bio; this mirrors the
         * flush done above when a bio fills up mid-loop. */
        bio->bi_vcnt = nr_pages;
        bio->bi_idx = 0;
        bio->bi_size = nr_pages * PAGE_SIZE;
        bio->bi_bdev = super->s_bdev;
        bio->bi_sector = ofs >> 9;
        bio->bi_private = sb;
        bio->bi_end_io = writeseg_end_io;
        atomic_inc(&super->s_pending_writes);
        submit_bio(WRITE, bio);
        return 0;
}
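__bdev_writeseg() hands every bio to writeseg_end_io, which is not included in this section. A hedged sketch of what such a completion handler has to do, reconstructed from the accounting above (the page references taken by find_lock_page() and the s_pending_writes increment at submit time); the body is an assumption, not quoted source:

/* Assumed completion handler: end writeback on every page in the bio,
 * drop the page references, and balance s_pending_writes. */
static void writeseg_end_io(struct bio *bio, int err)
{
        struct super_block *sb = bio->bi_private;
        struct logfs_super *super = logfs_super(sb);
        int i;

        BUG_ON(err);    /* no retry path in this sketch */
        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bio->bi_io_vec[i].bv_page;

                end_page_writeback(page);
                page_cache_release(page);
        }
        bio_put(bio);
        atomic_dec(&super->s_pending_writes);
}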
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret = 0;

        if (try_to_free_swap(page)) {
                unlock_page(page);
                goto out;
        }
        if (frontswap_store(page) == 0) {
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
                goto out;
        }
        ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
        return ret;
}
int __swap_writepage(struct page *page, struct writeback_control *wbc,
        void (*end_write_func)(struct bio *, int))
{
        struct bio *bio;
        int ret = 0, rw = WRITE;
        struct swap_info_struct *sis = page_swap_info(page);

        bio = get_swap_bio(GFP_NOIO, page, end_write_func);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= REQ_SYNC;
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}
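Splitting __swap_writepage() out of swap_writepage() lets other callers supply their own completion callback through the void (*)(struct bio *, int) parameter. A sketch of what such a callback looks like, modeled on the duties of end_swap_bio_write (redirty on error so the data is not lost, always clear the writeback bit); the exact body is an assumption:

/* Sketch of a completion callback suitable for __swap_writepage(). */
static void example_end_swap_write(struct bio *bio, int err)
{
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (err) {
                SetPageError(page);
                /* Keep the data around so it can be written again later. */
                set_page_dirty(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}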
static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                             void *data)
{
        struct mpage_data *mpd = data;
        struct bio *bio = mpd->bio;
        struct address_space *mapping = page->mapping;
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        unsigned long end_index;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        sector_t last_block;
        sector_t block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_unmapped = blocks_per_page;
        struct block_device *bdev = NULL;
        int boundary = 0;
        sector_t boundary_block = 0;
        struct block_device *boundary_bdev = NULL;
        int length;
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;
        int wr = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);

        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                /* If they're all mapped and dirty, do it */
                page_block = 0;
                do {
                        BUG_ON(buffer_locked(bh));
                        if (!buffer_mapped(bh)) {
                                /*
                                 * unmapped dirty buffers are created by
                                 * __set_page_dirty_buffers -> mmapped data
                                 */
                                if (buffer_dirty(bh))
                                        goto confused;
                                if (first_unmapped == blocks_per_page)
                                        first_unmapped = page_block;
                                continue;
                        }

                        if (first_unmapped != blocks_per_page)
                                goto confused;  /* hole -> non-hole */

                        if (!buffer_dirty(bh) || !buffer_uptodate(bh))
                                goto confused;
                        if (page_block) {
                                if (bh->b_blocknr != blocks[page_block-1] + 1)
                                        goto confused;
                        }
                        blocks[page_block++] = bh->b_blocknr;
                        boundary = buffer_boundary(bh);
                        if (boundary) {
                                boundary_block = bh->b_blocknr;
                                boundary_bdev = bh->b_bdev;
                        }
                        bdev = bh->b_bdev;
                } while ((bh = bh->b_this_page) != head);

                if (first_unmapped)
                        goto page_is_mapped;

                /*
                 * Page has buffers, but they are all unmapped. The page was
                 * created by pagein or read over a hole which was handled by
                 * block_read_full_page().  If this address_space is also
                 * using mpage_readpages then this can rarely happen.
                 */
                goto confused;
        }

        /*
         * The page has no buffers: map it to disk
         */
        BUG_ON(!PageUptodate(page));
        block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
        last_block = (i_size - 1) >> blkbits;
        map_bh.b_page = page;
        for (page_block = 0; page_block < blocks_per_page; ) {

                map_bh.b_state = 0;
                map_bh.b_size = 1 << blkbits;
                if (mpd->get_block(inode, block_in_file, &map_bh, 1))
                        goto confused;
                if (buffer_new(&map_bh))
                        unmap_underlying_metadata(map_bh.b_bdev,
                                                  map_bh.b_blocknr);
                if (buffer_boundary(&map_bh)) {
                        boundary_block = map_bh.b_blocknr;
                        boundary_bdev = map_bh.b_bdev;
                }
                if (page_block) {
                        if (map_bh.b_blocknr != blocks[page_block-1] + 1)
                                goto confused;
                }
                blocks[page_block++] = map_bh.b_blocknr;
                boundary = buffer_boundary(&map_bh);
                bdev = map_bh.b_bdev;
                if (block_in_file == last_block)
                        break;
                block_in_file++;
        }
        BUG_ON(page_block == 0);

        first_unmapped = page_block;

page_is_mapped:
        end_index = i_size >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                /*
                 * The page straddles i_size.  It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size.  For a
                 * file that is not a multiple of the page size, the
                 * remaining memory is zeroed when mapped, and writes to that
                 * region are not written out to the file."
                 */
                unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

                if (page->index > end_index || !offset)
                        goto confused;
                zero_user_segment(page, offset, PAGE_CACHE_SIZE);
        }

        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && mpd->last_block_in_bio != blocks[0] - 1)
                bio = mpage_bio_submit(wr, bio);

alloc_new:
        if (bio == NULL) {
                if (first_unmapped == blocks_per_page) {
                        if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
                                             page, wbc)) {
                                clean_buffers(page, first_unmapped);
                                goto out;
                        }
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
                                  BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
                if (bio == NULL)
                        goto confused;

                wbc_init_bio(wbc, bio);
        }

        /*
         * Must try to add the page before marking the buffer clean or
         * the confused fail path above (OOM) will be very confused when
         * it finds all bh marked clean (i.e. it will not write anything)
         */
        wbc_account_io(wbc, page, PAGE_SIZE);
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
                bio = mpage_bio_submit(wr, bio);
                goto alloc_new;
        }

        clean_buffers(page, first_unmapped);

        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
                bio = mpage_bio_submit(wr, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                             boundary_block, 1 << blkbits);
                }
        } else {
                mpd->last_block_in_bio = blocks[blocks_per_page - 1];
        }
        goto out;

confused:
        if (bio)
                bio = mpage_bio_submit(wr, bio);

        if (mpd->use_writepage) {
                ret = mapping->a_ops->writepage(page, wbc);
        } else {
                ret = -EAGAIN;
                goto out;
        }
        /*
         * The caller has a ref on the inode, so *mapping is stable
         */
        mapping_set_error(mapping, ret);
out:
        mpd->bio = bio;
        return ret;
}
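__mpage_writepage() is not called directly; it is the per-page callback behind mpage_writepages(), with struct mpage_data carrying the open bio from one page to the next so adjacent pages coalesce into large bios. A minimal sketch of how a filesystem wires it up; myfs_writepages and myfs_get_block are hypothetical names:

/* Hypothetical filesystem glue: route ->writepages through the
 * generic mpage path, which walks dirty pages and feeds each one
 * to __mpage_writepage(). */
static int myfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, myfs_get_block);
}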
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
#ifndef CONFIG_CRYPTO_DEV_KFIPS
        int rc;
#else
        struct ecryptfs_page_crypt_req *page_crypt_req;
        int rc = 0;
#endif
#ifdef FEATURE_SDCARD_ENCRYPTION
        struct inode *ecryptfs_inode;
        struct ecryptfs_crypt_stat *crypt_stat =
                &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
        ecryptfs_inode = page->mapping->host;
#endif

#ifdef FEATURE_SDCARD_ENCRYPTION
        if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
                ecryptfs_printk(KERN_DEBUG,
                                "Passing through unencrypted page\n");
                rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
                                                       0, PAGE_CACHE_SIZE);
                if (rc) {
                        ClearPageUptodate(page);
                        goto out;
                }
                SetPageUptodate(page);
        } else {
#ifndef CONFIG_CRYPTO_DEV_KFIPS
                rc = ecryptfs_encrypt_page(page);
                if (rc) {
                        ecryptfs_printk(KERN_WARNING, "Error encrypting "
                                        "page (upper index [0x%.16lx])\n",
                                        page->index);
                        ClearPageUptodate(page);
#else
//              rc = ecryptfs_encrypt_page(page);
//              if (rc) {
//                      ecryptfs_printk(KERN_WARNING, "Error encrypting "
//                                      "page (upper index [0x%.16lx])\n",
//                                      page->index);
//                      ClearPageUptodate(page);
                page_crypt_req = ecryptfs_alloc_page_crypt_req(
                                        page, ecryptfs_writepage_complete);
                if (unlikely(!page_crypt_req)) {
                        rc = -ENOMEM;
                        ecryptfs_printk(KERN_ERR,
                                        "Failed to allocate page crypt request "
                                        "for encryption\n");
#endif
                        goto out;
                }
#ifndef CONFIG_CRYPTO_DEV_KFIPS
                SetPageUptodate(page);
#else
//              SetPageUptodate(page);
                set_page_writeback(page);
                ecryptfs_encrypt_page_async(page_crypt_req);
#endif
        }
#else
        rc = ecryptfs_encrypt_page(page);
        if (rc) {
                ecryptfs_printk(KERN_WARNING, "Error encrypting "
                                "page (upper index [0x%.16lx])\n",
                                page->index);
                ClearPageUptodate(page);
                goto out;
        }
        SetPageUptodate(page);
#endif
out:
        unlock_page(page);
        return rc;
}

static void strip_xattr_flag(char *page_virt,
                             struct ecryptfs_crypt_stat *crypt_stat)
{
        if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
                size_t written;

                crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
                ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
                                                &written);
                crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
        }
}

/**
 *   Header Extent:
 *     Octets 0-7:        Unencrypted file size (big-endian)
 *     Octets 8-15:       eCryptfs special marker
 *     Octets 16-19:      Flags
 *      Octet 16:         File format version number (between 0 and 255)
 *      Octets 17-18:     Reserved
 *      Octet 19:         Bit 1 (lsb): Reserved
 *                        Bit 2: Encrypted?
 *                        Bits 3-8: Reserved
 *     Octets 20-23:      Header extent size (big-endian)
 *     Octets 24-25:      Number of header extents at front of file
 *                        (big-endian)
 *     Octet  26:         Begin RFC 2440 authentication token packet set
 */

/**
 * ecryptfs_copy_up_encrypted_with_header
 * @page: Sort of a ``virtual'' representation of the encrypted lower
 *        file.  The actual lower file does not have the metadata in
 *        the header.  This is locked.
 * @crypt_stat: The eCryptfs inode's cryptographic context
 *
 * The ``view'' is the version of the file that userspace winds up
 * seeing, with the header information inserted.
 */
static int
ecryptfs_copy_up_encrypted_with_header(struct page *page,
                                       struct ecryptfs_crypt_stat *crypt_stat)
{
        loff_t extent_num_in_page = 0;
        loff_t num_extents_per_page = (PAGE_CACHE_SIZE
                                       / crypt_stat->extent_size);
        int rc = 0;

        while (extent_num_in_page < num_extents_per_page) {
                loff_t view_extent_num = ((((loff_t)page->index)
                                           * num_extents_per_page)
                                          + extent_num_in_page);
                size_t num_header_extents_at_front =
                        (crypt_stat->metadata_size / crypt_stat->extent_size);

                if (view_extent_num < num_header_extents_at_front) {
                        /* This is a header extent */
                        char *page_virt;

                        page_virt = kmap_atomic(page);
                        memset(page_virt, 0, PAGE_CACHE_SIZE);
                        /* TODO: Support more than one header extent */
                        if (view_extent_num == 0) {
                                size_t written;

                                rc = ecryptfs_read_xattr_region(
                                        page_virt, page->mapping->host);
                                strip_xattr_flag(page_virt + 16, crypt_stat);
                                ecryptfs_write_header_metadata(page_virt + 20,
                                                               crypt_stat,
                                                               &written);
                        }
                        kunmap_atomic(page_virt);
                        flush_dcache_page(page);
                        if (rc) {
                                printk(KERN_ERR "%s: Error reading xattr "
                                       "region; rc = [%d]\n", __func__, rc);
                                goto out;
                        }
                } else {
                        /* This is an encrypted data extent */
                        loff_t lower_offset =
                                ((view_extent_num * crypt_stat->extent_size)
                                 - crypt_stat->metadata_size);

                        rc = ecryptfs_read_lower_page_segment(
                                page, (lower_offset >> PAGE_CACHE_SHIFT),
                                (lower_offset & ~PAGE_CACHE_MASK),
                                crypt_stat->extent_size, page->mapping->host);
                        if (rc) {
                                printk(KERN_ERR "%s: Error attempting to read "
                                       "extent at offset [%lld] in the lower "
                                       "file; rc = [%d]\n", __func__,
                                       lower_offset, rc);
                                goto out;
                        }
                }
                extent_num_in_page++;
        }
out:
        return rc;
}
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
        void *src_addr, *dst_addr;
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(dn->inode),
                .type = DATA,
                .rw = WRITE_SYNC | REQ_PRIO,
                .page = page,
                .encrypted_page = NULL,
        };
        int dirty, err;

        f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

        if (!f2fs_exist_data(dn->inode))
                goto clear_out;

        err = f2fs_reserve_block(dn, 0);
        if (err)
                return err;

        f2fs_wait_on_page_writeback(page, DATA);

        if (PageUptodate(page))
                goto no_update;

        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

        /* Copy the whole inline data block */
        src_addr = inline_data_addr(dn->inode_page);
        dst_addr = kmap_atomic(page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        flush_dcache_page(page);
        kunmap_atomic(dst_addr);
        SetPageUptodate(page);
no_update:
        set_page_dirty(page);

        /* clear dirty state */
        dirty = clear_page_dirty_for_io(page);

        /* write data page to try to make data consistent */
        set_page_writeback(page);
        fio.blk_addr = dn->data_blkaddr;
        write_data_page(dn, &fio);
        set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
        f2fs_wait_on_page_writeback(page, DATA);
        if (dirty)
                inode_dec_dirty_pages(dn->inode);

        /* this converted inline_data should be recovered. */
        set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

        /* clear inline data and flag after data writeback */
        truncate_inline_inode(dn->inode_page, 0);
clear_out:
        stat_dec_inline_inode(dn->inode);
        f2fs_clear_inline_inode(dn->inode);
        sync_inode_page(dn);
        f2fs_put_dnode(dn);
        return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        struct page *ipage, *page;
        int err = 0;

        page = grab_cache_page(inode->i_mapping, 0);
        if (!page)
                return -ENOMEM;

        f2fs_lock_op(sbi);

        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        set_new_dnode(&dn, inode, ipage, ipage, 0);

        if (f2fs_has_inline_data(inode))
                err = f2fs_convert_inline_page(&dn, page);

        f2fs_put_dnode(&dn);
out:
        f2fs_unlock_op(sbi);

        f2fs_put_page(page, 1);
        return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
        void *src_addr, *dst_addr;
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
        if (err)
                return err;

        if (!f2fs_has_inline_data(inode)) {
                f2fs_put_dnode(&dn);
                return -EAGAIN;
        }

        f2fs_bug_on(F2FS_I_SB(inode), page->index);

        f2fs_wait_on_page_writeback(dn.inode_page, NODE);
        src_addr = kmap_atomic(page);
        dst_addr = inline_data_addr(dn.inode_page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        kunmap_atomic(src_addr);

        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

        sync_inode_page(&dn);
        f2fs_put_dnode(&dn);
        return 0;
}
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        struct ext4_io_page *io_page;
        struct buffer_head *bh, *head;
        int ret = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        io_page->p_page = page;
        atomic_set(&io_page->p_count, 1);
        get_page(page);
        set_page_writeback(page);
        ClearPageError(page);

        /*
         * Comments copied from block_write_full_page_endio:
         *
         * The page straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                clear_buffer_dirty(bh);
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        /*
                         * We only get here on ENOMEM.  Not much else
                         * we can do but mark the page as dirty, and
                         * better luck next time.
                         */
                        set_page_dirty(page);
                        break;
                }
        }
        unlock_page(page);
        /*
         * If the page was truncated before we could do the writeback,
         * or we had a memory allocation error while trying to write
         * the first buffer head, we won't have submitted any pages for
         * I/O.  In that case we need to make sure we've cleared the
         * PageWriteback bit from the page to prevent the system from
         * wedging later on.
         */
        put_io_page(io_page);
        return ret;
}
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
        int err;
        struct page *ipage;
        struct dnode_of_data dn;
        void *src_addr, *dst_addr;
        block_t new_blk_addr;
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct f2fs_io_info fio = {
                .type = DATA,
                .rw = WRITE_SYNC | REQ_PRIO,
        };

        f2fs_lock_op(sbi);
        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                /* drop the op lock on the error path as well */
                f2fs_unlock_op(sbi);
                return PTR_ERR(ipage);
        }

        /*
         * i_addr[0] is not used for inline data,
         * so reserving new block will not destroy inline data
         */
        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, 0);
        if (err) {
                f2fs_unlock_op(sbi);
                return err;
        }

        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

        /* Copy the whole inline data block */
        src_addr = inline_data_addr(ipage);
        dst_addr = kmap(page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        kunmap(page);
        SetPageUptodate(page);

        /* write data page to try to make data consistent */
        set_page_writeback(page);
        write_data_page(page, &dn, &new_blk_addr, &fio);
        update_extent_cache(new_blk_addr, &dn);
        f2fs_wait_on_page_writeback(page, DATA);

        /* clear inline data and flag after data writeback */
        zero_user_segment(ipage, INLINE_DATA_OFFSET,
                          INLINE_DATA_OFFSET + MAX_INLINE_DATA);
        clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
        stat_dec_inline_inode(inode);

        sync_inode_page(&dn);
        f2fs_put_dnode(&dn);
        f2fs_unlock_op(sbi);
        return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
        struct page *page;
        int err;

        if (!f2fs_has_inline_data(inode))
                return 0;
        else if (to_size <= MAX_INLINE_DATA)
                return 0;

        page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
        if (!page)
                return -ENOMEM;

        err = __f2fs_convert_inline_data(inode, page);
        f2fs_put_page(page, 1);
        return err;
}

int f2fs_write_inline_data(struct inode *inode,
                           struct page *page, unsigned size)
{
        void *src_addr, *dst_addr;
        struct page *ipage;
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
        if (err)
                return err;
        ipage = dn.inode_page;

        zero_user_segment(ipage, INLINE_DATA_OFFSET,
                          INLINE_DATA_OFFSET + MAX_INLINE_DATA);
        src_addr = kmap(page);
        dst_addr = inline_data_addr(ipage);
        memcpy(dst_addr, src_addr, size);
        kunmap(page);

        /* Release the first data block if it is allocated */
        if (!f2fs_has_inline_data(inode)) {
                truncate_data_blocks_range(&dn, 1);
                set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
                stat_inc_inline_inode(inode);
        }

        sync_inode_page(&dn);
        f2fs_put_dnode(&dn);

        return 0;
}
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        unsigned block_start, block_end, blocksize;
        struct ext4_io_page *io_page;
        struct buffer_head *bh, *head;
        int ret = 0;

        blocksize = 1 << inode->i_blkbits;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
        if (!io_page) {
                set_page_dirty(page);
                unlock_page(page);
                return -ENOMEM;
        }
        io_page->p_page = page;
        atomic_set(&io_page->p_count, 1);
        get_page(page);
        set_page_writeback(page);
        ClearPageError(page);

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + blocksize;
                if (block_start >= len) {
                        /*
                         * Comments copied from block_write_full_page_endio:
                         *
                         * The page straddles i_size.  It must be zeroed out on
                         * each and every writepage invocation because it may
                         * be mmapped.  "A file is mapped in multiples of the
                         * page size.  For a file that is not a multiple of
                         * the page size, the remaining memory is zeroed when
                         * mapped, and writes to that region are not written
                         * out to the file."
                         */
                        zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                clear_buffer_dirty(bh);
                ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
                if (ret) {
                        set_page_dirty(page);
                        break;
                }
        }
        unlock_page(page);
        put_io_page(io_page);
        return ret;
}
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(dn->inode),
                .type = DATA,
                .op = REQ_OP_WRITE,
                .op_flags = REQ_SYNC | REQ_PRIO,
                .page = page,
                .encrypted_page = NULL,
        };
        int dirty, err;

        if (!f2fs_exist_data(dn->inode))
                goto clear_out;

        err = f2fs_reserve_block(dn, 0);
        if (err)
                return err;

        f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

        read_inline_data(page, dn->inode_page);
        set_page_dirty(page);

        /* clear dirty state */
        dirty = clear_page_dirty_for_io(page);

        /* write data page to try to make data consistent */
        set_page_writeback(page);
        fio.old_blkaddr = dn->data_blkaddr;
        set_inode_flag(dn->inode, FI_HOT_DATA);
        write_data_page(dn, &fio);
        f2fs_wait_on_page_writeback(page, DATA, true);
        if (dirty) {
                inode_dec_dirty_pages(dn->inode);
                remove_dirty_inode(dn->inode);
        }

        /* this converted inline_data should be recovered. */
        set_inode_flag(dn->inode, FI_APPEND_WRITE);

        /* clear inline data and flag after data writeback */
        truncate_inline_inode(dn->inode, dn->inode_page, 0);
        clear_inline_node(dn->inode_page);
clear_out:
        stat_dec_inline_inode(dn->inode);
        clear_inode_flag(dn->inode, FI_INLINE_DATA);
        f2fs_put_dnode(dn);
        return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        struct page *ipage, *page;
        int err = 0;

        if (!f2fs_has_inline_data(inode))
                return 0;

        page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
        if (!page)
                return -ENOMEM;

        f2fs_lock_op(sbi);

        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        set_new_dnode(&dn, inode, ipage, ipage, 0);

        if (f2fs_has_inline_data(inode))
                err = f2fs_convert_inline_page(&dn, page);

        f2fs_put_dnode(&dn);
out:
        f2fs_unlock_op(sbi);

        f2fs_put_page(page, 1);

        f2fs_balance_fs(sbi, dn.node_changed);

        return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
        void *src_addr, *dst_addr;
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
        if (err)
                return err;

        if (!f2fs_has_inline_data(inode)) {
                f2fs_put_dnode(&dn);
                return -EAGAIN;
        }

        f2fs_bug_on(F2FS_I_SB(inode), page->index);

        f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
        src_addr = kmap_atomic(page);
        dst_addr = inline_data_addr(dn.inode_page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        kunmap_atomic(src_addr);
        set_page_dirty(dn.inode_page);

        set_inode_flag(inode, FI_APPEND_WRITE);
        set_inode_flag(inode, FI_DATA_EXIST);

        clear_inline_node(dn.inode_page);
        f2fs_put_dnode(&dn);
        return 0;
}
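f2fs_convert_inline_inode() is the entry point callers use once a file can no longer be served from inline data. A hedged usage sketch of a caller in a write path; the surrounding code is assumed, not shown in this section:

        /* Hypothetical caller: before a buffered write that would not
         * fit in the inode, migrate the inline payload to a real block. */
        if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }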
/**
 * ntfs_mft_writepage - check if a metadata page contains dirty mft records
 * @page:	metadata page possibly containing dirty mft records
 * @wbc:	writeback control structure
 *
 * This is called from the VM when it wants to have a dirty $MFT/$DATA metadata
 * page cache page cleaned.  The VM has already locked the page and marked it
 * clean.  Instead of writing the page as a conventional ->writepage function
 * would do, we check if the page still contains any dirty mft records (it must
 * have done at some point in the past since the page was marked dirty) and if
 * none are found, i.e. all mft records are clean, we unlock the page and
 * return.  The VM is then free to do with the page as it pleases.  If on the
 * other hand we do find any dirty mft records in the page, we redirty the page
 * before unlocking it and returning so the VM knows that the page is still
 * busy and cannot be thrown out.
 *
 * Note, we do not actually write any dirty mft records here because they are
 * dirty inodes and hence will be written by the VFS inode dirty code paths,
 * too, and in fact once we implement journalling it would be a complete
 * nightmare having two code paths leading to mft record writeout.
 */
static int ntfs_mft_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *mft_vi = page->mapping->host;
        struct super_block *sb = mft_vi->i_sb;
        ntfs_volume *vol = NTFS_SB(sb);
        u8 *maddr;
        MFT_RECORD *m;
        ntfs_inode **extent_nis;
        unsigned long mft_no;
        int nr, i, j;
        BOOL is_dirty = FALSE;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));
        BUG_ON(mft_vi != vol->mft_ino);
        /* The first mft record number in the page. */
        mft_no = page->index << (PAGE_CACHE_SHIFT - vol->mft_record_size_bits);
        /* Number of mft records in the page. */
        nr = PAGE_CACHE_SIZE >> vol->mft_record_size_bits;
        BUG_ON(!nr);
        ntfs_debug("Entering for %i inodes starting at 0x%lx.", nr, mft_no);
        /* Iterate over the mft records in the page looking for a dirty one. */
        maddr = (u8*)kmap(page);
        for (i = 0; i < nr; ++i, ++mft_no, maddr += vol->mft_record_size) {
                struct inode *vi;
                ntfs_inode *ni, *eni;
                ntfs_attr na;

                na.mft_no = mft_no;
                na.name = NULL;
                na.name_len = 0;
                na.type = AT_UNUSED;
                /*
                 * Check if the inode corresponding to this mft record is in
                 * the VFS inode cache and obtain a reference to it if it is.
                 */
                ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
                /*
                 * For inode 0, i.e. $MFT itself, we cannot use ilookup5()
                 * from here or we deadlock because the inode is already
                 * locked by the kernel (fs/fs-writeback.c::
                 * __sync_single_inode()) and ilookup5() waits until the
                 * inode is unlocked before returning it and it never gets
                 * unlocked because ntfs_mft_writepage() never returns.  )-:
                 * Fortunately, we have inode 0 pinned in icache for the
                 * duration of the mount so we can access it directly.
                 */
                if (!mft_no) {
                        /* Balance the below iput(). */
                        vi = igrab(mft_vi);
                        BUG_ON(vi != mft_vi);
                } else
                        vi = ilookup5(sb, mft_no, (test_t)ntfs_test_inode,
                                        &na);
                if (vi) {
                        ntfs_debug("Inode 0x%lx is in icache.", mft_no);
                        /* The inode is in icache.  Check if it is dirty. */
                        ni = NTFS_I(vi);
                        if (!NInoDirty(ni)) {
                                /* The inode is not dirty, skip this record. */
                                ntfs_debug("Inode 0x%lx is not dirty, "
                                                "continuing search.", mft_no);
                                iput(vi);
                                continue;
                        }
                        ntfs_debug("Inode 0x%lx is dirty, aborting search.",
                                        mft_no);
                        /* The inode is dirty, no need to search further. */
                        iput(vi);
                        is_dirty = TRUE;
                        break;
                }
                ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
                /* The inode is not in icache. */
                /* Skip the record if it is not a mft record (type "FILE"). */
                if (!ntfs_is_mft_recordp(maddr)) {
                        ntfs_debug("Mft record 0x%lx is not a FILE record, "
                                        "continuing search.", mft_no);
                        continue;
                }
                m = (MFT_RECORD*)maddr;
                /*
                 * Skip the mft record if it is not in use.  FIXME: What about
                 * deleted/deallocated (extent) inodes?  (AIA)
                 */
                if (!(m->flags & MFT_RECORD_IN_USE)) {
                        ntfs_debug("Mft record 0x%lx is not in use, "
                                        "continuing search.", mft_no);
                        continue;
                }
                /* Skip the mft record if it is a base inode. */
                if (!m->base_mft_record) {
                        ntfs_debug("Mft record 0x%lx is a base record, "
                                        "continuing search.", mft_no);
                        continue;
                }
                /*
                 * This is an extent mft record.  Check if the inode
                 * corresponding to its base mft record is in icache.
                 */
                na.mft_no = MREF_LE(m->base_mft_record);
                ntfs_debug("Mft record 0x%lx is an extent record.  Looking "
                                "for base inode 0x%lx in icache.", mft_no,
                                na.mft_no);
                vi = ilookup5(sb, na.mft_no, (test_t)ntfs_test_inode, &na);
                if (!vi) {
                        /*
                         * The base inode is not in icache.  Skip this extent
                         * mft record.
                         */
                        ntfs_debug("Base inode 0x%lx is not in icache, "
                                        "continuing search.", na.mft_no);
                        continue;
                }
                ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
                /*
                 * The base inode is in icache.  Check if it has the extent
                 * inode corresponding to this extent mft record attached.
                 */
                ni = NTFS_I(vi);
                down(&ni->extent_lock);
                if (ni->nr_extents <= 0) {
                        /*
                         * The base inode has no attached extent inodes.  Skip
                         * this extent mft record.
                         */
                        up(&ni->extent_lock);
                        iput(vi);
                        continue;
                }
                /* Iterate over the attached extent inodes. */
                extent_nis = ni->ext.extent_ntfs_inos;
                for (eni = NULL, j = 0; j < ni->nr_extents; ++j) {
                        if (mft_no == extent_nis[j]->mft_no) {
                                /*
                                 * Found the extent inode corresponding to
                                 * this extent mft record.
                                 */
                                eni = extent_nis[j];
                                break;
                        }
                }
                /*
                 * If the extent inode was not attached to the base inode,
                 * skip this extent mft record.
                 */
                if (!eni) {
                        up(&ni->extent_lock);
                        iput(vi);
                        continue;
                }
                /*
                 * Found the extent inode corresponding to this extent mft
                 * record.  If it is dirty, no need to search further.
                 */
                if (NInoDirty(eni)) {
                        up(&ni->extent_lock);
                        iput(vi);
                        is_dirty = TRUE;
                        break;
                }
                /* The extent inode is not dirty, so do the next record. */
                up(&ni->extent_lock);
                iput(vi);
        }
        kunmap(page);
        /* If a dirty mft record was found, redirty the page. */
        if (is_dirty) {
                ntfs_debug("Inode 0x%lx is dirty.  Redirtying the page "
                                "starting at inode 0x%lx.", mft_no,
                                page->index << (PAGE_CACHE_SHIFT -
                                vol->mft_record_size_bits));
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
        } else {
                /*
                 * Keep the VM happy.  This must be done otherwise the
                 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
                 * the page is clean.
                 */
                BUG_ON(PageWriteback(page));
                set_page_writeback(page);
                unlock_page(page);
                end_page_writeback(page);
        }
        ntfs_debug("Done.");
        return 0;
}