/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	f2fs_bug_on(get_dirty_dents(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	stat_dec_inline_inode(inode);
	f2fs_unlock_op(sbi);

no_delete:
	end_writeback(inode);
	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
}
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_dir_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(fi, FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode, true);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	f2fs_unlock_op(sbi);

no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(fi, FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
	if (is_inode_flag_set(fi, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(fi, FI_FREE_NID);
	}
out_clear:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	if (fi->i_crypt_info)
		f2fs_free_encryption_info(inode, fi->i_crypt_info);
#endif
	clear_inode(inode);
}
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	f2fs_unlock_op(sbi);

	sb_end_intwrite(inode->i_sb);
no_delete:
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	/* update extent info in inode */
	if (inode->i_nlink)
		f2fs_preserve_extent_tree(inode);
	f2fs_destroy_extent_tree(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
out_clear:
	clear_inode(inode);
}
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(get_dirty_dents(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	stat_dec_inline_inode(inode);
	f2fs_unlock_op(sbi);

	sb_end_intwrite(inode->i_sb);
no_delete:
	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
out_clear:
	clear_inode(inode);
}
/*
 * caller should call f2fs_lock_op()
 */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	clear_nlink(inode);
	make_bad_inode(inode);
	unlock_new_inode(inode);

	i_size_write(inode, 0);
	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	remove_inode_page(inode);
	stat_dec_inline_inode(inode);

	alloc_nid_failed(sbi, inode->i_ino);
	f2fs_unlock_op(sbi);

	/* iput() will drop the inode object */
	iput(inode);
}
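For context, a minimal caller sketch of the locking contract noted above, loosely modeled on the creation paths in fs/f2fs/namei.c: the caller takes f2fs_lock_op() before linking the new inode, and on failure hands the inode to handle_failed_inode(), which releases the lock and drops the inode itself. The wrapper name f2fs_do_create is hypothetical; only the lock/unlock pairing is the point.

/* Hypothetical caller, sketching how handle_failed_inode() pairs with
 * f2fs_lock_op(): the failure path must NOT call f2fs_unlock_op() itself,
 * since handle_failed_inode() drops the lock before iput(). */
static int f2fs_do_create(struct inode *dir, struct dentry *dentry,
					struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	int err;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);	/* may fail, e.g. -ENOSPC */
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
out:
	handle_failed_inode(inode);	/* unlocks sbi and drops the inode */
	return err;
}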
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving a new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode,
				struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
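A hedged usage sketch for f2fs_convert_inline_data(): in this era the write path passes the end offset of a pending write, so the payload is moved out to a regular block before it can outgrow the inline area. The function below is illustrative, not an upstream helper; only the conversion call reflects the real API.

/* Illustrative only: roughly how a write_begin-style path would use
 * f2fs_convert_inline_data() before touching data beyond the inline area. */
static int f2fs_prepare_write_sketch(struct inode *inode, loff_t pos,
					unsigned len)
{
	int err;

	/* convert if the write would no longer fit in the inline region */
	err = f2fs_convert_inline_data(inode, pos + len);
	if (err)
		return err;

	/* ... the normal block-based write path continues here ... */
	return 0;
}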
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		goto no_delete;
#endif

	remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		update_inode_page(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	}
	f2fs_bug_on(sbi, err &&
		!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
out_clear:
	fscrypt_put_encryption_info(inode, NULL);
	clear_inode(inode);
}
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_dir_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(fi, FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode, true);

	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
	}

no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(fi, FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
	if (is_inode_flag_set(fi, FI_FREE_NID)) {
		if (err && err != -ENOENT)
			alloc_nid_done(sbi, inode->i_ino);
		else
			alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(fi, FI_FREE_NID);
	}

	if (err && err != -ENOENT) {
		if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
			/*
			 * We get here because we failed to release the
			 * resources of this inode earlier; remind the user
			 * to run fsck to fix it.
			 */
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"inode (ino:%lu) resource leak, run fsck "
				"to fix this issue!", inode->i_ino);
		}
	}
out_clear:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	if (fi->i_crypt_info)
		f2fs_free_encryption_info(inode, fi->i_crypt_info);
#endif
	clear_inode(inode);
}
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	write_data_page(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_inline_node(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (!f2fs_has_inline_data(inode))
		return 0;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_page_dirty(dn.inode_page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_inline_node(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}
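As a usage sketch, modeled on (and simplified from) the mmap path in fs/f2fs/file.c: operations that need a real on-disk block call f2fs_convert_inline_inode() up front and bail out if conversion fails. f2fs_file_vm_ops is f2fs's fault-handler table; the body here is trimmed to the conversion call.

/* Simplified from the f2fs mmap path: inline-resident data cannot be
 * mapped page-for-page, so it is moved to a regular block first. */
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;	/* f2fs's fault handlers */
	return 0;
}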
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		f2fs_update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0 if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach an error
		 * condition, err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}
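For orientation, a condensed sketch of how the VFS reaches f2fs_evict_inode(): it is installed as the .evict_inode hook in f2fs's super_operations (f2fs_sops in fs/f2fs/super.c). The table below is trimmed to a few representative entries; the full upstream table carries more hooks.

/* Condensed from fs/f2fs/super.c: only a few representative hooks shown. */
static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,	/* may keep the inode cached */
	.destroy_inode	= f2fs_destroy_inode,
	.evict_inode	= f2fs_evict_inode,	/* called at the last iput() */
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.statfs		= f2fs_statfs,
};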