/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	f2fs_unlock_op(sbi);

no_delete:
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	/* update extent info in the inode */
	if (inode->i_nlink)
		f2fs_preserve_extent_tree(inode);
	f2fs_destroy_extent_tree(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
out_clear:
	end_writeback(inode);
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	if (update_inode_page(inode) && wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(get_dirty_dents(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	stat_dec_inline_inode(inode);
	f2fs_unlock_op(sbi);

	sb_end_intwrite(inode->i_sb);
no_delete:
	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
out_clear:
	clear_inode(inode);
}
void f2fs_invalidate_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
			(offset % PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* this page was written atomically; keep its Private flag */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to lock here to prevent producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_lock_op(sbi);
	update_inode_page(inode);
	f2fs_unlock_op(sbi);

	if (wbc)
		f2fs_balance_fs(sbi);

	return 0;
}
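/*
 * Illustrative sketch, not part of the original source: the comment in
 * f2fs_write_inode() above explains why update_inode_page() runs between
 * f2fs_lock_op() and f2fs_unlock_op(). The helper name below is
 * hypothetical; only the lock/unlock pairing is the point.
 */
static void example_update_under_cp_lock(struct f2fs_sb_info *sbi,
					struct inode *inode)
{
	f2fs_lock_op(sbi);		/* block checkpoint while dirtying node pages */
	update_inode_page(inode);	/* may dirty the inode's node page */
	f2fs_unlock_op(sbi);		/* checkpoint may proceed again */
}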
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	raw_super_buf = NULL;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	clear_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan inodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q)) {
			f2fs_msg(sb, KERN_WARNING,
				"mounting with \"discard\" option, but "
				"the device does not support discard");
			clear_opt(sbi, DISCARD);
		}
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi);
	}

	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_EVICT_INODE))
		goto no_delete;
#endif

	remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		update_inode_page(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	}
	f2fs_bug_on(sbi, err &&
		!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
out_clear:
	fscrypt_put_encryption_info(inode, NULL);
	clear_inode(inode);
}
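/*
 * Illustrative sketch, not part of the original source: the retry: label in
 * f2fs_evict_inode() above encodes "retry on -ENOMEM, treat -ENOENT as
 * success". The same control flow in isolation (the helper name is
 * hypothetical):
 */
static int example_retry_on_enomem(struct inode *inode)
{
	int err;
retry:
	err = f2fs_truncate(inode);	/* can fail transiently under memory pressure */
	if (err == -ENOMEM)
		goto retry;		/* give more chances in the ENOMEM case */
	if (err == -ENOENT)
		err = 0;		/* already gone: nothing left to remove */
	return err;
}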
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_dir_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(fi, FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode, true);

	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
	}

no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(fi, FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
	if (is_inode_flag_set(fi, FI_FREE_NID)) {
		if (err && err != -ENOENT)
			alloc_nid_done(sbi, inode->i_ino);
		else
			alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(fi, FI_FREE_NID);
	}

	if (err && err != -ENOENT) {
		if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
			/*
			 * We got here because we failed to release the
			 * inode's resources earlier; remind the user to
			 * run fsck to fix it.
			 */
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"inode (ino:%lu) resource leak, run fsck "
				"to fix this issue!", inode->i_ino);
		}
	}
out_clear:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	if (fi->i_crypt_info)
		f2fs_free_encryption_info(inode, fi->i_crypt_info);
#endif
	clear_inode(inode);
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	err = parse_options(sb, (char *)data);
	if (err)
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	// sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	mutex_init(&sbi->node_write);
	sbi->por_doing = false;
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan inodes, free them */
	recover_orphan_inodes(sbi);

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		err = -EINVAL;
		goto free_root_inode;
	}

	sb->s_root = d_alloc_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	if (test_opt(sbi, DISCARD)) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		if (!blk_queue_discard(q))
			f2fs_msg(sb, KERN_WARNING,
				"mounting with \"discard\" option, but "
				"the device does not support discard");
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		err = recover_fsync_data(sbi);
		if (err)
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
	}

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (!(sb->s_flags & MS_RDONLY)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}
struct inode *f2fs_iget_nowait(struct super_block *sb, unsigned long ino)
{
	struct f2fs_iget_args args = {
		.ino = ino,
		.on_free = 0
	};
	struct inode *inode = ilookup5(sb, ino, f2fs_iget_test, &args);

	if (inode)
		return inode;
	if (!args.on_free)
		return f2fs_iget(sb, ino);
	return ERR_PTR(-ENOENT);
}

static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	/* Check if ino is within scope */
	check_nid_range(sbi, inode->i_ino);

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	rn = page_address(node_page);
	ri = &(rn->i);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->data_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver) - 1;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	get_extent_info(&fi->ext, ri->i_ext);
	f2fs_put_page(node_page, 1);
	return 0;
}

struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;

	if (!sbi->por_doing && inode->i_nlink == 0) {
		ret = -ENOENT;
		goto bad_inode;
	}

make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping,
				GFP_HIGHUSER_MOVABLE | __GFP_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &f2fs_symlink_inode_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	unlock_new_inode(inode);

	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

void update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	wait_on_page_writeback(node_page);

	rn = page_address(node_page);
	ri = &(rn->i);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(inode->i_blocks);
	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	set_cold_node(inode, node_page);
	set_page_dirty(node_page);
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *node_page;
	bool need_lock = false;

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (wbc)
		f2fs_balance_fs(sbi);

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	if (!PageDirty(node_page)) {
		need_lock = true;
		f2fs_put_page(node_page, 1);
		mutex_lock(&sbi->write_inode);
		node_page = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(node_page)) {
			mutex_unlock(&sbi->write_inode);
			return PTR_ERR(node_page);
		}
	}
	update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
	if (need_lock)
		mutex_unlock(&sbi->write_inode);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	remove_inode_page(inode);
no_delete:
	clear_inode(inode);
}
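/*
 * Illustrative sketch, not part of the original source: do_read_inode() and
 * update_inode() above mirror each other because on-disk fields are
 * little-endian while in-memory fields are native-endian. A hypothetical
 * round trip for a single field:
 */
static inline void example_isize_roundtrip(struct inode *inode,
					struct f2fs_inode *ri)
{
	ri->i_size = cpu_to_le64(i_size_read(inode));	/* CPU -> disk order */
	i_size_write(inode, le64_to_cpu(ri->i_size));	/* disk -> CPU order */
}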
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a temporary block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE))
		goto free_sbi;

	/* read f2fs raw super block */
	raw_super_buf = sb_bread(sb, F2FS_SUPER_OFFSET);
	if (!raw_super_buf)
		goto free_sbi;
	raw_super = (struct f2fs_super_block *)
			((char *)raw_super_buf->b_data);

	/* init some FS parameters */
	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sbi, (char *)data))
		goto free_sb_buf;

	/* sanity checking of raw super */
	if (sanity_check_raw_super(raw_super))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(raw_super->log_blocksize);
	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode))
		goto free_sb_buf;

	if (get_valid_checkpoint(sbi))
		goto free_meta_inode;

	/* sanity checking of checkpoint */
	if (sanity_check_ckpt(raw_super, sbi->ckpt))
		goto free_cp;

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	/* init super block */
	if (!sb_set_blocksize(sb, sbi->blocksize))
		goto free_cp;

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	if (build_segment_manager(sbi))
		goto free_sm;
	if (build_node_manager(sbi))
		goto free_nm;
	if (build_gc_manager(sbi))
		goto free_gc;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode))
		goto free_gc;

	/* if there are any orphan inodes, free them */
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root))
		goto free_node_inode;
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_alloc_root(root); /* allocate root dentry */
	if (!sb->s_root)
		goto free_root_inode;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
		recover_fsync_data(sbi);

	/* After POR, we can run background GC thread */
	if (start_gc_thread(sbi))
		goto fail;

#ifdef CONFIG_F2FS_STAT_FS
	if (f2fs_proc_root) {
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
		if (f2fs_stat_init(sbi))
			goto fail;
	}
#endif
	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	make_bad_inode(root);
	iput(root);
free_node_inode:
	make_bad_inode(sbi->node_inode);
	iput(sbi->node_inode);
free_gc:
	destroy_gc_manager(sbi);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return -EINVAL;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %ld",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
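/*
 * Illustrative sketch, not part of the original source: every
 * f2fs_fill_super() variant above unwinds through a goto ladder in which
 * each label releases exactly what was acquired before the failing step,
 * in reverse order. A minimal hypothetical example of the pattern:
 */
static int example_goto_ladder(void)
{
	void *a, *b;
	int err = -ENOMEM;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		return err;

	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto free_a;	/* undo only what has been set up so far */

	kfree(b);		/* success path: use and release both */
	kfree(a);
	return 0;

free_a:
	kfree(a);
	return err;
}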
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		f2fs_update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}
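/*
 * Illustrative usage sketch, not part of the original source: f2fs_iget()
 * returns ERR_PTR() on failure, so callers check with IS_ERR()/PTR_ERR()
 * and drop the reference with iput() when done (the caller name below is
 * hypothetical).
 */
static int example_lookup_ino(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = f2fs_iget(sb, ino);

	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iput(inode);	/* drop the reference taken by f2fs_iget() */
	return 0;
}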
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct buffer_head *raw_super_buf;
	struct inode *root;
	long err = -EINVAL;
	int i;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	if (validate_superblock(sb, &raw_super, &raw_super_buf, 0)) {
		brelse(raw_super_buf);
		if (validate_superblock(sb, &raw_super, &raw_super_buf, 1))
			goto free_sb_buf;
	}

	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
	/* parse mount options */
	if (parse_options(sb, sbi, (char *)data))
		goto free_sb_buf;

	sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_fs_info = sbi;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->raw_super_buf = raw_super_buf;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->write_inode);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	for (i = 0; i < NR_LOCK_TYPE; i++)
		mutex_init(&sbi->fs_lock[i]);
	sbi->por_doing = 0;
	spin_lock_init(&sbi->stat_lock);
	init_rwsem(&sbi->bio_sem);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_sb_buf;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* sanity checking of checkpoint */
	err = -EINVAL;
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
		goto free_cp;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	INIT_LIST_HEAD(&sbi->dir_inode_list);
	spin_lock_init(&sbi->dir_inode_lock);

	init_orphan_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	/* if there are any orphan inodes, free them */
	err = -EINVAL;
	if (recover_orphan_inodes(sbi))
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
		goto free_root_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD))
		recover_fsync_data(sbi);

	/* After POR, we can run background GC thread */
	err = start_gc_thread(sbi);
	if (err)
		goto fail;

	err = f2fs_build_stats(sbi);
	if (err)
		goto fail;

	return 0;
fail:
	stop_gc_thread(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	iput(sbi->node_inode);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_cp:
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_sb_buf:
	brelse(raw_super_buf);
free_sbi:
	kfree(sbi);
	return err;
}