static int __init init_inodecache(void)
{
	f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info));
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}
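
/*
 * Note: f2fs_kmem_cache_create() is assumed here to be the f2fs wrapper
 * around kmem_cache_create() from f2fs.h, taking only a cache name and an
 * object size; both f2fs_inode_cachep above and fsync_entry_slab below are
 * allocated through it, so an allocation failure reduces to a single NULL
 * check.
 */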
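
/*
 * Roll-forward recovery entry point, summarized from the flow below: after
 * an unclean shutdown, step #1 (find_fsync_dnodes) collects the inodes that
 * were fsynced since the last checkpoint, step #2 (recover_data) replays
 * their data, and a CP_RECOVERY checkpoint is written once recovery has
 * succeeded.  With check_only set, the function only reports (ret = 1)
 * whether there is anything to recover.  A read-only superblock is
 * temporarily made writable, and quota files are turned on so that iput()
 * updates them correctly; both are restored before returning.
 */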
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	} else {
		clear_sbi_flag(sbi, SBI_POR_DOING);
	}
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}