/*
 * Replay the fsync'ed data nodes gathered in @inode_list.
 *
 * Walks the warm-node log chain, starting at the next free block of
 * CURSEG_WARM_NODE and following next_blkaddr_of_node() links, and for
 * every node page that belongs to an inode on @inode_list:
 *   - re-applies the inode block itself (recover_inode) when the page
 *     is an inode page,
 *   - re-adds the dentry (recover_dentry, into @dir_list) when this is
 *     the last dentry block recorded for the inode,
 *   - replays the data node (do_recover_data).
 * An entry whose last recorded block has been processed is moved to
 * @tmp_inode_list. The walk stops at the first invalid/unrecoverable
 * block address or on error.
 *
 * Returns 0 on success or a negative errno; on success the current
 * segments are re-allocated past the replayed area.
 */
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		/* end of the chain: address no longer a valid POR target */
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		/* readahead the upcoming meta pages to overlap I/O */
		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		/* stop once the node no longer matches this checkpoint era */
		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		/* nodes of inodes that were not fsync'ed are skipped */
		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		/* fully replayed: hand the entry over to the caller's list */
		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) { unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); struct curseg_info *curseg; struct page *page; block_t blkaddr; int err = 0; /* get node pages in the current segment */ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); /* read node page */ page = alloc_page(GFP_F2FS_ZERO); if (!page) return -ENOMEM; lock_page(page); while (1) { struct fsync_inode_entry *entry; err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC); if (err) return err; lock_page(page); if (cp_ver != cpver_of_node(page)) break; if (!is_fsync_dnode(page)) goto next; entry = get_fsync_inode(head, ino_of_node(page)); if (entry) { if (IS_INODE(page) && is_dent_dnode(page)) set_inode_flag(F2FS_I(entry->inode), FI_INC_LINK); } else { if (IS_INODE(page) && is_dent_dnode(page)) { err = recover_inode_page(sbi, page); if (err) { f2fs_msg(sbi->sb, KERN_INFO, "%s: recover_inode_page failed: %d", __func__, err); break; } } /* add this fsync inode to the list */ entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS); if (!entry) { err = -ENOMEM; break; } entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); if (IS_ERR(entry->inode)) { err = PTR_ERR(entry->inode); f2fs_msg(sbi->sb, KERN_INFO, "%s: f2fs_iget failed: %d", __func__, err); kmem_cache_free(fsync_entry_slab, entry); break; } list_add_tail(&entry->list, head); } entry->blkaddr = blkaddr; err = recover_inode(entry->inode, page); if (err && err != -ENOENT) { f2fs_msg(sbi->sb, KERN_INFO, "%s: recover_inode failed: %d", __func__, err); break; } next: /* check next segment */ blkaddr = next_blkaddr_of_node(page); } unlock_page(page); __free_pages(page, 0); return err; }