static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR, true);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err) {
					f2fs_msg(sbi->sb, KERN_INFO,
						"%s: recover_inode_page failed: %d",
						__func__, err);
					break;
				}
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				f2fs_msg(sbi->sb, KERN_INFO,
					"%s: f2fs_iget failed: %d",
					__func__, err);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"%s: recover_inode failed: %d",
				__func__, err);
			break;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	return err;
}
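Both listings above track recoverable inodes through fsync_inode_entry nodes allocated from fsync_entry_slab and looked up with get_fsync_inode(). For reference, here is a minimal sketch of that structure and lookup, assuming the field set touched by the first listing (blkaddr, last_inode, last_dentry); the real definitions live in fs/f2fs/f2fs.h and fs/f2fs/recovery.c and vary between kernel releases.

/*
 * Reference sketch only, not a verbatim copy of any particular release.
 * block_t and nid_t are f2fs-local typedefs for 32-bit block and node-id
 * numbers, defined in fs/f2fs/f2fs.h.
 */
#include <linux/fs.h>
#include <linux/list.h>

struct fsync_inode_entry {
	struct list_head list;	/* linked into the caller's head list */
	struct inode *inode;	/* vfs inode of the file being recovered */
	block_t blkaddr;	/* block address of the last fsync'ed dnode */
	block_t last_dentry;	/* block address of the last dentry block */
	block_t last_inode;	/* block address of the last inode block */
};

/* Linear scan of the recovery list by inode number. */
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
							nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

A plain list with linear lookup is sufficient here because the number of files with pending fsync data at mount time is normally small, and the scan of the warm-node log is already sequential I/O bound.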