static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR);

	while (1) {
		struct fsync_inode_entry *entry;

		if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi))
			return 0;

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT)
					goto next;
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
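find_fsync_dnodes() walks the chain of warm node blocks that fsync wrote after the last checkpoint and collects the inodes they belong to, so roll-forward recovery can replay them after a crash. As a rough illustration of the workload that produces such a chain, the following user-space sketch (hypothetical file name and mount point, plain POSIX calls only) writes and fsyncs data without forcing a checkpoint; only the fsync'd dnodes record the update until recovery or the next checkpoint.

/* Hypothetical user-space illustration: an fsync'd append that f2fs would
 * recover via the dnode chain scanned by find_fsync_dnodes() after a crash.
 * The path is made up; only standard POSIX calls are used.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "journal entry that must survive a crash";
	int fd = open("/mnt/f2fs/log.txt", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	if (write(fd, buf, strlen(buf)) < 0) {
		perror("write");
		close(fd);
		return EXIT_FAILURE;
	}
	/* fsync() has f2fs flush the dirty data plus a warm node block marked
	 * as an fsync dnode, rather than triggering a full checkpoint. */
	if (fsync(fd) < 0) {
		perror("fsync");
		close(fd);
		return EXIT_FAILURE;
	}
	close(fd);
	return EXIT_SUCCESS;
}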
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
				void *txattr_addr, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t inline_size = 0;
	void *xattr_addr;
	struct page *xpage;
	nid_t new_nid = 0;
	int err;

	inline_size = inline_xattr_size(inode);

	if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
		if (!alloc_nid(sbi, &new_nid))
			return -ENOSPC;

	/* write to inline xattr */
	if (inline_size) {
		struct page *page = NULL;
		void *inline_addr;

		if (ipage) {
			inline_addr = inline_xattr_addr(ipage);
			f2fs_wait_on_page_writeback(ipage, NODE, true);
		} else {
			page = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(page)) {
				alloc_nid_failed(sbi, new_nid);
				return PTR_ERR(page);
			}
			inline_addr = inline_xattr_addr(page);
			f2fs_wait_on_page_writeback(page, NODE, true);
		}
		memcpy(inline_addr, txattr_addr, inline_size);
		f2fs_put_page(page, 1);

		/* no need to use xattr node block */
		if (hsize <= inline_size) {
			err = truncate_xattr_node(inode, ipage);
			alloc_nid_failed(sbi, new_nid);
			return err;
		}
	}

	/* write to xattr node block */
	if (F2FS_I(inode)->i_xattr_nid) {
		xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
		if (IS_ERR(xpage)) {
			alloc_nid_failed(sbi, new_nid);
			return PTR_ERR(xpage);
		}
		f2fs_bug_on(sbi, new_nid);
		f2fs_wait_on_page_writeback(xpage, NODE, true);
	} else {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
		xpage = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
		if (IS_ERR(xpage)) {
			alloc_nid_failed(sbi, new_nid);
			return PTR_ERR(xpage);
		}
		alloc_nid_done(sbi, new_nid);
	}

	xattr_addr = page_address(xpage);
	memcpy(xattr_addr, txattr_addr + inline_size, PAGE_SIZE -
						sizeof(struct node_footer));
	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	/* need to checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
	return 0;
}
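write_all_xattrs() is the write-back half of f2fs's two-tier xattr layout: entries that fit in the inline area of the inode's node page stay there, and any overflow goes to a dedicated xattr node block allocated on demand. As a hedged user-space sketch of the call path that eventually reaches this function (hypothetical path and attribute name, standard setxattr(2) from <sys/xattr.h>):

/* Hypothetical trigger for the xattr write path above: setting an extended
 * attribute on a file that lives on an f2fs mount. The path and attribute
 * name are assumptions for illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/f2fs/file.bin";	/* assumed f2fs mount */
	const char value[] = "0123456789abcdef";

	/* XATTR_CREATE fails with EEXIST if the attribute already exists. */
	if (setxattr(path, "user.checksum", value, sizeof(value),
		     XATTR_CREATE) != 0) {
		perror("setxattr");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}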