static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;

	recover_inline_flags(inode, raw);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
		ino_of_node(page), name, raw->i_inline);
}
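/*
 * Convert an inline directory into a regular one by copying the inline
 * dentry bitmap, dentry slots and filenames verbatim into a newly reserved
 * block-0 dentry page, then clearing the inline-dentry state on the
 * directory.
 */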
/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
	/*
	 * we do not need to zero out the remainder of the dentry and
	 * filename fields, since the bitmap already marks which slots are
	 * in use; likewise, copying/zeroing the reserved space of the
	 * dentry block can be skipped because it has not been used so far.
	 */
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(dir, ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	f2fs_i_depth_write(dir, 1);
	if (i_size_read(dir) < PAGE_SIZE)
		f2fs_i_size_write(dir, PAGE_SIZE);
out:
	f2fs_put_page(page, 1);
	return err;
}
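/*
 * Initialize the inline dentry area of a brand-new directory with the
 * "." and ".." entries and raise i_size to cover the inline area.
 */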
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *inline_dentry;
	struct f2fs_dentry_ptr d;

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr_inline(NULL, &d, inline_dentry);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA)
		f2fs_i_size_write(inode, MAX_INLINE_DATA);
	return 0;
}
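/*
 * Alternative conversion path: instead of copying the inline dentry block
 * verbatim, back up the inline entries and re-insert each one through the
 * regular add-entry path so it is hashed into place again.
 */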
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct f2fs_inline_dentry *backup_dentry;
	int err;

	backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
			sizeof(struct f2fs_inline_dentry), GFP_F2FS_ZERO);
	if (!backup_dentry) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	/* back up the inline entries before wiping the inline area */
	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
	truncate_inline_inode(dir, ipage, 0);

	unlock_page(ipage);

	/* re-insert every backed-up entry through the regular path */
	err = f2fs_add_inline_entries(dir, backup_dentry);
	if (err)
		goto recover;

	lock_page(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);
	kfree(backup_dentry);
	return 0;
recover:
	/* on failure, restore the inline dentry area from the backup */
	lock_page(ipage);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
	f2fs_i_depth_write(dir, 0);
	f2fs_i_size_write(dir, MAX_INLINE_DATA);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	kfree(backup_dentry);
	return err;
}
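/*
 * Illustrative sketch (not part of the original file): how a conversion
 * entry point could choose between the two strategies above.  The
 * predicate name f2fs_can_copy_inline_dirents() is hypothetical; which
 * condition the in-tree caller actually tests varies across kernel
 * versions.
 */
static int convert_inline_dir_sketch(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	/* entries that can keep their slot layout are copied verbatim */
	if (f2fs_can_copy_inline_dirents(dir))	/* hypothetical predicate */
		return f2fs_move_inline_dirents(dir, ipage, inline_dentry);

	/* otherwise every entry is re-inserted and re-hashed */
	return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
}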
/*
 * Replay the data block addresses recorded in an fsynced node page into
 * the current dnode, allocating or invalidating blocks as needed.
 */
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			inode->i_ino, ofs_of_node(dn.node_page),
			ofs_of_node(page));
		err = -EFAULT;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFAULT;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFAULT;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}
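/*
 * Illustrative sketch (not part of the original file): the retry-on-ENOMEM
 * idiom that do_recover_data() above open-codes with goto labels.  A
 * transient allocation failure during roll-forward recovery is not fatal,
 * so the code backs off briefly and tries again.  retry_get_dnode() is a
 * hypothetical wrapper shown only to isolate the pattern.
 */
static int retry_get_dnode(struct dnode_of_data *dn, pgoff_t index)
{
	int err;

	do {
		err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
		if (err == -ENOMEM)
			congestion_wait(BLK_RW_ASYNC, HZ/50);
	} while (err == -ENOMEM);

	return err;
}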
/*
 * Restore inode metadata (size, timestamps, flags, project quota) from the
 * fsynced inode page during roll-forward recovery.
 */
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
		ino_of_node(page), name, raw->i_inline);
	return 0;
}