static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;

	sync_filesystem(sb);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	sbi->mount_opt.opt = 0;
	sbi->active_logs = NR_CURSEG_TYPE;

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc=off is passed in the mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			f2fs_sync_fs(sb, 1);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	/*
	 * We stop the issue-flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;

restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2))
				set_opt(sbi, BG_GC);
			else if (strlen(name) == 3 && !strncmp(name, "off", 3))
				clear_opt(sbi, BG_GC);
			else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_discard:
			set_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}
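/*
 * For reference, the string handed to parse_options() is the comma-separated
 * option list from mount(8), e.g. "background_gc=on,discard,active_logs=6".
 * The snippet below is a minimal userspace sketch of the same strsep() loop,
 * added here only as an illustration: the kernel's match_token()/f2fs_tokens
 * machinery is replaced by a plain "key=value" split, and the helper name
 * parse_mount_options() is hypothetical, not part of f2fs.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace sketch of parse_options()'s tokenization loop. */
static int parse_mount_options(char *options)
{
	char *p;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		char *val;

		if (!*p)			/* skip empty tokens such as ",," */
			continue;

		val = strchr(p, '=');
		if (val)
			*val++ = '\0';		/* split "key=value" in place */

		if (!strcmp(p, "background_gc"))
			printf("BG_GC %s\n",
			       val && !strcmp(val, "off") ? "off" : "on");
		else if (!strcmp(p, "active_logs"))
			printf("active_logs = %d\n", val ? atoi(val) : 0);
		else
			printf("option: %s\n", p);
	}
	return 0;
}

int main(void)
{
	char opts[] = "background_gc=on,discard,active_logs=6";

	return parse_mount_options(opts);
}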
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino)) {
		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
		goto out_unmap_put;
	}
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}
	goto out;

out_unmap_put:
	kunmap(page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
						struct inode *inode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	f2fs_hash_t dentry_hash;
	struct f2fs_dir_entry *de;
	unsigned int nbucket, nblock;
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	size_t namelen = name->len;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);
	int err = 0;
	int i;

	dentry_hash = f2fs_dentry_hash(name->name, name->len);
	level = 0;
	current_depth = F2FS_I(dir)->i_current_depth;
	if (F2FS_I(dir)->chash == dentry_hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (current_depth == MAX_DIR_HASH_DEPTH)
		return -ENOSPC;

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));

	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		mutex_lock_op(sbi, DENTRY_OPS);
		dentry_page = get_new_data_page(dir, block, true);
		if (IS_ERR(dentry_page)) {
			mutex_unlock_op(sbi, DENTRY_OPS);
			return PTR_ERR(dentry_page);
		}

		dentry_blk = kmap(dentry_page);
		bit_pos = room_for_filename(dentry_blk, slots);
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;

		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
		mutex_unlock_op(sbi, DENTRY_OPS);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;

add_dentry:
	err = init_inode_metadata(inode, dir, name);
	if (err)
		goto fail;

	wait_on_page_writeback(dentry_page);

	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = dentry_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(dentry_page);

	update_parent_metadata(dir, inode, current_depth);

	/* update parent inode number before releasing dentry page */
	F2FS_I(inode)->i_pino = dir->i_ino;
fail:
	kunmap(dentry_page);
	f2fs_put_page(dentry_page, 1);
	mutex_unlock_op(sbi, DENTRY_OPS);
	return err;
}
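/*
 * A directory entry occupies GET_DENTRY_SLOTS(namelen) consecutive slots, and
 * room_for_filename() must find that many consecutive clear bits in the
 * block's dentry bitmap. The snippet below is a hedged, userspace-only
 * illustration of that search over a plain byte-array bitmap: the 8-byte slot
 * width matches F2FS_SLOT_LEN and NR_DENTRY_IN_BLOCK is 214 as in f2fs, but
 * the helper names and the simple linear scan are illustrative, not the
 * kernel implementation.
 */
#include <stdio.h>
#include <string.h>

#define SLOT_LEN		8	/* name bytes stored per slot (F2FS_SLOT_LEN) */
#define NR_DENTRY_IN_BLOCK	214	/* dentry slots per 4KB block */

/* slots needed for a name of namelen bytes: ceil(namelen / SLOT_LEN) */
static int dentry_slots(size_t namelen)
{
	return (int)((namelen + SLOT_LEN - 1) / SLOT_LEN);
}

static int test_bit(const unsigned char *map, int nr)
{
	return (map[nr / 8] >> (nr % 8)) & 1;
}

/*
 * Linear scan for `slots` consecutive clear bits; returns the first slot
 * index, or NR_DENTRY_IN_BLOCK when the block has no room (the same
 * "not found" convention __f2fs_add_link() checks for).
 */
static int room_for_filename(const unsigned char *bitmap, int slots)
{
	int start, i;

	for (start = 0; start + slots <= NR_DENTRY_IN_BLOCK; start++) {
		for (i = 0; i < slots; i++)
			if (test_bit(bitmap, start + i))
				break;
		if (i == slots)
			return start;
	}
	return NR_DENTRY_IN_BLOCK;
}

int main(void)
{
	unsigned char bitmap[(NR_DENTRY_IN_BLOCK + 7) / 8];
	int slots = dentry_slots(strlen("a_rather_long_filename"));

	memset(bitmap, 0, sizeof(bitmap));
	bitmap[0] = 0x07;	/* pretend slots 0-2 are already taken */

	printf("need %d slots, first fit at slot %d\n",
	       slots, room_for_filename(bitmap, slots));
	return 0;
}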
/*
 * It only removes the dentry from the dentry page; the corresponding name
 * entry in the name page does not need to be touched during deletion.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
						struct inode *inode)
{
	struct f2fs_dentry_block *dentry_blk;
	unsigned int bit_pos;
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	void *kaddr = page_address(page);
	int i;

	mutex_lock_op(sbi, DENTRY_OPS);

	lock_page(page);
	wait_on_page_writeback(page);

	dentry_blk = (struct f2fs_dentry_block *)kaddr;
	bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

	/* Let's check and deallocate this dentry page */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
			NR_DENTRY_IN_BLOCK,
			0);
	kunmap(page); /* kunmap - pair of f2fs_find_entry */

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode && S_ISDIR(inode->i_mode)) {
		drop_nlink(dir);
		f2fs_write_inode(dir, NULL);
	} else {
		mark_inode_dirty(dir);
	}

	if (inode) {
		inode->i_ctime = CURRENT_TIME;
		drop_nlink(inode);
		if (S_ISDIR(inode->i_mode)) {
			drop_nlink(inode);
			i_size_write(inode, 0);
		}
		f2fs_write_inode(inode, NULL);
		if (inode->i_nlink == 0)
			add_orphan_inode(sbi, inode->i_ino);
	}

	if (bit_pos == NR_DENTRY_IN_BLOCK) {
		truncate_hole(dir, page->index, page->index + 1);
		clear_page_dirty_for_io(page);
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(dir);
	}
	f2fs_put_page(page, 1);

	mutex_unlock_op(sbi, DENTRY_OPS);
}
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}
static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page, *new_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry, *new_entry;
	int old_nlink = 0, new_nlink = 0;
	int err = -ENOENT;

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
	if (!new_entry)
		goto out_old;

	/* prepare for updating ".." directory entry info later */
	if (old_dir != new_dir) {
		if (S_ISDIR(old_inode->i_mode)) {
			err = -EIO;
			old_dir_entry = f2fs_parent_dir(old_inode,
							&old_dir_page);
			if (!old_dir_entry)
				goto out_new;
		}

		if (S_ISDIR(new_inode->i_mode)) {
			err = -EIO;
			new_dir_entry = f2fs_parent_dir(new_inode,
							&new_dir_page);
			if (!new_dir_entry)
				goto out_old_dir;
		}
	}

	/*
	 * If this is a cross rename between a file and a directory that are
	 * not in the same directory, we will increment the nlink of the
	 * file's parent later, so we should check the upper boundary of its
	 * nlink here.
	 */
	if ((!old_dir_entry || !new_dir_entry) &&
				old_dir_entry != new_dir_entry) {
		old_nlink = old_dir_entry ? -1 : 1;
		new_nlink = -old_nlink;
		err = -EMLINK;
		if ((old_nlink > 0 && old_inode->i_nlink >= F2FS_LINK_MAX) ||
			(new_nlink > 0 && new_inode->i_nlink >= F2FS_LINK_MAX))
			goto out_new_dir;
	}

	f2fs_lock_op(sbi);

	err = update_dent_inode(old_inode, &new_dentry->d_name);
	if (err)
		goto out_unlock;

	err = update_dent_inode(new_inode, &old_dentry->d_name);
	if (err)
		goto out_undo;

	/* update ".." directory entry info of old dentry */
	if (old_dir_entry)
		f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);

	/* update ".." directory entry info of new dentry */
	if (new_dir_entry)
		f2fs_set_link(new_inode, new_dir_entry, new_dir_page, old_dir);

	/* update directory entry info of old dir inode */
	f2fs_set_link(old_dir, old_entry, old_page, new_inode);

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	update_inode_page(old_inode);

	old_dir->i_ctime = CURRENT_TIME;
	if (old_nlink) {
		down_write(&F2FS_I(old_dir)->i_sem);
		if (old_nlink < 0)
			drop_nlink(old_dir);
		else
			inc_nlink(old_dir);
		up_write(&F2FS_I(old_dir)->i_sem);
	}
	mark_inode_dirty(old_dir);
	update_inode_page(old_dir);

	/* update directory entry info of new dir inode */
	f2fs_set_link(new_dir, new_entry, new_page, old_inode);

	down_write(&F2FS_I(new_inode)->i_sem);
	file_lost_pino(new_inode);
	up_write(&F2FS_I(new_inode)->i_sem);

	update_inode_page(new_inode);

	new_dir->i_ctime = CURRENT_TIME;
	if (new_nlink) {
		down_write(&F2FS_I(new_dir)->i_sem);
		if (new_nlink < 0)
			drop_nlink(new_dir);
		else
			inc_nlink(new_dir);
		up_write(&F2FS_I(new_dir)->i_sem);
	}
	mark_inode_dirty(new_dir);
	update_inode_page(new_dir);

	f2fs_unlock_op(sbi);
	return 0;
out_undo:
	/* Still we may fail to recover name info of f2fs_inode here */
	update_dent_inode(old_inode, &old_dentry->d_name);
out_unlock:
	f2fs_unlock_op(sbi);
out_new_dir:
	if (new_dir_entry) {
		kunmap(new_dir_page);
		f2fs_put_page(new_dir_page, 0);
	}
out_old_dir:
	if (old_dir_entry) {
		kunmap(old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_new:
	kunmap(new_page);
	f2fs_put_page(new_page, 0);
out_old:
	kunmap(old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}
static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	f2fs_lock_op(sbi);

	if (new_inode) {
		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, &new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		err = f2fs_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
		} else {
			kunmap(old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);
	return 0;

put_out_dir:
	if (PageLocked(new_page))
		f2fs_put_page(new_page, 1);
	else
		f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		kunmap(old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
	f2fs_unlock_op(sbi);
out_old:
	kunmap(old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		/* don't leak the op lock on the error path */
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving a new block will not destroy the inline data.
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
static bool f2fs_xattr_user_list(struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);

	return test_opt(sbi, XATTR_USER);
}
static struct page *init_inode_metadata(struct inode *inode,
		struct inode *dir, const struct qstr *name)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
		page = new_inode_page(inode);
		if (IS_ERR(page))
			return page;

		if (S_ISDIR(inode->i_mode)) {
			err = make_empty_dir(inode, dir, page);
			if (err)
				goto error;
		}

		err = f2fs_init_acl(inode, dir, page);
		if (err)
			goto put_error;

		err = f2fs_init_security(inode, dir, name, page);
		if (err)
			goto put_error;
	} else {
		page = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
		if (IS_ERR(page))
			return page;

		set_cold_node(inode, page);
	}

	if (name)
		init_dent_inode(name, page);

	/*
	 * This file should be checkpointed during fsync.
	 * We lost i_pino from now on.
	 */
	if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
		file_lost_pino(inode);
		/*
		 * If we link a tmpfile to an alias through the linkat path,
		 * we should remove this inode from the orphan list.
		 */
		if (inode->i_nlink == 0)
			remove_orphan_inode(sbi, inode->i_ino);
		inc_nlink(inode);
	}
	return page;

put_error:
	f2fs_put_page(page, 1);
error:
	/* once the failed inode becomes a bad inode, i_mode is S_IFREG */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_blocks(inode, 0);
	remove_dirty_dir_inode(inode);
	remove_inode_page(inode);
	return ERR_PTR(err);
}
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
				void *txattr_addr, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	size_t inline_size = 0;
	void *xattr_addr;
	struct page *xpage;
	nid_t new_nid = 0;
	int err;

	inline_size = inline_xattr_size(inode);

	if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
		if (!alloc_nid(sbi, &new_nid))
			return -ENOSPC;

	/* write to inline xattr */
	if (inline_size) {
		struct page *page = NULL;
		void *inline_addr;

		if (ipage) {
			inline_addr = inline_xattr_addr(ipage);
		} else {
			page = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(page)) {
				alloc_nid_failed(sbi, new_nid);
				return PTR_ERR(page);
			}
			inline_addr = inline_xattr_addr(page);
		}
		memcpy(inline_addr, txattr_addr, inline_size);
		f2fs_put_page(page, 1);

		/* no need to use xattr node block */
		if (hsize <= inline_size) {
			err = truncate_xattr_node(inode, ipage);
			alloc_nid_failed(sbi, new_nid);
			return err;
		}
	}

	/* write to xattr node block */
	if (F2FS_I(inode)->i_xattr_nid) {
		xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
		if (IS_ERR(xpage)) {
			alloc_nid_failed(sbi, new_nid);
			return PTR_ERR(xpage);
		}
		f2fs_bug_on(new_nid);
	} else {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
		xpage = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
		if (IS_ERR(xpage)) {
			alloc_nid_failed(sbi, new_nid);
			return PTR_ERR(xpage);
		}
		alloc_nid_done(sbi, new_nid);
	}

	xattr_addr = page_address(xpage);
	memcpy(xattr_addr, txattr_addr + inline_size,
				PAGE_SIZE - sizeof(struct node_footer));
	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	/* need to checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
	return 0;
}
struct dentry *f2fs_get_parent(struct dentry *child)
{
	struct qstr dotdot = {.name = "..", .len = 2};
	unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);

	if (!ino)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
}

static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
						struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (de) {
		nid_t ino = le32_to_cpu(de->ino);
		kunmap(page);
		f2fs_put_page(page, 0);

		inode = f2fs_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}

	return d_splice_alias(inode, dentry);
}

static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode = dentry->d_inode;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	trace_f2fs_unlink_enter(dir, dentry);
	f2fs_balance_fs(sbi);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de)
		goto fail;

	f2fs_lock_op(sbi);
	err = acquire_orphan_inode(sbi);
	if (err) {
		f2fs_unlock_op(sbi);
		kunmap(page);
		f2fs_put_page(page, 0);
		goto fail;
	}
	f2fs_delete_entry(de, page, inode);
	f2fs_unlock_op(sbi);

	/* In order to evict this inode, we set it dirty */
	mark_inode_dirty(inode);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
					const char *symname)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	size_t symlen = strlen(symname) + 1;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_symlink_inode_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out;

	err = page_symlink(inode, symname, symlen);
	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return err;
out:
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out_fail;

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
				umode_t mode, dev_t rdev)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int err = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out;

	alloc_nid_done(sbi, inode->i_ino);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
out:
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	f2fs_lock_op(sbi);

	if (new_inode) {
		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, &new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);
		F2FS_I(old_inode)->i_pino = new_dir->i_ino;

		new_inode->i_ctime = CURRENT_TIME;
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);
		mark_inode_dirty(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		err = f2fs_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
			F2FS_I(old_inode)->i_pino = new_dir->i_ino;
			update_inode_page(old_inode);
		} else {
			kunmap(old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);
	return 0;

put_out_dir:
	if (PageLocked(new_page))
		f2fs_put_page(new_page, 1);
	else
		f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		kunmap(old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
	f2fs_unlock_op(sbi);
out_old:
	kunmap(old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err, ilock;

	f2fs_balance_fs(sbi);

	/*
	 * F2FS backport: on old kernels we replace sb_start_pagefault(inode->i_sb)
	 * with vfs_check_frozen() and drop the original
	 * sb_end_pagefault(inode->i_sb) after the out label.
	 *
	 * sb_{start,end}_pagefault() was introduced in post-3.2 kernels by
	 * Jan Kara and merged in commit a0e881b7c189fa2bd76c024dbff91e79511c971d.
	 * Discussed at https://lkml.org/lkml/2012/3/5/278
	 *
	 * - Alex
	 */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* block allocation */
	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;
	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, ilock);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;

		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

mapped:
	/* fill the page */
	wait_on_page_writeback(page);
out:
	return block_page_mkwrite_return(err);
}
static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	nid_t ino;
	struct inode *inode;
	bool nid_free = false;
	int err;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	f2fs_lock_op(sbi);
	if (!alloc_nid(sbi, &ino)) {
		f2fs_unlock_op(sbi);
		err = -ENOSPC;
		goto fail;
	}
	f2fs_unlock_op(sbi);

#ifdef CONFIG_F2FS_ANDROID_EMULATION_SUPPORT
	if (IS_ANDROID_EMU(sbi, F2FS_I(dir), F2FS_I(dir)))
		f2fs_android_emu(sbi, inode, &inode->i_uid,
				 &inode->i_gid, &mode);
	else {
		inode->i_uid = current_fsuid();

		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				mode |= S_ISGID;
		} else {
			inode->i_gid = current_fsgid();
		}
	}
#else
	inode->i_uid = current_fsuid();

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}
#endif

	inode->i_ino = ino;
	inode->i_mode = mode;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_generation = sbi->s_next_generation++;

	err = insert_inode_locked(inode);
	if (err) {
		err = -EINVAL;
		nid_free = true;
		goto out;
	}
	trace_f2fs_new_inode(inode, 0);
	mark_inode_dirty(inode);
	return inode;

out:
	clear_nlink(inode);
	unlock_new_inode(inode);
fail:
	trace_f2fs_new_inode(inode, err);
	make_bad_inode(inode);
	iput(inode);
	if (nid_free)
		alloc_nid_failed(sbi, ino);
	return ERR_PTR(err);
}
int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
					const void *value, size_t value_len)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_xattr_header *header = NULL;
	struct f2fs_xattr_entry *here, *last;
	struct page *page;
	void *base_addr;
	int error, found, free, name_len, newsize;
	char *pval;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);

	if (value == NULL)
		value_len = 0;

	if (name_len > 255 || value_len > MAX_VALUE_LEN)
		return -ERANGE;

	mutex_lock_op(sbi, NODE_NEW);
	if (!fi->i_xattr_nid) {
		/* Allocate new attribute block */
		struct dnode_of_data dn;

		if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
			mutex_unlock_op(sbi, NODE_NEW);
			return -ENOSPC;
		}
		set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
		mark_inode_dirty(inode);

		page = new_node_page(&dn, XATTR_NODE_OFFSET);
		if (IS_ERR(page)) {
			alloc_nid_failed(sbi, fi->i_xattr_nid);
			fi->i_xattr_nid = 0;
			mutex_unlock_op(sbi, NODE_NEW);
			return PTR_ERR(page);
		}

		alloc_nid_done(sbi, fi->i_xattr_nid);
		base_addr = page_address(page);
		header = XATTR_HDR(base_addr);
		header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
		header->h_refcount = cpu_to_le32(1);
	} else {
		/* The inode already has an extended attribute block. */
		page = get_node_page(sbi, fi->i_xattr_nid);
		if (IS_ERR(page)) {
			mutex_unlock_op(sbi, NODE_NEW);
			return PTR_ERR(page);
		}

		base_addr = page_address(page);
		header = XATTR_HDR(base_addr);
	}

	if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
		error = -EIO;
		goto cleanup;
	}

	/* find entry with wanted name. */
	found = 0;
	list_for_each_xattr(here, base_addr) {
		if (here->e_name_index != name_index)
			continue;
		if (here->e_name_len != name_len)
			continue;
		if (!memcmp(here->e_name, name, name_len)) {
			found = 1;
			break;
		}
	}

	last = here;
	while (!IS_XATTR_LAST_ENTRY(last))
		last = XATTR_NEXT_ENTRY(last);

	newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) +
			name_len + value_len);

	/* 1. Check space */
	if (value) {
		/*
		 * If value is NULL, it is a remove operation.
		 * In case of an update operation, we calculate the free space.
		 */
		free = MIN_OFFSET - ((char *)last - (char *)header);
		if (found)
			free = free - ENTRY_SIZE(here);
		if (free < newsize) {
			error = -ENOSPC;
			goto cleanup;
		}
	}

	/* 2. Remove old entry */
	if (found) {
		/*
		 * If the entry is found, remove the old entry.
		 * If not found, no removal is needed.
		 */
		struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
		int oldsize = ENTRY_SIZE(here);

		memmove(here, next, (char *)last - (char *)next);
		last = (struct f2fs_xattr_entry *)((char *)last - oldsize);
		memset(last, 0, oldsize);
	}

	/* 3. Write new entry */
	if (value) {
		/*
		 * Before we get here, the old entry has been removed.
		 * We just write the new entry.
		 */
		memset(last, 0, newsize);
		last->e_name_index = name_index;
		last->e_name_len = name_len;
		memcpy(last->e_name, name, name_len);
		pval = last->e_name + name_len;
		memcpy(pval, value, value_len);
		last->e_value_size = cpu_to_le16(value_len);
	}

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		inode->i_mode = fi->i_acl_mode;
		inode->i_ctime = CURRENT_TIME_SEC;
		clear_inode_flag(fi, FI_ACL_MODE);
	}

	f2fs_write_inode(inode, NULL);
	mutex_unlock_op(sbi, NODE_NEW);

	return 0;
cleanup:
	f2fs_put_page(page, 1);
	mutex_unlock_op(sbi, NODE_NEW);
	return error;
}
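/*
 * The entry management in f2fs_setxattr() is an append-only packed list:
 * each entry takes XATTR_ALIGN(sizeof(entry header) + name_len + value_len)
 * bytes, an update first memmove()s the tail of the list over the old entry,
 * and the new entry is then written at the terminating slot. The snippet
 * below is a userspace illustration of that arithmetic and of the removal
 * memmove(); the 4-byte alignment and struct xentry layout only mirror the
 * idea and are not the real f2fs on-disk structures.
 */
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xentry {
	uint8_t  name_index;
	uint8_t  name_len;
	uint16_t value_size;
	char     data[];	/* name_len name bytes, then value bytes */
};

#define XALIGN(sz)	(((sz) + 3) & ~(size_t)3)
#define ENTRY_SIZE(e)	XALIGN(sizeof(struct xentry) + (e)->name_len + \
			       (e)->value_size)

/* Remove one entry by sliding the tail of the list down over it, then
 * zeroing the freed space; returns the new end-of-list position. */
static char *remove_entry(struct xentry *here, char *last)
{
	struct xentry *next =
		(struct xentry *)((char *)here + ENTRY_SIZE(here));
	size_t oldsize = ENTRY_SIZE(here);

	memmove(here, next, (size_t)(last - (char *)next));
	memset(last - oldsize, 0, oldsize);
	return last - oldsize;
}

int main(void)
{
	alignas(4) char buf[256];
	char *p = buf, *last;
	struct xentry *a, *b;

	memset(buf, 0, sizeof(buf));

	/* append two entries back to back, as f2fs does inside one block */
	a = (struct xentry *)p;
	a->name_index = 1; a->name_len = 3; a->value_size = 5;
	memcpy(a->data, "foo", 3);
	memcpy(a->data + 3, "hello", 5);
	p += ENTRY_SIZE(a);

	b = (struct xentry *)p;
	b->name_index = 1; b->name_len = 3; b->value_size = 2;
	memcpy(b->data, "bar", 3);
	memcpy(b->data + 3, "42", 2);
	p += ENTRY_SIZE(b);
	last = p;		/* end of the packed list */

	printf("entry sizes: %zu and %zu bytes\n",
	       (size_t)ENTRY_SIZE(a), (size_t)ENTRY_SIZE(b));

	/* drop "foo": the tail slides down and the freed space is zeroed */
	last = remove_entry(a, last);
	printf("after removal, first entry name is %.3s\n",
	       ((struct xentry *)buf)->data);
	return 0;
}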