static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file_inode(file);
    ssize_t ret;

    if (f2fs_encrypted_inode(inode) &&
            !fscrypt_has_encryption_key(inode) &&
            fscrypt_get_encryption_info(inode))
        return -EACCES;

    inode_lock(inode);
    ret = generic_write_checks(iocb, from);
    if (ret > 0) {
        ret = f2fs_preallocate_blocks(iocb, from);
        if (!ret)
            ret = __generic_file_write_iter(iocb, from);
    }
    inode_unlock(inode);

    if (ret > 0) {
        ssize_t err;

        err = generic_write_sync(file, iocb->ki_pos - ret, ret);
        if (err < 0)
            ret = err;
    }
    return ret;
}
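/*
 * truncate_partial_data_page() below zeroes, in the page cache, the tail of
 * the page that straddles a non-page-aligned truncation point. Note the
 * dirty-marking rule at the end: a cache-only zeroing of an encrypted
 * regular file deliberately does not mark the page dirty.
 */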
static int truncate_partial_data_page(struct inode *inode, u64 from,
                        bool cache_only)
{
    unsigned offset = from & (PAGE_CACHE_SIZE - 1);
    pgoff_t index = from >> PAGE_CACHE_SHIFT;
    struct address_space *mapping = inode->i_mapping;
    struct page *page;

    if (!offset && !cache_only)
        return 0;

    if (cache_only) {
        page = grab_cache_page(mapping, index);
        if (page && PageUptodate(page))
            goto truncate_out;
        f2fs_put_page(page, 1);
        return 0;
    }

    page = get_lock_data_page(inode, index);
    if (IS_ERR(page))
        return 0;
truncate_out:
    f2fs_wait_on_page_writeback(page, DATA);
    zero_user(page, offset, PAGE_CACHE_SIZE - offset);
    if (!cache_only || !f2fs_encrypted_inode(inode) ||
            !S_ISREG(inode->i_mode))
        set_page_dirty(page);
    f2fs_put_page(page, 1);
    return 0;
}
static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        struct dentry *dentry)
{
    struct inode *inode = old_dentry->d_inode;
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    int err;

    if (f2fs_encrypted_inode(dir) &&
            !f2fs_is_child_context_consistent_with_parent(dir, inode))
        return -EPERM;

    f2fs_balance_fs(sbi);

    inode->i_ctime = CURRENT_TIME;
    ihold(inode);

    set_inode_flag(F2FS_I(inode), FI_INC_LINK);
    f2fs_lock_op(sbi);
    err = f2fs_add_link(dentry, inode);
    if (err)
        goto out;
    f2fs_unlock_op(sbi);

    d_instantiate(dentry, inode);

    if (IS_DIRSYNC(dir))
        f2fs_sync_fs(sbi->sb, 1);
    return 0;
out:
    clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
    iput(inode);
    f2fs_unlock_op(sbi);
    return err;
}
static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        struct dentry *dentry)
{
    struct inode *inode = d_inode(old_dentry);
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    int err;

    if (f2fs_encrypted_inode(dir) &&
            !fscrypt_has_permitted_context(dir, inode))
        return -EPERM;

    f2fs_balance_fs(sbi, true);

    inode->i_ctime = current_time(inode);
    ihold(inode);

    set_inode_flag(inode, FI_INC_LINK);
    f2fs_lock_op(sbi);
    err = f2fs_add_link(dentry, inode);
    if (err)
        goto out;
    f2fs_unlock_op(sbi);

    d_instantiate(dentry, inode);

    if (IS_DIRSYNC(dir))
        f2fs_sync_fs(sbi->sb, 1);
    return 0;
out:
    clear_inode_flag(inode, FI_INC_LINK);
    iput(inode);
    f2fs_unlock_op(sbi);
    return err;
}
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
    struct f2fs_sb_info *sbi = F2FS_SB(sb);
    struct inode *inode;
    int ret = 0;

    inode = iget_locked(sb, ino);
    if (!inode)
        return ERR_PTR(-ENOMEM);

    if (!(inode->i_state & I_NEW)) {
        trace_f2fs_iget(inode);
        return inode;
    }
    if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
        goto make_now;

    ret = do_read_inode(inode);
    if (ret)
        goto bad_inode;
make_now:
    if (ino == F2FS_NODE_INO(sbi)) {
        inode->i_mapping->a_ops = &f2fs_node_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
    } else if (ino == F2FS_META_INO(sbi)) {
        inode->i_mapping->a_ops = &f2fs_meta_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
    } else if (S_ISREG(inode->i_mode)) {
        inode->i_op = &f2fs_file_inode_operations;
        inode->i_fop = &f2fs_file_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
    } else if (S_ISDIR(inode->i_mode)) {
        inode->i_op = &f2fs_dir_inode_operations;
        inode->i_fop = &f2fs_dir_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
    } else if (S_ISLNK(inode->i_mode)) {
        if (f2fs_encrypted_inode(inode))
            inode->i_op = &f2fs_encrypted_symlink_inode_operations;
        else
            inode->i_op = &f2fs_symlink_inode_operations;
        inode_nohighmem(inode);
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
    } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
            S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
        inode->i_op = &f2fs_special_inode_operations;
        init_special_inode(inode, inode->i_mode, inode->i_rdev);
    } else {
        ret = -EIO;
        goto bad_inode;
    }
    unlock_new_inode(inode);
    trace_f2fs_iget(inode);
    return inode;

bad_inode:
    iget_failed(inode);
    trace_f2fs_iget_exit(inode, ret);
    return ERR_PTR(ret);
}
static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
    if (f2fs_encrypted_inode(dir)) {
        int err = fscrypt_get_encryption_info(dir);

        if (err)
            return err;
    }

    return __f2fs_tmpfile(dir, dentry, mode, NULL);
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    struct inode *inode = file_inode(iocb->ki_filp);

    if (f2fs_encrypted_inode(inode) &&
            !f2fs_has_encryption_key(inode) &&
            f2fs_get_encryption_info(inode))
        return -EACCES;

    return generic_file_write_iter(iocb, from);
}
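/*
 * do_write_data_page() below picks between the two f2fs write policies:
 * in-place update (IPU) rewrites the existing block address when SSR-style
 * allocation is in effect and the data is not cold, while out-of-place
 * update (OPU) writes to a newly allocated block and refreshes the block
 * address and extent cache.
 */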
int do_write_data_page(struct f2fs_io_info *fio)
{
    struct page *page = fio->page;
    struct inode *inode = page->mapping->host;
    struct dnode_of_data dn;
    int err = 0;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
    if (err)
        return err;

    fio->blk_addr = dn.data_blkaddr;

    /* This page is already truncated */
    if (fio->blk_addr == NULL_ADDR) {
        ClearPageUptodate(page);
        goto out_writepage;
    }

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
        fio->encrypted_page = f2fs_encrypt(inode, fio->page);
        if (IS_ERR(fio->encrypted_page)) {
            err = PTR_ERR(fio->encrypted_page);
            goto out_writepage;
        }
    }

    set_page_writeback(page);

    /*
     * If the current allocation needs SSR, it is better to do in-place
     * writes for the updated data.
     */
    if (unlikely(fio->blk_addr != NEW_ADDR &&
            !is_cold_data(page) &&
            need_inplace_update(inode))) {
        rewrite_data_page(fio);
        set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
        trace_f2fs_do_write_data_page(page, IPU);
    } else {
        write_data_page(&dn, fio);
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        trace_f2fs_do_write_data_page(page, OPU);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
            set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
    }
out_writepage:
    f2fs_put_dnode(&dn);
    return err;
}
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
    struct inode *inode = d_inode(dentry);
    struct f2fs_inode_info *fi = F2FS_I(inode);
    int err;

    err = inode_change_ok(inode, attr);
    if (err)
        return err;

    if (attr->ia_valid & ATTR_SIZE) {
        if (f2fs_encrypted_inode(inode) &&
                fscrypt_get_encryption_info(inode))
            return -EACCES;

        if (attr->ia_size <= i_size_read(inode)) {
            truncate_setsize(inode, attr->ia_size);
            err = f2fs_truncate(inode, true);
            if (err)
                return err;
            f2fs_balance_fs(F2FS_I_SB(inode), true);
        } else {
            /*
             * do not trim all blocks after i_size if target size is
             * larger than i_size.
             */
            truncate_setsize(inode, attr->ia_size);

            /* should convert inline inode here */
            if (!f2fs_may_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                    return err;
            }
            inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        }
    }

    __setattr_copy(inode, attr);

    if (attr->ia_valid & ATTR_MODE) {
        err = posix_acl_chmod(inode, get_inode_mode(inode));
        if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
            inode->i_mode = fi->i_acl_mode;
            clear_inode_flag(fi, FI_ACL_MODE);
        }
    }

    mark_inode_dirty(inode);
    return err;
}
static long f2fs_fallocate(struct file *file, int mode,
                loff_t offset, loff_t len)
{
    struct inode *inode = file_inode(file);
    long ret = 0;

    /* f2fs only supports ->fallocate for regular files */
    if (!S_ISREG(inode->i_mode))
        return -EINVAL;

    if (f2fs_encrypted_inode(inode) &&
        (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
        return -EOPNOTSUPP;

    if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
            FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
            FALLOC_FL_INSERT_RANGE))
        return -EOPNOTSUPP;

    inode_lock(inode);

    if (mode & FALLOC_FL_PUNCH_HOLE) {
        if (offset >= inode->i_size)
            goto out;
        ret = punch_hole(inode, offset, len);
    } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
        ret = f2fs_collapse_range(inode, offset, len);
    } else if (mode & FALLOC_FL_ZERO_RANGE) {
        ret = f2fs_zero_range(inode, offset, len, mode);
    } else if (mode & FALLOC_FL_INSERT_RANGE) {
        ret = f2fs_insert_range(inode, offset, len);
    } else {
        ret = expand_inode_data(inode, offset, len, mode);
    }

    if (!ret) {
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(inode);
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
    }

out:
    inode_unlock(inode);

    trace_f2fs_fallocate(inode, mode, offset, len, ret);
    return ret;
}
bool f2fs_may_inline_data(struct inode *inode)
{
    if (f2fs_is_atomic_file(inode))
        return false;

    if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
        return false;

    if (i_size_read(inode) > MAX_INLINE_DATA)
        return false;

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        return false;

    return true;
}
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
        const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
    struct file *file = iocb->ki_filp;
    struct address_space *mapping = file->f_mapping;
    struct inode *inode = mapping->host;
    size_t count = iov_length(iov, nr_segs);
    int err;

    /* we don't need to use inline_data strictly */
    if (f2fs_has_inline_data(inode)) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        return 0;

    err = check_direct_IO(inode, rw, iov, offset, nr_segs);
    if (err)
        return err;

    trace_f2fs_direct_IO_enter(inode, offset, count, rw);

    if (rw & WRITE) {
        __allocate_data_blocks(inode, offset, count);
        if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
            err = -EIO;
            goto out;
        }
    }

    err = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
                            get_data_block_dio);
out:
    if (err < 0 && (rw & WRITE))
        f2fs_write_failed(mapping, offset + count);

    trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

    return err;
}
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
    struct inode *inode = dentry->d_inode;
    struct f2fs_inode_info *fi = F2FS_I(inode);
    int err;

    err = inode_change_ok(inode, attr);
    if (err)
        return err;

    if (attr->ia_valid & ATTR_SIZE) {
        if (f2fs_encrypted_inode(inode) &&
                f2fs_get_encryption_info(inode))
            return -EACCES;

        if (attr->ia_size <= i_size_read(inode)) {
            truncate_setsize(inode, attr->ia_size);
            err = f2fs_truncate(inode, true);
            if (err)
                return err;
            f2fs_balance_fs(F2FS_I_SB(inode));
        } else {
            /*
             * do not trim all blocks after i_size if target size is
             * larger than i_size.
             */
            truncate_setsize(inode, attr->ia_size);
        }
    }

    __setattr_copy(inode, attr);

    if (attr->ia_valid & ATTR_MODE) {
        err = f2fs_acl_chmod(inode);
        if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
            inode->i_mode = fi->i_acl_mode;
            clear_inode_flag(fi, FI_ACL_MODE);
        }
    }

    mark_inode_dirty(inode);
    return err;
}
static long f2fs_fallocate(struct file *file, int mode,
                loff_t offset, loff_t len)
{
    struct inode *inode = file_inode(file);
    long ret = 0;

    if (f2fs_encrypted_inode(inode) &&
        (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
        return -EOPNOTSUPP;

    if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
            FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
            FALLOC_FL_INSERT_RANGE))
        return -EOPNOTSUPP;

    mutex_lock(&inode->i_mutex);

    if (mode & FALLOC_FL_PUNCH_HOLE) {
        if (offset >= inode->i_size)
            goto out;
        ret = punch_hole(inode, offset, len);
    } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
        ret = f2fs_collapse_range(inode, offset, len);
    } else if (mode & FALLOC_FL_ZERO_RANGE) {
        ret = f2fs_zero_range(inode, offset, len, mode);
    } else if (mode & FALLOC_FL_INSERT_RANGE) {
        ret = f2fs_insert_range(inode, offset, len);
    } else {
        ret = expand_inode_data(inode, offset, len, mode);
    }

    if (!ret) {
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(inode);
    }

out:
    mutex_unlock(&inode->i_mutex);

    trace_f2fs_fallocate(inode, mode, offset, len, ret);
    return ret;
}
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
    struct inode *inode = dentry->d_inode;
    struct f2fs_inode_info *fi = F2FS_I(inode);
    int err;

    err = inode_change_ok(inode, attr);
    if (err)
        return err;

    if (attr->ia_valid & ATTR_SIZE) {
        if (f2fs_encrypted_inode(inode) &&
                f2fs_get_encryption_info(inode))
            return -EACCES;

        if (attr->ia_size != i_size_read(inode)) {
            truncate_setsize(inode, attr->ia_size);
            f2fs_truncate(inode);
            f2fs_balance_fs(F2FS_I_SB(inode));
        } else {
            /*
             * giving a chance to truncate blocks past EOF which
             * are fallocated with FALLOC_FL_KEEP_SIZE.
             */
            f2fs_truncate(inode);
        }
    }

    __setattr_copy(inode, attr);

    if (attr->ia_valid & ATTR_MODE) {
        err = f2fs_acl_chmod(inode);
        if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
            inode->i_mode = fi->i_acl_mode;
            clear_inode_flag(fi, FI_ACL_MODE);
        }
    }

    mark_inode_dirty(inode);
    return err;
}
void f2fs_set_inode_flags(struct inode *inode)
{
    unsigned int flags = F2FS_I(inode)->i_flags;
    unsigned int new_fl = 0;

    if (flags & F2FS_SYNC_FL)
        new_fl |= S_SYNC;
    if (flags & F2FS_APPEND_FL)
        new_fl |= S_APPEND;
    if (flags & F2FS_IMMUTABLE_FL)
        new_fl |= S_IMMUTABLE;
    if (flags & F2FS_NOATIME_FL)
        new_fl |= S_NOATIME;
    if (flags & F2FS_DIRSYNC_FL)
        new_fl |= S_DIRSYNC;
    if (f2fs_encrypted_inode(inode))
        new_fl |= S_ENCRYPTED;
    inode_set_flags(inode, new_fl,
            S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
            S_ENCRYPTED);
}
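/*
 * Returning 0 from ->direct_IO makes the generic read/write paths fall back
 * to buffered I/O; that is how f2fs_direct_IO() below forces encrypted
 * regular files through the page cache, where encryption and decryption
 * actually happen.
 */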
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                loff_t offset)
{
    struct file *file = iocb->ki_filp;
    struct address_space *mapping = file->f_mapping;
    struct inode *inode = mapping->host;
    size_t count = iov_iter_count(iter);
    int err;

    /* we don't need to use inline_data strictly */
    if (f2fs_has_inline_data(inode)) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        return 0;

    err = check_direct_IO(inode, iter, offset);
    if (err)
        return err;

    trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

    if (iov_iter_rw(iter) == WRITE)
        __allocate_data_blocks(inode, offset, count);

    err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
    if (err < 0 && iov_iter_rw(iter) == WRITE)
        f2fs_write_failed(mapping, offset + count);

    trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

    return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        loff_t pos, unsigned len, unsigned flags,
        struct page **pagep, void **fsdata)
{
    struct inode *inode = mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct page *page = NULL;
    struct page *ipage;
    pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
    struct dnode_of_data dn;
    int err = 0;

    trace_f2fs_write_begin(inode, pos, len, flags);

    f2fs_balance_fs(sbi);

    /*
     * We should check this at this moment to avoid deadlock on inode page
     * and #0 page. The locking rule for inline_data conversion should be:
     * lock_page(page #0) -> lock_page(inode_page)
     */
    if (index != 0) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            goto fail;
    }
repeat:
    page = grab_cache_page_write_begin(mapping, index, flags);
    if (!page) {
        err = -ENOMEM;
        goto fail;
    }

    *pagep = page;

    f2fs_lock_op(sbi);

    /* check inline_data */
    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto unlock_fail;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    if (f2fs_has_inline_data(inode)) {
        if (pos + len <= MAX_INLINE_DATA) {
            read_inline_data(page, ipage);
            set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
            sync_inode_page(&dn);
            goto put_next;
        }
        err = f2fs_convert_inline_page(&dn, page);
        if (err)
            goto put_fail;
    }

    err = f2fs_get_block(&dn, index);
    if (err)
        goto put_fail;
put_next:
    f2fs_put_dnode(&dn);
    f2fs_unlock_op(sbi);

    f2fs_wait_on_page_writeback(page, DATA);

    if (len == PAGE_CACHE_SIZE)
        goto out_update;
    if (PageUptodate(page))
        goto out_clear;

    if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned end = start + len;

        /* Reading beyond i_size is simple: memset to zero */
        zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
        goto out_update;
    }

    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
    } else {
        struct f2fs_io_info fio = {
            .sbi = sbi,
            .type = DATA,
            .rw = READ_SYNC,
            .blk_addr = dn.data_blkaddr,
            .page = page,
            .encrypted_page = NULL,
        };
        err = f2fs_submit_page_bio(&fio);
        if (err)
            goto fail;

        lock_page(page);
        if (unlikely(!PageUptodate(page))) {
            err = -EIO;
            goto fail;
        }
        if (unlikely(page->mapping != mapping)) {
            f2fs_put_page(page, 1);
            goto repeat;
        }

        /* avoid symlink page */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
            err = f2fs_decrypt_one(inode, page);
            if (err)
                goto fail;
        }
    }
out_update:
    SetPageUptodate(page);
out_clear:
    clear_cold_data(page);
    return 0;

put_fail:
    f2fs_put_dnode(&dn);
unlock_fail:
    f2fs_unlock_op(sbi);
fail:
    f2fs_put_page(page, 1);
    f2fs_write_failed(mapping, pos + len);
    return err;
}

static int f2fs_write_end(struct file *file,
            struct address_space *mapping,
            loff_t pos, unsigned len, unsigned copied,
            struct page *page, void *fsdata)
{
    struct inode *inode = page->mapping->host;

    trace_f2fs_write_end(inode, pos, len, copied);

    set_page_dirty(page);

    if (pos + copied > i_size_read(inode)) {
        i_size_write(inode, pos + copied);
        mark_inode_dirty(inode);
        update_inode_page(inode);
    }

    f2fs_put_page(page, 1);
    return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
            loff_t offset)
{
    unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

    if (offset & blocksize_mask)
        return -EINVAL;

    if (iov_iter_alignment(iter) & blocksize_mask)
        return -EINVAL;

    return 0;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
    struct inode *inode = file->f_mapping->host;
    struct f2fs_inode_info *fi = F2FS_I(inode);
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    nid_t ino = inode->i_ino;
    int ret = 0;
    bool need_cp = false;
    struct writeback_control wbc = {
        .sync_mode = WB_SYNC_ALL,
        .nr_to_write = LONG_MAX,
        .for_reclaim = 0,
    };

    if (unlikely(f2fs_readonly(inode->i_sb)))
        return 0;

    trace_f2fs_sync_file_enter(inode);

    /* if fdatasync is triggered, let's do in-place-update */
    if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
        set_inode_flag(fi, FI_NEED_IPU);
    ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
    clear_inode_flag(fi, FI_NEED_IPU);

    if (ret) {
        trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
        return ret;
    }

    /* if the inode is dirty, let's recover all the time */
    if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
        update_inode_page(inode);
        goto go_write;
    }

    /*
     * if there is no written data, don't waste time to write recovery info.
     */
    if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
            !exist_written_data(sbi, ino, APPEND_INO)) {

        /* it may call write_inode just prior to fsync */
        if (need_inode_page_update(sbi, ino))
            goto go_write;

        if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
                exist_written_data(sbi, ino, UPDATE_INO))
            goto flush_out;
        goto out;
    }
go_write:
    /* guarantee free sections for fsync */
    f2fs_balance_fs(sbi);

    /*
     * Both of fdatasync() and fsync() are able to be recovered from
     * sudden-power-off.
     */
    down_read(&fi->i_sem);
    need_cp = need_do_checkpoint(inode);
    up_read(&fi->i_sem);

    if (need_cp) {
        /* all the dirty node pages should be flushed for POR */
        ret = f2fs_sync_fs(inode->i_sb, 1);

        /*
         * We've secured consistency through sync_fs. Following pino
         * will be used only for fsynced inodes after checkpoint.
         */
        try_to_fix_pino(inode);
        clear_inode_flag(fi, FI_APPEND_WRITE);
        clear_inode_flag(fi, FI_UPDATE_WRITE);
        goto out;
    }
sync_nodes:
    sync_node_pages(sbi, ino, &wbc);

    /* if cp_error was enabled, we should avoid infinite loop */
    if (unlikely(f2fs_cp_error(sbi)))
        goto out;

    if (need_inode_block_update(sbi, ino)) {
        mark_inode_dirty_sync(inode);
        f2fs_write_inode(inode, NULL);
        goto sync_nodes;
    }

    ret = wait_on_node_pages_writeback(sbi, ino);
    if (ret)
        goto out;

    /* once recovery info is written, don't need to track this */
    remove_dirty_inode(sbi, ino, APPEND_INO);
    clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
    remove_dirty_inode(sbi, ino, UPDATE_INO);
    clear_inode_flag(fi, FI_UPDATE_WRITE);
    ret = f2fs_issue_flush(sbi);
out:
    trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
    f2fs_trace_ios(NULL, 1);
    return ret;
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
                        pgoff_t pgofs, int whence)
{
    struct pagevec pvec;
    int nr_pages;

    if (whence != SEEK_DATA)
        return 0;

    /* find first dirty page index */
    pagevec_init(&pvec, 0);
    nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
                    PAGECACHE_TAG_DIRTY, 1);
    pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
    pagevec_release(&pvec);
    return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
                            int whence)
{
    switch (whence) {
    case SEEK_DATA:
        if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
            (blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
            return true;
        break;
    case SEEK_HOLE:
        if (blkaddr == NULL_ADDR)
            return true;
        break;
    }
    return false;
}

static inline int unsigned_offsets(struct file *file)
{
    return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

static loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
    if (offset < 0 && !unsigned_offsets(file))
        return -EINVAL;
    if (offset > maxsize)
        return -EINVAL;

    if (offset != file->f_pos) {
        file->f_pos = offset;
        file->f_version = 0;
    }
    return offset;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file->f_mapping->host;
    loff_t maxbytes = inode->i_sb->s_maxbytes;
    struct dnode_of_data dn;
    pgoff_t pgofs, end_offset, dirty;
    loff_t data_ofs = offset;
    loff_t isize;
    int err = 0;

    mutex_lock(&inode->i_mutex);

    isize = i_size_read(inode);
    if (offset >= isize)
        goto fail;

    /* handle inline data case */
    if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
        if (whence == SEEK_HOLE)
            data_ofs = isize;
        goto found;
    }

    pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

    dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

    for (; data_ofs < isize; data_ofs = pgofs << PAGE_CACHE_SHIFT) {
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
        if (err && err != -ENOENT) {
            goto fail;
        } else if (err == -ENOENT) {
            /* direct node does not exist */
            if (whence == SEEK_DATA) {
                pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
                            F2FS_I(inode));
                continue;
            } else {
                goto found;
            }
        }

        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

        /* find data/hole in dnode block */
        for (; dn.ofs_in_node < end_offset;
                dn.ofs_in_node++, pgofs++,
                data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
            block_t blkaddr;
            blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

            if (__found_offset(blkaddr, dirty, pgofs, whence)) {
                f2fs_put_dnode(&dn);
                goto found;
            }
        }
        f2fs_put_dnode(&dn);
    }

    if (whence == SEEK_DATA)
        goto fail;
found:
    if (whence == SEEK_HOLE && data_ofs > isize)
        data_ofs = isize;
    mutex_unlock(&inode->i_mutex);
    return vfs_setpos(file, data_ofs, maxbytes);
fail:
    mutex_unlock(&inode->i_mutex);
    return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file->f_mapping->host;
    loff_t maxbytes = inode->i_sb->s_maxbytes;

    switch (whence) {
    case SEEK_SET:
    case SEEK_CUR:
    case SEEK_END:
        return generic_file_llseek_size(file, offset, whence, maxbytes);
    case SEEK_DATA:
    case SEEK_HOLE:
        if (offset < 0)
            return -ENXIO;
        return f2fs_seek_block(file, offset, whence);
    }

    return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct inode *inode = file_inode(file);

    if (f2fs_encrypted_inode(inode)) {
        int err = f2fs_get_encryption_info(inode);
        if (err)
            return 0;
    }

    /* we don't need to use inline_data strictly */
    if (f2fs_has_inline_data(inode)) {
        int err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }

    file_accessed(file);
    vma->vm_ops = &f2fs_file_vm_ops;
    return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
    int ret = generic_file_open(inode, filp);

    if (!ret && f2fs_encrypted_inode(inode)) {
        ret = f2fs_get_encryption_info(inode);
        if (ret)
            ret = -EACCES;
    }
    return ret;
}
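/*
 * f2fs_mpage_readpages() below is the multi-page read path: it reuses the
 * last f2fs_map_blocks() result while pages stay inside the mapped extent,
 * zero-fills and marks uptodate any unmapped (hole) pages without issuing
 * I/O, and merges physically contiguous blocks into a single bio.
 */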
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
            struct list_head *pages, struct page *page,
            unsigned nr_pages)
{
    struct bio *bio = NULL;
    unsigned page_idx;
    sector_t last_block_in_bio = 0;
    struct inode *inode = mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    const unsigned blocksize = 1 << blkbits;
    sector_t block_in_file;
    sector_t last_block;
    sector_t last_block_in_file;
    sector_t block_nr;
    struct block_device *bdev = inode->i_sb->s_bdev;
    struct f2fs_map_blocks map;

    map.m_pblk = 0;
    map.m_lblk = 0;
    map.m_len = 0;
    map.m_flags = 0;

    for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

        prefetchw(&page->flags);
        if (pages) {
            page = list_entry(pages->prev, struct page, lru);
            list_del(&page->lru);
            if (add_to_page_cache_lru(page, mapping,
                        page->index, GFP_KERNEL))
                goto next_page;
        }

        block_in_file = (sector_t)page->index;
        last_block = block_in_file + nr_pages;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                                blkbits;
        if (last_block > last_block_in_file)
            last_block = last_block_in_file;

        /*
         * Map blocks using the previous result first.
         */
        if ((map.m_flags & F2FS_MAP_MAPPED) &&
                block_in_file > map.m_lblk &&
                block_in_file < (map.m_lblk + map.m_len))
            goto got_it;

        /*
         * Then do more f2fs_map_blocks() calls until we are
         * done with this page.
         */
        map.m_flags = 0;

        if (block_in_file < last_block) {
            map.m_lblk = block_in_file;
            map.m_len = last_block - block_in_file;

            if (f2fs_map_blocks(inode, &map, 0, false))
                goto set_error_page;
        }
got_it:
        if ((map.m_flags & F2FS_MAP_MAPPED)) {
            block_nr = map.m_pblk + block_in_file - map.m_lblk;
            SetPageMappedToDisk(page);

            if (!PageUptodate(page) && !cleancache_get_page(page)) {
                SetPageUptodate(page);
                goto confused;
            }
        } else {
            zero_user_segment(page, 0, PAGE_CACHE_SIZE);
            SetPageUptodate(page);
            unlock_page(page);
            goto next_page;
        }

        /*
         * This page will go to BIO. Do we need to send this
         * BIO off first?
         */
        if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
            submit_bio(READ, bio);
            bio = NULL;
        }
        if (bio == NULL) {
            struct f2fs_crypto_ctx *ctx = NULL;

            if (f2fs_encrypted_inode(inode) &&
                    S_ISREG(inode->i_mode)) {
                struct page *cpage;

                ctx = f2fs_get_crypto_ctx(inode);
                if (IS_ERR(ctx))
                    goto set_error_page;

                /* wait the page to be moved by cleaning */
                cpage = find_lock_page(
                        META_MAPPING(F2FS_I_SB(inode)),
                        block_nr);
                if (cpage) {
                    f2fs_wait_on_page_writeback(cpage, DATA);
                    f2fs_put_page(cpage, 1);
                }
            }

            bio = bio_alloc(GFP_KERNEL,
                    min_t(int, nr_pages, BIO_MAX_PAGES));
            if (!bio) {
                if (ctx)
                    f2fs_release_crypto_ctx(ctx);
                goto set_error_page;
            }
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
            bio->bi_end_io = f2fs_read_end_io;
            bio->bi_private = ctx;
        }

        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
            goto submit_and_realloc;

        last_block_in_bio = block_nr;
        goto next_page;
set_error_page:
        SetPageError(page);
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        unlock_page(page);
        goto next_page;
confused:
        if (bio) {
            submit_bio(READ, bio);
            bio = NULL;
        }
        unlock_page(page);
next_page:
        if (pages)
            page_cache_release(page);
    }
    BUG_ON(pages && !list_empty(pages));
    if (bio)
        submit_bio(READ, bio);
    return 0;
}
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
        unsigned int flags)
{
    struct inode *inode = NULL;
    struct f2fs_dir_entry *de;
    struct page *page;
    nid_t ino;
    int err = 0;
    unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));

    if (f2fs_encrypted_inode(dir)) {
        int res = fscrypt_get_encryption_info(dir);

        /*
         * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
         * created while the directory was encrypted and we
         * don't have access to the key.
         */
        if (fscrypt_has_encryption_key(dir))
            fscrypt_set_encrypted_dentry(dentry);
        fscrypt_set_d_op(dentry);
        if (res && res != -ENOKEY)
            return ERR_PTR(res);
    }

    if (dentry->d_name.len > F2FS_NAME_LEN)
        return ERR_PTR(-ENAMETOOLONG);

    de = f2fs_find_entry(dir, &dentry->d_name, &page);
    if (!de) {
        if (IS_ERR(page))
            return (struct dentry *)page;
        return d_splice_alias(inode, dentry);
    }

    ino = le32_to_cpu(de->ino);
    f2fs_dentry_kunmap(dir, page);
    f2fs_put_page(page, 0);

    inode = f2fs_iget(dir->i_sb, ino);
    if (IS_ERR(inode))
        return ERR_CAST(inode);

    if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
        err = __recover_dot_dentries(dir, root_ino);
        if (err)
            goto err_out;
    }

    if (f2fs_has_inline_dots(inode)) {
        err = __recover_dot_dentries(inode, dir->i_ino);
        if (err)
            goto err_out;
    }

    if (f2fs_encrypted_inode(dir) &&
            (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
            !fscrypt_has_permitted_context(dir, inode)) {
        f2fs_msg(inode->i_sb, KERN_WARNING,
                "Inconsistent encryption contexts: %lu/%lu",
                dir->i_ino, inode->i_ino);
        err = -EPERM;
        goto err_out;
    }

    return d_splice_alias(inode, dentry);

err_out:
    iput(inode);
    return ERR_PTR(err);
}
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
                        struct vm_fault *vmf)
{
    struct page *page = vmf->page;
    struct inode *inode = file_inode(vma->vm_file);
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct dnode_of_data dn;
    int err;

    f2fs_balance_fs(sbi);

    sb_start_pagefault(inode->i_sb);

    f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

    /* block allocation */
    f2fs_lock_op(sbi);
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = f2fs_reserve_block(&dn, page->index);
    if (err) {
        f2fs_unlock_op(sbi);
        goto out;
    }
    f2fs_put_dnode(&dn);
    f2fs_unlock_op(sbi);

    file_update_time(vma->vm_file);
    lock_page(page);
    if (unlikely(page->mapping != inode->i_mapping ||
            page_offset(page) > i_size_read(inode) ||
            !PageUptodate(page))) {
        unlock_page(page);
        err = -EFAULT;
        goto out;
    }

    /*
     * check to see if the page is mapped already (no holes)
     */
    if (PageMappedToDisk(page))
        goto mapped;

    /* page is wholly or partially inside EOF */
    if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
                        i_size_read(inode)) {
        unsigned offset;
        offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
    }
    set_page_dirty(page);
    SetPageUptodate(page);

    trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
    /* fill the page */
    f2fs_wait_on_page_writeback(page, DATA);

    /* wait for GCed encrypted page writeback */
    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

    /* if gced page is attached, don't write to cold segment */
    clear_cold_data(page);
out:
    sb_end_pagefault(inode->i_sb);
    return block_page_mkwrite_return(err);
}
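/*
 * For an encrypted parent directory, f2fs_symlink() below stores the target
 * as a struct fscrypt_symlink_data (a little-endian length followed by the
 * encrypted name) and writes it with page_symlink(), just as in the
 * plaintext case.
 */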
static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
                    const char *symname)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct inode *inode;
    size_t len = strlen(symname);
    struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
    struct fscrypt_symlink_data *sd = NULL;
    int err;

    if (f2fs_encrypted_inode(dir)) {
        err = fscrypt_get_encryption_info(dir);
        if (err)
            return err;

        if (!fscrypt_has_encryption_key(dir))
            return -ENOKEY;

        disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
                sizeof(struct fscrypt_symlink_data));
    }

    if (disk_link.len > dir->i_sb->s_blocksize)
        return -ENAMETOOLONG;

    err = dquot_initialize(dir);
    if (err)
        return err;

    inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
    if (IS_ERR(inode))
        return PTR_ERR(inode);

    if (f2fs_encrypted_inode(inode))
        inode->i_op = &f2fs_encrypted_symlink_inode_operations;
    else
        inode->i_op = &f2fs_symlink_inode_operations;
    inode_nohighmem(inode);
    inode->i_mapping->a_ops = &f2fs_dblock_aops;

    f2fs_lock_op(sbi);
    err = f2fs_add_link(dentry, inode);
    if (err)
        goto out;
    f2fs_unlock_op(sbi);
    alloc_nid_done(sbi, inode->i_ino);

    if (f2fs_encrypted_inode(inode)) {
        struct qstr istr = QSTR_INIT(symname, len);
        struct fscrypt_str ostr;

        sd = kzalloc(disk_link.len, GFP_NOFS);
        if (!sd) {
            err = -ENOMEM;
            goto err_out;
        }

        err = fscrypt_get_encryption_info(inode);
        if (err)
            goto err_out;

        if (!fscrypt_has_encryption_key(inode)) {
            err = -ENOKEY;
            goto err_out;
        }

        ostr.name = sd->encrypted_path;
        ostr.len = disk_link.len;
        err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
        if (err)
            goto err_out;

        sd->len = cpu_to_le16(ostr.len);
        disk_link.name = (char *)sd;
    }

    err = page_symlink(inode, disk_link.name, disk_link.len);

err_out:
    d_instantiate(dentry, inode);
    unlock_new_inode(inode);

    /*
     * Let's flush symlink data in order to avoid a broken symlink as much
     * as possible. Nevertheless, fsyncing is the best way, but there is no
     * way to get a file descriptor in order to flush that.
     *
     * Note that, it needs to do dir->fsync to make this recoverable.
     * If the symlink path is stored into inline_data, there is no
     * performance regression.
     */
    if (!err) {
        filemap_write_and_wait_range(inode->i_mapping, 0,
                            disk_link.len - 1);

        if (IS_DIRSYNC(dir))
            f2fs_sync_fs(sbi->sb, 1);
    } else {
        f2fs_unlink(dir, dentry);
    }

    kfree(sd);

    f2fs_balance_fs(sbi, true);
    return err;
out:
    handle_failed_inode(inode);
    return err;
}
static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
            struct inode *new_dir, struct dentry *new_dentry,
            unsigned int flags)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
    struct inode *old_inode = d_inode(old_dentry);
    struct inode *new_inode = d_inode(new_dentry);
    struct inode *whiteout = NULL;
    struct page *old_dir_page;
    struct page *old_page, *new_page = NULL;
    struct f2fs_dir_entry *old_dir_entry = NULL;
    struct f2fs_dir_entry *old_entry;
    struct f2fs_dir_entry *new_entry;
    bool is_old_inline = f2fs_has_inline_dentry(old_dir);
    int err = -ENOENT;

    if ((f2fs_encrypted_inode(old_dir) &&
            !fscrypt_has_encryption_key(old_dir)) ||
            (f2fs_encrypted_inode(new_dir) &&
            !fscrypt_has_encryption_key(new_dir)))
        return -ENOKEY;

    if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
            !fscrypt_has_permitted_context(new_dir, old_inode)) {
        err = -EPERM;
        goto out;
    }

    err = dquot_initialize(old_dir);
    if (err)
        goto out;

    err = dquot_initialize(new_dir);
    if (err)
        goto out;

    old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
    if (!old_entry) {
        if (IS_ERR(old_page))
            err = PTR_ERR(old_page);
        goto out;
    }

    if (S_ISDIR(old_inode->i_mode)) {
        old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
        if (!old_dir_entry) {
            if (IS_ERR(old_dir_page))
                err = PTR_ERR(old_dir_page);
            goto out_old;
        }
    }

    if (flags & RENAME_WHITEOUT) {
        err = f2fs_create_whiteout(old_dir, &whiteout);
        if (err)
            goto out_dir;
    }

    if (new_inode) {
        err = -ENOTEMPTY;
        if (old_dir_entry && !f2fs_empty_dir(new_inode))
            goto out_whiteout;

        err = -ENOENT;
        new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
                        &new_page);
        if (!new_entry) {
            if (IS_ERR(new_page))
                err = PTR_ERR(new_page);
            goto out_whiteout;
        }

        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);

        err = acquire_orphan_inode(sbi);
        if (err)
            goto put_out_dir;

        f2fs_set_link(new_dir, new_entry, new_page, old_inode);

        new_inode->i_ctime = current_time(new_inode);
        down_write(&F2FS_I(new_inode)->i_sem);
        if (old_dir_entry)
            f2fs_i_links_write(new_inode, false);
        f2fs_i_links_write(new_inode, false);
        up_write(&F2FS_I(new_inode)->i_sem);

        if (!new_inode->i_nlink)
            add_orphan_inode(new_inode);
        else
            release_orphan_inode(sbi);
    } else {
        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);

        err = f2fs_add_link(new_dentry, old_inode);
        if (err) {
            f2fs_unlock_op(sbi);
            goto out_whiteout;
        }

        if (old_dir_entry)
            f2fs_i_links_write(new_dir, true);

        /*
         * The old entry and the new entry can be located in the same
         * inline dentry block. Attaching the new entry may force inline
         * dentry conversion, after which old_entry and old_page would
         * point to a stale address, so re-do the lookup here in that
         * case.
         */
        if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
            f2fs_put_page(old_page, 0);
            old_page = NULL;

            old_entry = f2fs_find_entry(old_dir,
                        &old_dentry->d_name, &old_page);
            if (!old_entry) {
                err = -ENOENT;
                if (IS_ERR(old_page))
                    err = PTR_ERR(old_page);
                f2fs_unlock_op(sbi);
                goto out_whiteout;
            }
        }
    }

    down_write(&F2FS_I(old_inode)->i_sem);
    if (!old_dir_entry || whiteout)
        file_lost_pino(old_inode);
    else
        F2FS_I(old_inode)->i_pino = new_dir->i_ino;
    up_write(&F2FS_I(old_inode)->i_sem);

    old_inode->i_ctime = current_time(old_inode);
    f2fs_mark_inode_dirty_sync(old_inode, false);

    f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

    if (whiteout) {
        whiteout->i_state |= I_LINKABLE;
        set_inode_flag(whiteout, FI_INC_LINK);
        err = f2fs_add_link(old_dentry, whiteout);
        if (err)
            goto put_out_dir;
        whiteout->i_state &= ~I_LINKABLE;
        iput(whiteout);
    }

    if (old_dir_entry) {
        if (old_dir != new_dir && !whiteout) {
            f2fs_set_link(old_inode, old_dir_entry,
                        old_dir_page, new_dir);
        } else {
            f2fs_dentry_kunmap(old_inode, old_dir_page);
            f2fs_put_page(old_dir_page, 0);
        }
        f2fs_i_links_write(old_dir, false);
    }

    f2fs_unlock_op(sbi);

    if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
        f2fs_sync_fs(sbi->sb, 1);
    return 0;

put_out_dir:
    f2fs_unlock_op(sbi);
    if (new_page) {
        f2fs_dentry_kunmap(new_dir, new_page);
        f2fs_put_page(new_page, 0);
    }
out_whiteout:
    if (whiteout)
        iput(whiteout);
out_dir:
    if (old_dir_entry) {
        f2fs_dentry_kunmap(old_inode, old_dir_page);
        f2fs_put_page(old_dir_page, 0);
    }
out_old:
    f2fs_dentry_kunmap(old_dir, old_page);
    f2fs_put_page(old_page, 0);
out:
    return err;
}
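/*
 * __recover_dot_dentries() below repairs directories created with inline
 * dots: it re-adds any missing "." and ".." entries under f2fs_lock_op()
 * and clears FI_INLINE_DOTS once both entries exist.
 */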
struct dentry *f2fs_get_parent(struct dentry *child)
{
    struct qstr dotdot = {.len = 2, .name = ".."};
    struct page *page;
    unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
    if (!ino) {
        if (IS_ERR(page))
            return ERR_CAST(page);
        return ERR_PTR(-ENOENT);
    }
    return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}

static int __recover_dot_dentries(struct inode *dir, nid_t pino)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct qstr dot = QSTR_INIT(".", 1);
    struct qstr dotdot = QSTR_INIT("..", 2);
    struct f2fs_dir_entry *de;
    struct page *page;
    int err = 0;

    if (f2fs_readonly(sbi->sb)) {
        f2fs_msg(sbi->sb, KERN_INFO,
            "skip recovering inline_dots inode (ino:%lu, pino:%u) "
            "in readonly mountpoint", dir->i_ino, pino);
        return 0;
    }

    f2fs_balance_fs(sbi, true);

    f2fs_lock_op(sbi);

    de = f2fs_find_entry(dir, &dot, &page);
    if (de) {
        f2fs_dentry_kunmap(dir, page);
        f2fs_put_page(page, 0);
    } else if (IS_ERR(page)) {
        err = PTR_ERR(page);
        goto out;
    } else {
        err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
        if (err)
            goto out;
    }

    de = f2fs_find_entry(dir, &dotdot, &page);
    if (de) {
        f2fs_dentry_kunmap(dir, page);
        f2fs_put_page(page, 0);
    } else if (IS_ERR(page)) {
        err = PTR_ERR(page);
    } else {
        err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
    }
out:
    if (!err)
        clear_inode_flag(dir, FI_INLINE_DOTS);

    f2fs_unlock_op(sbi);
    return err;
}

static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
        struct nameidata *nd)
{
    struct inode *inode = NULL;
    struct f2fs_dir_entry *de;
    struct page *page;
    nid_t ino;
    int err = 0;
    unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));

    if (f2fs_encrypted_inode(dir)) {
        int res = fscrypt_get_encryption_info(dir);

        /*
         * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
         * created while the directory was encrypted and we
         * don't have access to the key.
         */
        if (fscrypt_has_encryption_key(dir))
            fscrypt_set_encrypted_dentry(dentry);
        fscrypt_set_d_op(dentry);
        if (res && res != -ENOKEY)
            return ERR_PTR(res);
    }

    if (dentry->d_name.len > F2FS_NAME_LEN)
        return ERR_PTR(-ENAMETOOLONG);

    de = f2fs_find_entry(dir, &dentry->d_name, &page);
    if (!de) {
        if (IS_ERR(page))
            return (struct dentry *)page;
        return d_splice_alias(inode, dentry);
    }

    ino = le32_to_cpu(de->ino);
    f2fs_dentry_kunmap(dir, page);
    f2fs_put_page(page, 0);

    inode = f2fs_iget(dir->i_sb, ino);
    if (IS_ERR(inode))
        return ERR_CAST(inode);

    if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
        err = __recover_dot_dentries(dir, root_ino);
        if (err)
            goto err_out;
    }

    if (f2fs_has_inline_dots(inode)) {
        err = __recover_dot_dentries(inode, dir->i_ino);
        if (err)
            goto err_out;
    }

    if (!IS_ERR(inode) && f2fs_encrypted_inode(dir) &&
            (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
            !fscrypt_has_permitted_context(dir, inode)) {
        bool nokey = f2fs_encrypted_inode(inode) &&
                !fscrypt_has_encryption_key(inode);

        err = nokey ? -ENOKEY : -EPERM;
        goto err_out;
    }
    return d_splice_alias(inode, dentry);

err_out:
    iput(inode);
    return ERR_PTR(err);
}

static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct inode *inode = d_inode(dentry);
    struct f2fs_dir_entry *de;
    struct page *page;
    int err = -ENOENT;

    trace_f2fs_unlink_enter(dir, dentry);

    de = f2fs_find_entry(dir, &dentry->d_name, &page);
    if (!de) {
        if (IS_ERR(page))
            err = PTR_ERR(page);
        goto fail;
    }

    f2fs_balance_fs(sbi, true);

    f2fs_lock_op(sbi);
    err = acquire_orphan_inode(sbi);
    if (err) {
        f2fs_unlock_op(sbi);
        f2fs_dentry_kunmap(dir, page);
        f2fs_put_page(page, 0);
        goto fail;
    }
    f2fs_delete_entry(de, page, dir, inode);
    f2fs_unlock_op(sbi);

    if (IS_DIRSYNC(dir))
        f2fs_sync_fs(sbi->sb, 1);
fail:
    trace_f2fs_unlink_exit(inode, err);
    return err;
}

static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
    struct page *page;
    char *link;

    page = page_follow_link_light(dentry, nd);
    if (IS_ERR(page))
        return page;

    link = nd_get_link(nd);
    if (IS_ERR(link))
        return link;

    /* this is broken symlink case */
    if (*link == 0) {
        kunmap(page);
        page_cache_release(page);
        return ERR_PTR(-ENOENT);
    }
    return page;
}

static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
                    const char *symname)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct inode *inode;
    size_t len = strlen(symname);
    struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
    struct fscrypt_symlink_data *sd = NULL;
    int err;

    if (f2fs_encrypted_inode(dir)) {
        err = fscrypt_get_encryption_info(dir);
        if (err)
            return err;

        if (!fscrypt_has_encryption_key(dir))
            return -EPERM;

        disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
                sizeof(struct fscrypt_symlink_data));
    }

    if (disk_link.len > dir->i_sb->s_blocksize)
        return -ENAMETOOLONG;

    inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
    if (IS_ERR(inode))
        return PTR_ERR(inode);

    if (f2fs_encrypted_inode(inode))
        inode->i_op = &f2fs_encrypted_symlink_inode_operations;
    else
        inode->i_op = &f2fs_symlink_inode_operations;
    inode_nohighmem(inode);
    inode->i_mapping->a_ops = &f2fs_dblock_aops;

    f2fs_balance_fs(sbi, true);

    f2fs_lock_op(sbi);
    err = f2fs_add_link(dentry, inode);
    if (err)
        goto out;
    f2fs_unlock_op(sbi);
    alloc_nid_done(sbi, inode->i_ino);

    if (f2fs_encrypted_inode(inode)) {
        struct qstr istr = QSTR_INIT(symname, len);
        struct fscrypt_str ostr;

        sd = kzalloc(disk_link.len, GFP_NOFS);
        if (!sd) {
            err = -ENOMEM;
            goto err_out;
        }

        err = fscrypt_get_encryption_info(inode);
        if (err)
            goto err_out;

        if (!fscrypt_has_encryption_key(inode)) {
            err = -EPERM;
            goto err_out;
        }

        ostr.name = sd->encrypted_path;
        ostr.len = disk_link.len;
        err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
        if (err < 0)
            goto err_out;

        sd->len = cpu_to_le16(ostr.len);
        disk_link.name = (char *)sd;
    }

    err = page_symlink(inode, disk_link.name, disk_link.len);

err_out:
    d_instantiate(dentry, inode);
    unlock_new_inode(inode);

    /*
     * Let's flush symlink data in order to avoid a broken symlink as much
     * as possible. Nevertheless, fsyncing is the best way, but there is no
     * way to get a file descriptor in order to flush that.
     *
     * Note that, it needs to do dir->fsync to make this recoverable.
     * If the symlink path is stored into inline_data, there is no
     * performance regression.
     */
    if (!err) {
        filemap_write_and_wait_range(inode->i_mapping, 0,
                            disk_link.len - 1);

        if (IS_DIRSYNC(dir))
            f2fs_sync_fs(sbi->sb, 1);
    } else {
        f2fs_unlink(dir, dentry);
    }

    kfree(sd);
    return err;
out:
    handle_failed_inode(inode);
    return err;
}

static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct inode *inode;
    int err;

    inode = f2fs_new_inode(dir, S_IFDIR | mode);
    if (IS_ERR(inode))
        return PTR_ERR(inode);

    inode->i_op = &f2fs_dir_inode_operations;
    inode->i_fop = &f2fs_dir_operations;
    inode->i_mapping->a_ops = &f2fs_dblock_aops;
    mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

    f2fs_balance_fs(sbi, true);

    set_inode_flag(inode, FI_INC_LINK);
    f2fs_lock_op(sbi);
    err = f2fs_add_link(dentry, inode);
    if (err)
        goto out_fail;
    f2fs_unlock_op(sbi);

    alloc_nid_done(sbi, inode->i_ino);

    d_instantiate(dentry, inode);
    unlock_new_inode(inode);

    if (IS_DIRSYNC(dir))
        f2fs_sync_fs(sbi->sb, 1);
    return 0;

out_fail:
    clear_inode_flag(inode, FI_INC_LINK);
    handle_failed_inode(inode);
    return err;
}

static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
    struct inode *inode = d_inode(dentry);

    if (f2fs_empty_dir(inode))
        return f2fs_unlink(dir, dentry);
    return -ENOTEMPTY;
}

static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
                umode_t mode, dev_t rdev)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct inode *inode;
    int err = 0;

    if (!new_valid_dev(rdev))
        return -EINVAL;

    inode = f2fs_new_inode(dir, mode);
    if (IS_ERR(inode))
        return PTR_ERR(inode);

    init_special_inode(inode, inode->i_mode, rdev);
    inode->i_op = &f2fs_special_inode_operations;

    f2fs_balance_fs(sbi, true);

    f2fs_lock_op(sbi);
    err = f2fs_add_link(dentry, inode);
    if (err)
        goto out;
    f2fs_unlock_op(sbi);

    alloc_nid_done(sbi, inode->i_ino);

    d_instantiate(dentry, inode);
    unlock_new_inode(inode);

    if (IS_DIRSYNC(dir))
        f2fs_sync_fs(sbi->sb, 1);
    return 0;
out:
    handle_failed_inode(inode);
    return err;
}

static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
            struct inode *new_dir, struct dentry *new_dentry)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
    struct inode *old_inode = d_inode(old_dentry);
    struct inode *new_inode = d_inode(new_dentry);
    struct page *old_dir_page;
    struct page *old_page, *new_page;
    struct f2fs_dir_entry *old_dir_entry = NULL;
    struct f2fs_dir_entry *old_entry;
    struct f2fs_dir_entry *new_entry;
    bool is_old_inline = f2fs_has_inline_dentry(old_dir);
    int err = -ENOENT;

    if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
            !fscrypt_has_permitted_context(new_dir, old_inode)) {
        err = -EPERM;
        goto out;
    }

    old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
    if (!old_entry) {
        if (IS_ERR(old_page))
            err = PTR_ERR(old_page);
        goto out;
    }

    if (S_ISDIR(old_inode->i_mode)) {
        old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
        if (!old_dir_entry) {
            if (IS_ERR(old_dir_page))
                err = PTR_ERR(old_dir_page);
            goto out_old;
        }
    }

    if (new_inode) {

        err = -ENOTEMPTY;
        if (old_dir_entry && !f2fs_empty_dir(new_inode))
            goto out_dir;

        err = -ENOENT;
        new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
                        &new_page);
        if (!new_entry) {
            if (IS_ERR(new_page))
                err = PTR_ERR(new_page);
            goto out_dir;
        }

        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);

        err = acquire_orphan_inode(sbi);
        if (err)
            goto put_out_dir;

        err = update_dent_inode(old_inode, new_inode,
                        &new_dentry->d_name);
        if (err) {
            release_orphan_inode(sbi);
            goto put_out_dir;
        }

        f2fs_set_link(new_dir, new_entry, new_page, old_inode);

        new_inode->i_ctime = CURRENT_TIME;
        down_write(&F2FS_I(new_inode)->i_sem);
        if (old_dir_entry)
            f2fs_i_links_write(new_inode, false);
        f2fs_i_links_write(new_inode, false);
        up_write(&F2FS_I(new_inode)->i_sem);

        if (!new_inode->i_nlink)
            add_orphan_inode(new_inode);
        else
            release_orphan_inode(sbi);
    } else {
        f2fs_balance_fs(sbi, true);

        f2fs_lock_op(sbi);

        err = f2fs_add_link(new_dentry, old_inode);
        if (err) {
            f2fs_unlock_op(sbi);
            goto out_dir;
        }

        if (old_dir_entry)
            f2fs_i_links_write(new_dir, true);

        /*
         * The old entry and the new entry can be located in the same
         * inline dentry block. Attaching the new entry may force inline
         * dentry conversion, after which old_entry and old_page would
         * point to a stale address, so re-do the lookup here in that
         * case.
         */
        if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
            f2fs_put_page(old_page, 0);
            old_page = NULL;

            old_entry = f2fs_find_entry(old_dir,
                        &old_dentry->d_name, &old_page);
            if (!old_entry) {
                err = -ENOENT;
                if (IS_ERR(old_page))
                    err = PTR_ERR(old_page);
                f2fs_unlock_op(sbi);
                goto out_dir;
            }
        }
    }

    down_write(&F2FS_I(old_inode)->i_sem);
    file_lost_pino(old_inode);
    if (new_inode && file_enc_name(new_inode))
        file_set_enc_name(old_inode);
    up_write(&F2FS_I(old_inode)->i_sem);

    old_inode->i_ctime = CURRENT_TIME;
    f2fs_mark_inode_dirty_sync(old_inode);

    f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

    if (old_dir_entry) {
        if (old_dir != new_dir) {
            f2fs_set_link(old_inode, old_dir_entry,
                        old_dir_page, new_dir);
        } else {
            f2fs_dentry_kunmap(old_inode, old_dir_page);
            f2fs_put_page(old_dir_page, 0);
        }
        f2fs_i_links_write(old_dir, false);
    }

    f2fs_unlock_op(sbi);

    if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
        f2fs_sync_fs(sbi->sb, 1);
    return 0;

put_out_dir:
    f2fs_unlock_op(sbi);
    f2fs_dentry_kunmap(new_dir, new_page);
    f2fs_put_page(new_page, 0);
out_dir:
    if (old_dir_entry) {
        f2fs_dentry_kunmap(old_inode, old_dir_page);
        f2fs_put_page(old_dir_page, 0);
    }
out_old:
    f2fs_dentry_kunmap(old_dir, old_page);
    f2fs_put_page(old_page, 0);
out:
    return err;
}

static void *f2fs_encrypted_follow_link(struct dentry *dentry,
                        struct nameidata *nd)
{
    struct page *cpage = NULL;
    char *caddr, *paddr = NULL;
    struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
    struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
    struct fscrypt_symlink_data *sd;
    struct inode *inode = d_inode(dentry);
    loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
    u32 max_size = inode->i_sb->s_blocksize;
    int res;

    res = fscrypt_get_encryption_info(inode);
    if (res)
        return ERR_PTR(res);

    cpage = read_mapping_page(inode->i_mapping, 0, NULL);
    if (IS_ERR(cpage))
        return cpage;
    caddr = kmap(cpage);
    caddr[size] = 0;

    /* Symlink is encrypted */
    sd = (struct fscrypt_symlink_data *)caddr;
    cstr.name = sd->encrypted_path;
    cstr.len = le16_to_cpu(sd->len);

    /* this is broken symlink case */
    if (unlikely(cstr.len == 0)) {
        res = -ENOENT;
        goto errout;
    }

    if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
        /* Symlink data on the disk is corrupted */
        res = -EIO;
        goto errout;
    }
    res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
    if (res)
        goto errout;

    res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
    if (res < 0)
        goto errout;

    /* this is broken symlink case */
    if (unlikely(pstr.name[0] == 0)) {
        res = -ENOENT;
        goto errout;
    }

    paddr = pstr.name;

    /* Null-terminate the name */
    paddr[res] = '\0';
    nd_set_link(nd, paddr);

    kunmap(cpage);
    page_cache_release(cpage);
    return NULL;
errout:
    fscrypt_fname_free_buffer(&pstr);
    kunmap(cpage);
    page_cache_release(cpage);
    return ERR_PTR(res);
}

void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
            void *cookie)
{
    char *s = nd_get_link(nd);
    if (!IS_ERR(s))
        kfree(s);
}
struct dentry *f2fs_get_parent(struct dentry *child) { struct qstr dotdot = {.len = 2, .name = ".."}; unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot); if (!ino) return ERR_PTR(-ENOENT); return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino)); } static int __recover_dot_dentries(struct inode *dir, nid_t pino) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct qstr dot = {.len = 1, .name = "."}; struct qstr dotdot = {.len = 2, .name = ".."}; struct f2fs_dir_entry *de; struct page *page; int err = 0; f2fs_lock_op(sbi); de = f2fs_find_entry(dir, &dot, &page, 0); if (de) { f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); } else { err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR); if (err) goto out; } de = f2fs_find_entry(dir, &dotdot, &page, 0); if (de) { f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); } else { err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR); } out: if (!err) { clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS); mark_inode_dirty(dir); } f2fs_unlock_op(sbi); return err; } static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct f2fs_dir_entry *de; struct page *page; nid_t ino; int err = 0; if (dentry->d_name.len > F2FS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); de = f2fs_find_entry(dir, &dentry->d_name, &page, nd ? nd->flags : 0); if (!de) return d_splice_alias(inode, dentry); ino = le32_to_cpu(de->ino); f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (f2fs_has_inline_dots(inode)) { err = __recover_dot_dentries(inode, dir->i_ino); if (err) goto err_out; } return d_splice_alias(inode, dentry); err_out: iget_failed(inode); return ERR_PTR(err); } static int f2fs_unlink(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode = dentry->d_inode; struct f2fs_dir_entry *de; struct page *page; int err = -ENOENT; trace_f2fs_unlink_enter(dir, dentry); f2fs_balance_fs(sbi); de = f2fs_find_entry(dir, &dentry->d_name, &page, 0); if (!de) goto fail; f2fs_lock_op(sbi); err = acquire_orphan_inode(sbi); if (err) { f2fs_unlock_op(sbi); f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); goto fail; } f2fs_delete_entry(de, page, dir, inode); f2fs_unlock_op(sbi); /* In order to evict this inode, we set it dirty */ mark_inode_dirty(inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); fail: trace_f2fs_unlink_exit(inode, err); return err; } static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct page *page; page = page_follow_link_light(dentry, nd); if (IS_ERR(page)) return page; /* this is broken symlink case */ if (*nd_get_link(nd) == 0) { kunmap(page); page_cache_release(page); return ERR_PTR(-ENOENT); } return page; } static int f2fs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; size_t len = strlen(symname); size_t p_len; char *p_str; struct f2fs_str disk_link = FSTR_INIT(NULL, 0); struct f2fs_encrypted_symlink_data *sd = NULL; int err; if (len > dir->i_sb->s_blocksize) return -ENAMETOOLONG; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO); if (IS_ERR(inode)) return PTR_ERR(inode); if (f2fs_encrypted_inode(inode)) inode->i_op = &f2fs_encrypted_symlink_inode_operations; else inode->i_op = &f2fs_symlink_inode_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; f2fs_lock_op(sbi); err = 
f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); if (f2fs_encrypted_inode(dir)) { struct qstr istr = QSTR_INIT(symname, len); err = f2fs_get_encryption_info(inode); if (err) goto err_out; err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link); if (err) goto err_out; err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link); if (err < 0) goto err_out; p_len = encrypted_symlink_data_len(disk_link.len) + 1; if (p_len > dir->i_sb->s_blocksize) { err = -ENAMETOOLONG; goto err_out; } sd = kzalloc(p_len, GFP_NOFS); if (!sd) { err = -ENOMEM; goto err_out; } memcpy(sd->encrypted_path, disk_link.name, disk_link.len); sd->len = cpu_to_le16(disk_link.len); p_str = (char *)sd; } else { p_len = len + 1; p_str = (char *)symname; } err = page_symlink(inode, p_str, p_len); err_out: d_instantiate(dentry, inode); unlock_new_inode(inode); /* * Let's flush symlink data in order to avoid broken symlink as much as * possible. Nevertheless, fsyncing is the best way, but there is no * way to get a file descriptor in order to flush that. * * Note that, it needs to do dir->fsync to make this recoverable. * If the symlink path is stored into inline_data, there is no * performance regression. */ if (!err) filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); kfree(sd); f2fs_fname_crypto_free_buffer(&disk_link); return err; out: handle_failed_inode(inode); return err; } static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO); set_inode_flag(F2FS_I(inode), FI_INC_LINK); f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out_fail; f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out_fail: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); handle_failed_inode(inode); return err; } static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; if (f2fs_empty_dir(inode)) return f2fs_unlink(dir, dentry); return -ENOTEMPTY; } static int f2fs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err = 0; if (!new_valid_dev(rdev)) return -EINVAL; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &f2fs_special_inode_operations; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out: handle_failed_inode(inode); return err; } static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct page *old_dir_page; struct page *old_page, *new_page; struct f2fs_dir_entry *old_dir_entry = NULL; 
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
		!f2fs_is_child_context_consistent_with_parent(new_dir,
							old_inode)) {
		err = -EPERM;
		goto out;
	}

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	if (new_inode) {
		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		f2fs_lock_op(sbi);

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		err = update_dent_inode(old_inode, new_inode,
						&new_dentry->d_name);
		if (err) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		down_write(&F2FS_I(new_inode)->i_sem);
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);
		up_write(&F2FS_I(new_inode)->i_sem);

		mark_inode_dirty(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		f2fs_lock_op(sbi);

		err = f2fs_add_link(new_dentry, old_inode);
		if (err) {
			f2fs_unlock_op(sbi);
			goto out_dir;
		}

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	if (new_inode && file_enc_name(new_inode))
		file_set_enc_name(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
			update_inode_page(old_inode);
		} else {
			f2fs_dentry_kunmap(old_inode, old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

put_out_dir:
	f2fs_unlock_op(sbi);
	f2fs_dentry_kunmap(new_dir, new_page);
	f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		f2fs_dentry_kunmap(old_inode, old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_old:
	f2fs_dentry_kunmap(old_dir, old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static void *f2fs_encrypted_follow_link(struct dentry *dentry,
						struct nameidata *nd)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct f2fs_str cstr;
	struct f2fs_str pstr = FSTR_INIT(NULL, 0);
	struct inode *inode = dentry->d_inode;
	struct f2fs_encrypted_symlink_data *sd;
	loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
	u32 max_size = inode->i_sb->s_blocksize;
	int res;

	res = f2fs_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	cpage = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(cpage))
		return cpage;
	caddr = kmap(cpage);
	caddr[size] = 0;

	/* Symlink is encrypted */
	sd = (struct f2fs_encrypted_symlink_data *)caddr;
	cstr.name = sd->encrypted_path;
	cstr.len = le16_to_cpu(sd->len);

	/* this is broken symlink case */
	if (cstr.name[0] == 0 && cstr.len == 0) {
		res = -ENOENT;
		goto errout;
	}

	if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
								max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EIO;
		goto errout;
	}
	res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
	if (res)
		goto errout;

	res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
	if (res < 0)
		goto errout;

	paddr = pstr.name;

	/* Null-terminate the name */
	paddr[res] = '\0';

	nd_set_link(nd, paddr);

	kunmap(cpage);
	page_cache_release(cpage);
	return NULL;
errout:
	f2fs_fname_crypto_free_buffer(&pstr);
	kunmap(cpage);
	page_cache_release(cpage);
	return ERR_PTR(res);
}

void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
		void *cookie)
{
	char *s = nd_get_link(nd);

	if (!IS_ERR(s))
		kfree(s);
}

const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= f2fs_encrypted_follow_link,
	.put_link	= kfree_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
};
#endif

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.check_acl	= f2fs_check_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= f2fs_follow_link,
	.put_link	= page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.check_acl	= f2fs_check_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};
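/*
 * For reference, a sketch of the on-disk layout that f2fs_symlink()
 * writes and f2fs_encrypted_follow_link() decodes above: a
 * little-endian ciphertext length followed by the encrypted target
 * path, stored as the symlink's data. This mirrors the era's f2fs
 * crypto header; treat the exact declaration as an assumption rather
 * than a quote from this file.
 */
struct f2fs_encrypted_symlink_data {
	__le16 len;			/* length of encrypted_path */
	char encrypted_path[1];		/* ciphertext, variable length */
} __packed;

/*
 * Which explains the p_len computation in f2fs_symlink():
 * the payload is the ciphertext plus the 2-byte length header,
 * and the extra "+ 1" leaves room for page_symlink()'s NUL.
 */
static inline u32 encrypted_symlink_data_len(u32 l)
{
	return l + sizeof(struct f2fs_encrypted_symlink_data) - 1;
}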
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
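/*
 * A minimal sketch of how callers typically consume the page returned
 * above: get_read_data_page() issues the read and returns an unlocked
 * page, so the caller locks it, waits for the read to complete, and
 * revalidates. Modeled on the era's get_lock_data_page() (also used by
 * truncate_partial_data_page()); treat the exact body as an assumption
 * rather than a quote.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		/* page was truncated/reclaimed while unlocked; retry */
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}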
static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	nid_t ino;
	struct inode *inode;
	bool nid_free = false;
	int err;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	f2fs_lock_op(sbi);
	if (!alloc_nid(sbi, &ino)) {
		f2fs_unlock_op(sbi);
		err = -ENOSPC;
		goto fail;
	}
	f2fs_unlock_op(sbi);

	inode_init_owner(inode, dir, mode);

	inode->i_ino = ino;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_generation = sbi->s_next_generation++;

	err = insert_inode_locked(inode);
	if (err) {
		err = -EINVAL;
		nid_free = true;
		goto out;
	}

	/* If the directory is encrypted, then we should encrypt the inode. */
	if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
		f2fs_set_encrypted_inode(inode);

	if (f2fs_may_inline_data(inode))
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	if (f2fs_may_inline_dentry(inode))
		set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);

	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	trace_f2fs_new_inode(inode, 0);
	mark_inode_dirty(inode);
	return inode;

out:
	clear_nlink(inode);
	unlock_new_inode(inode);
fail:
	trace_f2fs_new_inode(inode, err);
	make_bad_inode(inode);
	iput(inode);
	if (nid_free)
		alloc_nid_failed(sbi, ino);
	return ERR_PTR(err);
}
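/*
 * For context, a minimal sketch of the simplest caller of
 * f2fs_new_inode(): the ->create path referenced from
 * f2fs_dir_inode_operations above. It shows the allocation protocol:
 * alloc_nid() happens inside f2fs_new_inode(), alloc_nid_done() is
 * called only once f2fs_add_link() has succeeded, and any failure
 * funnels through handle_failed_inode(), which releases the nid and
 * drops the half-built inode. This mirrors the mkdir/mknod pattern in
 * this file; the exact body is an assumption, not a quote.
 */
static int f2fs_create(struct inode *dir, struct dentry *dentry,
			umode_t mode, bool excl)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_file_inode_operations;
	inode->i_fop = &f2fs_file_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;
out:
	handle_failed_inode(inode);
	return err;
}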
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static ssize_t check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int seg, i;
	size_t size;
	unsigned long addr;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		return -EINVAL;
	/* Check the memory alignment. Blocks cannot straddle pages. */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in
		 * this iovec, if so return EINVAL, otherwise we'll get csum
		 * errors when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
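/*
 * Worked example of the mask test above, assuming 4 KiB blocks
 * (blocksize_mask == 0xfff): offset 0x3000, iov_base ...1000 and
 * iov_len 0x2000 all have their low 12 bits clear, so the segment
 * passes; offset 0x3200 fails immediately with -EINVAL.
 *
 * A sketch of the ->direct_IO entry point that consumes this check,
 * where a non-zero return means "misaligned, fall back to buffered
 * I/O" by returning 0. The body and the get_block callback name are
 * assumptions patterned on the same-era driver, not quotes from this
 * file.
 */
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (check_direct_IO(inode, rw, iov, offset, nr_segs))
		return 0;	/* misaligned: let the buffered path handle it */

	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block_ro);
}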
static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
			     struct inode *new_dir, struct dentry *new_dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
	struct inode *old_inode = d_inode(old_dentry);
	struct inode *new_inode = d_inode(new_dentry);
	struct page *old_dir_page, *new_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry, *new_entry;
	int old_nlink = 0, new_nlink = 0;
	int err = -ENOENT;

	if ((f2fs_encrypted_inode(old_dir) &&
			!fscrypt_has_encryption_key(old_dir)) ||
			(f2fs_encrypted_inode(new_dir) &&
			!fscrypt_has_encryption_key(new_dir)))
		return -ENOKEY;

	if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) &&
			(old_dir != new_dir) &&
			(!fscrypt_has_permitted_context(new_dir, old_inode) ||
			 !fscrypt_has_permitted_context(old_dir, new_inode)))
		return -EPERM;

	err = dquot_initialize(old_dir);
	if (err)
		goto out;

	err = dquot_initialize(new_dir);
	if (err)
		goto out;

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry) {
		if (IS_ERR(old_page))
			err = PTR_ERR(old_page);
		goto out;
	}

	new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
	if (!new_entry) {
		if (IS_ERR(new_page))
			err = PTR_ERR(new_page);
		goto out_old;
	}

	/* prepare for updating ".." directory entry info later */
	if (old_dir != new_dir) {
		if (S_ISDIR(old_inode->i_mode)) {
			old_dir_entry = f2fs_parent_dir(old_inode,
							&old_dir_page);
			if (!old_dir_entry) {
				if (IS_ERR(old_dir_page))
					err = PTR_ERR(old_dir_page);
				goto out_new;
			}
		}

		if (S_ISDIR(new_inode->i_mode)) {
			new_dir_entry = f2fs_parent_dir(new_inode,
							&new_dir_page);
			if (!new_dir_entry) {
				if (IS_ERR(new_dir_page))
					err = PTR_ERR(new_dir_page);
				goto out_old_dir;
			}
		}
	}

	/*
	 * For a cross rename between a file and a directory that are not
	 * in the same parent directory, we will increment the nlink of the
	 * file's parent later, so check the upper boundary of its nlink here.
	 */
	if ((!old_dir_entry || !new_dir_entry) &&
			old_dir_entry != new_dir_entry) {
		old_nlink = old_dir_entry ? -1 : 1;
		new_nlink = -old_nlink;
		err = -EMLINK;
		if ((old_nlink > 0 && old_dir->i_nlink >= F2FS_LINK_MAX) ||
			(new_nlink > 0 && new_dir->i_nlink >= F2FS_LINK_MAX))
			goto out_new_dir;
	}

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);

	/* update ".." directory entry info of old dentry */
	if (old_dir_entry)
		f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);
	/* update ".." directory entry info of new dentry */
	if (new_dir_entry)
		f2fs_set_link(new_inode, new_dir_entry, new_dir_page, old_dir);

	/* update directory entry info of old dir inode */
	f2fs_set_link(old_dir, old_entry, old_page, new_inode);

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_dir->i_ctime = current_time(old_dir);
	if (old_nlink) {
		down_write(&F2FS_I(old_dir)->i_sem);
		f2fs_i_links_write(old_dir, old_nlink > 0);
		up_write(&F2FS_I(old_dir)->i_sem);
	}
	f2fs_mark_inode_dirty_sync(old_dir, false);

	/* update directory entry info of new dir inode */
	f2fs_set_link(new_dir, new_entry, new_page, old_inode);

	down_write(&F2FS_I(new_inode)->i_sem);
	file_lost_pino(new_inode);
	up_write(&F2FS_I(new_inode)->i_sem);

	new_dir->i_ctime = current_time(new_dir);
	if (new_nlink) {
		down_write(&F2FS_I(new_dir)->i_sem);
		f2fs_i_links_write(new_dir, new_nlink > 0);
		up_write(&F2FS_I(new_dir)->i_sem);
	}
	f2fs_mark_inode_dirty_sync(new_dir, false);

	f2fs_unlock_op(sbi);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

out_new_dir:
	if (new_dir_entry) {
		f2fs_dentry_kunmap(new_inode, new_dir_page);
		f2fs_put_page(new_dir_page, 0);
	}
out_old_dir:
	if (old_dir_entry) {
		f2fs_dentry_kunmap(old_inode, old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_new:
	f2fs_dentry_kunmap(new_dir, new_page);
	f2fs_put_page(new_page, 0);
out_old:
	f2fs_dentry_kunmap(old_dir, old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}
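/*
 * f2fs_cross_rename() is reached through the RENAME_EXCHANGE flag of
 * ->rename2(). A minimal dispatcher sketch showing the assumed wiring,
 * matching how mainline routes the flag; not quoted from this file:
 */
static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return f2fs_cross_rename(old_dir, old_dentry,
					 new_dir, new_dentry);
	/*
	 * The VFS has already handled the new-dentry existence case for
	 * RENAME_NOREPLACE, so a plain rename is sufficient here.
	 */
	return f2fs_rename(old_dir, old_dentry, new_dir, new_dentry);
}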