int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	f2fs_balance_fs(sbi);

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long)offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	f2fs_lock_op(sbi);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)
			goto noalloc;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);
		if (ret)
			break;
noalloc:
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}
	f2fs_unlock_op(sbi);

	return ret;
}
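/*
 * Hedged userspace sketch (not part of the original source): plain
 * preallocation via fallocate(2) is what reaches expand_inode_data()
 * above. With FALLOC_FL_KEEP_SIZE the blocks are reserved but i_size
 * is left untouched, matching the final KEEP_SIZE branch of the kernel
 * function. The file name and sizes here are illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int prealloc_demo(const char *path)
{
	int fd = open(path, O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return -1;
	/* reserve 1 MiB at offset 0 without growing the file size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}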
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long)offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = pg_start << PAGE_CACHE_SHIFT;
			blk_end = pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}
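/*
 * fill_zero() is called above but not shown in this section. A minimal
 * reconstruction, assuming it zeroes a sub-page range through the page
 * cache (grab the data page, zero [start, start + len), mark it dirty);
 * treat this as a sketch, not the authoritative helper.
 */
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}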
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, 0, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, idx, idx + delta, false);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, 0, offset);

	if (!ret)
		i_size_write(inode, new_size);
	return ret;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block_bmap);
}
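/*
 * Illustrative userspace counterpart (an assumption, not from this
 * source): the ->bmap hook above services the FIBMAP ioctl, which is
 * also why the inline inode must be converted first -- inline data has
 * no block address to report. FIBMAP requires CAP_SYS_RAWIO.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int fibmap_demo(const char *path)
{
	int fd = open(path, O_RDONLY);
	int blk = 0;	/* logical block 0; FIBMAP rewrites it in place */
	int ret = -1;

	if (fd >= 0 && ioctl(fd, FIBMAP, &blk) == 0) {
		printf("physical block: %d\n", blk);
		ret = 0;
	}
	if (fd >= 0)
		close(fd);
	return ret;
}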
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				fscrypt_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode), true);
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);

	return f2fs_convert_inline_inode(inode);
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, 0, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, 0, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, 0, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		i_size_write(inode, new_size);

	return ret;
}
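/*
 * Hedged userspace sketch (illustrative, not from this source): the
 * collapse path above is reached via fallocate(2) with
 * FALLOC_FL_COLLAPSE_RANGE. Offset and length must be F2FS_BLKSIZE
 * (4 KiB) aligned, or the kernel function returns -EINVAL as checked
 * at its top.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

static int collapse_demo(int fd)
{
	/* drop one 4 KiB block at offset 4096; the file shrinks by 4 KiB */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 4096) < 0) {
		perror("collapse");
		return -1;
	}
	return 0;
}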
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode))
		return 0;

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);

	return f2fs_convert_inline_inode(inode);
}
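/*
 * Hedged userspace sketch: the ioctl pair that drives the atomic-write
 * path above. The request numbers follow the f2fs ioctl definitions
 * (magic 0xf5); they are redefined here only to keep the example
 * self-contained. Writes issued between START and COMMIT are staged
 * and then committed as one unit.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

static int atomic_update_demo(int fd, const char *buf, size_t len)
{
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
		return -1;
	if (write(fd, buf, len) != (ssize_t)len)
		return -1;	/* nothing has been committed yet */
	return ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
}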
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_length(iov, nr_segs);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	err = check_direct_IO(inode, rw, iov, offset, nr_segs);
	if (err)
		return err;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw & WRITE) {
		__allocate_data_blocks(inode, offset, count);
		if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
			err = -EIO;
			goto out;
		}
	}

	err = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block_dio);
out:
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline(inode)) {
		if (f2fs_convert_inline_inode(inode))
			return;
	}

	if (!truncate_blocks(inode, i_size_read(inode), true)) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
							loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
							loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long)offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
						pg_start << PAGE_CACHE_SHIFT);
		}

		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);

				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);

				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
					(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}
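/*
 * Companion sketch to the collapse example earlier (illustrative, not
 * from this source): the same fallocate(2) entry point reaches
 * f2fs_zero_range() when FALLOC_FL_ZERO_RANGE is passed; adding
 * FALLOC_FL_KEEP_SIZE keeps i_size unchanged, which is the KEEP_SIZE
 * test at the end of the kernel function.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int zero_demo(int fd)
{
	/* zero one 4 KiB block in place without changing the file size */
	return fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			 4096, 4096);
}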
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
		update_inode_page(inode);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to track this */
	remove_dirty_inode(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_dirty_inode(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static inline int unsigned_offsets(struct file *file)
{
	return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

static loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
	if (offset < 0 && !unsigned_offsets(file))
		return -EINVAL;
	if (offset > maxsize)
		return -EINVAL;

	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
	return offset;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes);
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (f2fs_encrypted_inode(inode)) {
		int err = f2fs_get_encryption_info(inode);
		if (err)
			return 0;
	}

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = f2fs_get_encryption_info(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}
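/*
 * Illustrative userspace use of the llseek path above (an assumption,
 * not from this source): SEEK_DATA/SEEK_HOLE land in f2fs_seek_block(),
 * which is why dirty-but-unallocated pages (NEW_ADDR plus a matching
 * dirty index) are reported as data.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void map_extents_demo(int fd)
{
	off_t end = lseek(fd, 0, SEEK_END);
	off_t pos = 0;

	while (pos < end) {
		off_t data = lseek(fd, pos, SEEK_DATA);
		off_t hole;

		if (data < 0)
			break;		/* ENXIO: no more data past pos */
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%lld, %lld)\n", (long long)data,
						(long long)hole);
		pos = hole;
	}
}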
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static ssize_t check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int seg, i;
	size_t size;
	unsigned long addr;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		return -EINVAL;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
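/*
 * Hedged userspace sketch of what the alignment checks above enforce
 * (illustrative, not from this source): O_DIRECT buffers, offsets, and
 * lengths must be block-size aligned, so a well-behaved caller
 * allocates with posix_memalign() and does block-multiple I/O.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static ssize_t dio_read_demo(const char *path, off_t offset)
{
	void *buf;
	ssize_t n = -1;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	/* 4096 covers the common logical block size checked above */
	if (posix_memalign(&buf, 4096, 4096) == 0) {
		n = pread(fd, buf, 4096, offset);  /* offset aligned too */
		free(buf);
	}
	close(fd);
	return n;
}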
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, 0, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		struct dnode_of_data dn;
		struct page *ipage;
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, idx, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			goto next;
		} else if (dn.data_blkaddr == NULL_ADDR) {
			f2fs_put_dnode(&dn);
			goto next;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		ipage = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, idx + delta);
		if (ret)
			goto out;

		old_addr = dn.data_blkaddr;
		f2fs_bug_on(sbi, old_addr != NEW_ADDR);

		if (new_addr != NEW_ADDR) {
			struct node_info ni;

			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
		}
		f2fs_put_dnode(&dn);
next:
		f2fs_unlock_op(sbi);
	}

	i_size_write(inode, new_size);
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}