int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	/*
	 * Called from the fsync() system call.
	 * This is the only entry point that can catch write and sync
	 * timing for both data blocks and intermediate blocks.
	 *
	 * This function should be implemented when the writeback function
	 * will be implemented.
	 */
	struct inode *inode = file->f_mapping->host;
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&inode->i_mutex);

	if (!nilfs_inode_dirty(inode)) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	if (datasync)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
						    LLONG_MAX);
	else
		err = nilfs_construct_segment(inode->i_sb);

	mutex_unlock(&inode->i_mutex);
	return err;
}
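For context, nilfs_sync_file() is reached through the ->fsync hook of the filesystem's file_operations. A minimal sketch of that registration follows; the exact set of other methods is version-dependent and shown here only as an assumption.

/*
 * Sketch only: members other than .fsync vary between kernel versions;
 * the essential binding is .fsync = nilfs_sync_file.
 */
const struct file_operations nilfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.unlocked_ioctl	= nilfs_ioctl,
	.mmap		= nilfs_file_mmap,
	.open		= generic_file_open,
	.fsync		= nilfs_sync_file,	/* the function above */
	.splice_read	= generic_file_splice_read,
};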
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * The filesystem was remounted read-only because of an
		 * error or metadata corruption, but dirty pages are still
		 * being flushed in the background.  So here we simply
		 * discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
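nilfs_writepage() is called through the inode's address_space_operations. A hedged sketch of that table; the members beyond .writepage are illustrative and depend on the kernel version.

/*
 * Sketch only: remaining methods abridged and version-dependent; the key
 * binding is .writepage = nilfs_writepage.
 */
const struct address_space_operations nilfs_aops = {
	.writepage	= nilfs_writepage,	/* the function above */
	.readpage	= nilfs_readpage,
	.writepages	= nilfs_writepages,
	.set_page_dirty	= nilfs_set_page_dirty,
	/* ... remaining methods omitted ... */
};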
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	/* This function is called when super block should be written back */
	if (wait)
		err = nilfs_construct_segment(sb);

	down_write(&nilfs->ns_sem);
	if (nilfs_sb_dirty(nilfs)) {
		sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs));
		if (likely(sbp)) {
			nilfs_set_log_cursor(sbp[0], nilfs);
			nilfs_commit_super(sb, NILFS_SB_COMMIT);
		}
	}
	up_write(&nilfs->ns_sem);

	if (!err)
		err = nilfs_flush_device(nilfs);

	return err;
}
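nilfs_sync_fs() is invoked through the ->sync_fs hook of super_operations, e.g. from sync(2). A sketch of that wiring; member names other than .sync_fs are assumptions that vary by kernel version.

/*
 * Sketch only: unrelated super_operations members abridged; the relevant
 * binding is .sync_fs = nilfs_sync_fs.
 */
static const struct super_operations nilfs_sops = {
	.alloc_inode	= nilfs_alloc_inode,
	.dirty_inode	= nilfs_dirty_inode,
	.evict_inode	= nilfs_evict_inode,
	.put_super	= nilfs_put_super,
	.sync_fs	= nilfs_sync_fs,	/* the function above */
	.statfs		= nilfs_statfs,
	.remount_fs	= nilfs_remount,
	.show_options	= nilfs_show_options,
};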
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
	int err = 0;

	/* This function is called when super block should be written back */
	if (wait)
		err = nilfs_construct_segment(sb);
	return err;
}
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err = 0;

	/* This function is called when super block should be written back */
	if (wait)
		err = nilfs_construct_segment(sb);

	down_write(&nilfs->ns_sem);
	if (nilfs_sb_dirty(nilfs))
		nilfs_commit_super(sbi, 1);
	up_write(&nilfs->ns_sem);

	return err;
}
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	page_debug(3, "called (page=%p, index=%lu, wbc nonblocking %d, "
		   "wbc for_reclaim %d)\n",
		   page, page->index, wbc->nonblocking, wbc->for_reclaim);

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	/*
	 * Called from the fsync() system call.
	 * This is the only entry point that can catch write and sync
	 * timing for both data blocks and intermediate blocks.
	 *
	 * This function should be implemented when the writeback function
	 * will be implemented.
	 */
	struct the_nilfs *nilfs;
	struct inode *inode = file->f_mapping->host;
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&inode->i_mutex);

	if (nilfs_inode_dirty(inode)) {
		if (datasync)
			err = nilfs_construct_dsync_segment(inode->i_sb, inode,
							    0, LLONG_MAX);
		else
			err = nilfs_construct_segment(inode->i_sb);
	}
	mutex_unlock(&inode->i_mutex);

	nilfs = inode->i_sb->s_fs_info;
	if (!err && nilfs_test_opt(nilfs, BARRIER)) {
		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
		if (err != -EIO)
			err = 0;
	}
	return err;
}
int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int err;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&inode->i_mutex);

	if (!nilfs_inode_dirty(inode)) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	if (datasync)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
						    LLONG_MAX);
	else
		err = nilfs_construct_segment(inode->i_sb);

	mutex_unlock(&inode->i_mutex);
	return err;
}
int nilfs_sync_file(struct file *file, int datasync)
{
	/*
	 * Called from the fsync() system call.
	 * This is the only entry point that can catch write and sync
	 * timing for both data blocks and intermediate blocks.
	 *
	 * This function should be implemented when the writeback function
	 * will be implemented.
	 */
	struct inode *inode = file->f_mapping->host;
	int err;

	if (!nilfs_inode_dirty(inode))
		return 0;

	if (datasync)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
						    LLONG_MAX);
	else
		err = nilfs_construct_segment(inode->i_sb);

	return err;
}
/**
 * nilfs_resize_fs - resize the filesystem
 * @sb: super block instance
 * @newsize: new size of the filesystem (in bytes)
 */
int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	__u64 devsize, newnsegs;
	loff_t sb2off;
	int ret;

	ret = -ERANGE;
	devsize = i_size_read(sb->s_bdev->bd_inode);
	if (newsize > devsize)
		goto out;

	/*
	 * Write lock is required to protect some functions depending
	 * on the number of segments, the number of reserved segments,
	 * and so forth.
	 */
	down_write(&nilfs->ns_segctor_sem);

	sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
	newnsegs = sb2off >> nilfs->ns_blocksize_bits;
	do_div(newnsegs, nilfs->ns_blocks_per_segment);

	ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
	up_write(&nilfs->ns_segctor_sem);
	if (ret < 0)
		goto out;

	ret = nilfs_construct_segment(sb);
	if (ret < 0)
		goto out;

	down_write(&nilfs->ns_sem);
	nilfs_move_2nd_super(sb, sb2off);
	ret = -EIO;
	sbp = nilfs_prepare_super(sb, 0);
	if (likely(sbp)) {
		nilfs_set_log_cursor(sbp[0], nilfs);
		/*
		 * Drop NILFS_RESIZE_FS flag for compatibility with
		 * mount-time resize which may be implemented in a
		 * future release.
		 */
		sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
					      ~NILFS_RESIZE_FS);
		sbp[0]->s_dev_size = cpu_to_le64(newsize);
		sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
		if (sbp[1])
			memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
		ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
	}
	up_write(&nilfs->ns_sem);

	/*
	 * Reset the range of allocatable segments last.  This order
	 * is important in the case of expansion because the secondary
	 * superblock must be protected from log write until migration
	 * completes.
	 */
	if (!ret)
		nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1);
out:
	return ret;
}
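nilfs_resize_fs() is normally driven from userspace through the NILFS_IOCTL_RESIZE ioctl (as used by nilfs-resize(8)). The following is a hedged sketch of such an ioctl handler; the exact helper names and error handling are assumptions and vary by kernel version.

/* Sketch of a NILFS_IOCTL_RESIZE handler; details differ between versions. */
static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
			      void __user *argp)
{
	__u64 newsize;
	int ret = -EPERM;

	if (!capable(CAP_SYS_ADMIN))		/* resizing requires admin privileges */
		goto out;

	ret = mnt_want_write_file(filp);	/* take write access on the mount */
	if (ret)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&newsize, argp, sizeof(newsize)))
		goto out_drop_write;

	ret = nilfs_resize_fs(inode->i_sb, newsize);	/* the function above */

out_drop_write:
	mnt_drop_write_file(filp);
out:
	return ret;
}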