/*
 * nilfs_sync_file - fsync(2)/fdatasync(2) entry point for NILFS.
 *
 * Flushes and waits on the page cache for [start, end], then, while
 * holding i_mutex, builds a log segment if the inode still carries
 * dirty (meta)data: a data-only segment for fdatasync, a full segment
 * otherwise.
 *
 * Returns 0 on success or a negative error code.
 */
int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	if (nilfs_inode_dirty(inode)) {
		if (datasync)
			/* data blocks only; whole file range */
			ret = nilfs_construct_dsync_segment(inode->i_sb, inode,
							    0, LLONG_MAX);
		else
			/* data and intermediate (meta) blocks */
			ret = nilfs_construct_segment(inode->i_sb);
	}
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/*
 * nilfs_writepages - address_space writepages callback.
 *
 * NILFS performs its own log writing, so only synchronous writeback
 * (WB_SYNC_ALL) triggers segment construction over the requested
 * range; background (WB_SYNC_NONE) requests are ignored.
 */
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	if (wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	return nilfs_construct_dsync_segment(inode->i_sb, inode,
					     wbc->range_start,
					     wbc->range_end);
}
/*
 * nilfs_writepages - address_space writepages callback.
 *
 * On a read-only mount the dirty pages can never be written back, so
 * they are discarded and -EROFS is returned.  Otherwise only
 * synchronous writeback (WB_SYNC_ALL) triggers segment construction
 * over the requested range; background requests are ignored.
 */
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/* drop dirty pages instead of leaking them forever */
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	return nilfs_construct_dsync_segment(inode->i_sb, inode,
					     wbc->range_start,
					     wbc->range_end);
}
/*
 * nilfs_writepages - address_space writepages callback (compat variant).
 *
 * Builds a data-sync segment for the requested range on synchronous
 * writeback.  The preprocessor conditional widens the trigger on older
 * kernels; see the note below.
 */
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

#if NEED_WB_SYNC_NONE_CHECK_FOR_DO_SYNC_MAPPING_RANGE
	/*
	 * Compat path: also treat WB_SYNC_NONE writeback as synchronous
	 * when it does not come from pdflush.  NOTE(review): presumably
	 * this works around kernels whose do_sync_mapping_range() issued
	 * WB_SYNC_NONE requests for sync_file_range(2) — confirm against
	 * the definition of NEED_WB_SYNC_NONE_CHECK_FOR_DO_SYNC_MAPPING_RANGE.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL ||
	    (wbc->sync_mode == WB_SYNC_NONE && !current_is_pdflush()))
#else
	if (wbc->sync_mode == WB_SYNC_ALL)
#endif
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
/*
 * nilfs_sync_file - fsync(2)/fdatasync(2) entry point for NILFS.
 *
 * Flushes and waits on the page cache for [start, end]; if the inode
 * still carries dirty (meta)data, builds a log segment under i_mutex
 * (data-only for fdatasync, full segment otherwise).  When the
 * "barrier" mount option is active, a device cache flush is issued
 * afterwards; only -EIO from the flush is reported to the caller.
 *
 * Returns 0 on success or a negative error code.
 */
int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	if (nilfs_inode_dirty(inode))
		ret = datasync ?
			nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
						      LLONG_MAX) :
			nilfs_construct_segment(inode->i_sb);
	mutex_unlock(&inode->i_mutex);

	/*
	 * The flush runs even if the inode was clean: previously written
	 * data may still sit in the device's volatile cache.
	 */
	if (!ret && nilfs_test_opt(nilfs, BARRIER)) {
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
					 NULL);
		if (ret != -EIO)
			ret = 0;	/* ignore non-fatal flush errors */
	}
	return ret;
}
/*
 * nilfs_sync_file - fsync(2)/fdatasync(2) entry point for NILFS.
 *
 * Writes back and waits on the page cache for [start, end], then
 * constructs a log segment under i_mutex when the inode still has
 * dirty (meta)data: data-only for fdatasync, a full segment otherwise.
 *
 * Returns 0 on success or a negative error code.
 */
int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	if (nilfs_inode_dirty(inode))
		ret = datasync ?
			nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
						      LLONG_MAX) :
			nilfs_construct_segment(inode->i_sb);
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/*
 * nilfs_sync_file - fsync(2)/fdatasync(2) entry point for NILFS
 * (legacy two-argument signature; page-cache writeback is handled by
 * the caller on this kernel generation).
 *
 * Constructs a log segment when the inode carries dirty (meta)data:
 * a data-only segment for fdatasync, a full segment otherwise.
 *
 * Returns 0 on success or a negative error code.
 */
int nilfs_sync_file(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;

	if (!nilfs_inode_dirty(inode))
		return 0;	/* nothing to log */

	return datasync ?
		nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
					      LLONG_MAX) :
		nilfs_construct_segment(inode->i_sb);
}