/* * Write the MMP block using WRITE_SYNC to try to get the block on-disk * faster. */ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) { struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data); /* * We protect against freezing so that we don't create dirty buffers * on frozen filesystem. */ sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); mark_buffer_dirty(bh); lock_buffer(bh); bh->b_end_io = end_buffer_write_sync; get_bh(bh); #ifdef FEATURE_STORAGE_META_LOG if( bh && bh->b_bdev && bh->b_bdev->bd_disk) set_metadata_rw_status(bh->b_bdev->bd_disk->first_minor, WAIT_WRITE_CNT); #endif submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh); wait_on_buffer(bh); sb_end_write(sb); if (unlikely(!buffer_uptodate(bh))) return 1; return 0; }
/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
/*
 * Truncate the object at @h_path (or the already-open @h_file) to @length.
 *
 * With no open file, fall back to the path-based vfsub_truncate().
 * Otherwise take freeze protection on the underlying superblock, verify
 * mandatory locks and security policy, then truncate through the open
 * file.  NOTE(review): lockdep is switched off around sb_start_write()/
 * do_truncate()/sb_end_write() — presumably to silence false positives
 * from the stacked-filesystem lock ordering; confirm against the aufs
 * documentation.
 *
 * Returns 0 on success or a negative errno.
 */
int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
		struct file *h_file)
{
	int err;
	struct inode *h_inode;
	struct super_block *h_sb;

	if (!h_file) {
		/* no open file supplied: use the path-based helper */
		err = vfsub_truncate(h_path, length);
		goto out;
	}

	h_inode = h_path->dentry->d_inode;
	h_sb = h_inode->i_sb;
	lockdep_off();
	/* block filesystem freezing while we dirty the inode */
	sb_start_write(h_sb);
	lockdep_on();
	err = locks_verify_truncate(h_inode, h_file, length);
	if (!err)
		err = security_path_truncate(h_path);
	if (!err) {
		lockdep_off();
		err = do_truncate(h_path->dentry, length, attr, h_file);
		lockdep_on();
	}
	lockdep_off();
	sb_end_write(h_sb);
	lockdep_on();

out:
	return err;
}
/*
 * Backend for the fallocate(2) syscall: validate the request, then ask
 * the filesystem to preallocate (or punch a hole in) [offset, offset+len)
 * of @file.  Returns 0 on success or a negative errno.
 */
int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	long err;

	if (offset < 0 || len <= 0)
		return -EINVAL;

	/* Only KEEP_SIZE and PUNCH_HOLE are understood here. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	/* Punching a hole requires FALLOC_FL_KEEP_SIZE as well. */
	if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	/* Append-only files must never have holes punched in them. */
	if ((mode & FALLOC_FL_PUNCH_HOLE) && IS_APPEND(inode))
		return -EPERM;

	if (IS_IMMUTABLE(inode))
		return -EPERM;

	/*
	 * Security policy may have changed since the file was opened;
	 * re-check write permission now.
	 */
	err = security_file_permission(file, MAY_WRITE);
	if (err)
		return err;

	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;

	/* Filesystems decide for themselves whether directories work. */
	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
		return -ENODEV;

	/* The second test also catches offset+len wrapping through zero. */
	if ((offset + len) > sb->s_maxbytes || (offset + len) < 0)
		return -EFBIG;

	if (!file->f_op->fallocate)
		return -EOPNOTSUPP;

	/* Keep the filesystem from being frozen while it allocates. */
	sb_start_write(sb);
	err = file->f_op->fallocate(file, mode, offset, len);
	sb_end_write(sb);
	return err;
}
/*
 * Fold the local statfs change delta into the master statfs file.
 *
 * Takes freeze protection on @sb, acquires the statfs inode glock
 * exclusively, and — unless the local delta is all zero — runs a small
 * transaction that applies the local counters to the master copy via
 * update_statfs().  @type is currently unused by this function.
 *
 * Returns 0 on success or a negative errno; cleanup is performed via
 * the reverse-order goto chain below.
 */
int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	/* block filesystem freezing for the duration of the sync */
	sb_start_write(sb);
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	/* nothing to fold in if the local delta is entirely zero */
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	/* two dinodes (master + local) are modified in this transaction */
	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	update_statfs(sdp, m_bh, l_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	sb_end_write(sb);
	return error;
}
/*
 * Top-level XFS write entry point: validate the iovec, take freeze
 * protection, and dispatch to the direct-I/O or buffered write path.
 * On a successful write, any required sync (O_SYNC and friends) is
 * issued via generic_write_sync() before freeze protection is dropped.
 */
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	size_t			count = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return ret;
	if (!count)
		return 0;

	/* block filesystem freezing while the write is in flight */
	sb_start_write(inode->i_sb);

	ret = -EIO;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out;

	if (file->f_flags & O_DIRECT)
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, count);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  count);

	if (ret > 0) {
		ssize_t	err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}

out:
	sb_end_write(inode->i_sb);
	return ret;
}
/*
 * Backend for the fallocate(2) syscall: validate @mode, @offset and
 * @len, then call the filesystem's ->fallocate() under freeze
 * protection.  Returns 0 on success or a negative errno.
 */
int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (offset < 0 || len <= 0)
		return -EINVAL;
	/* reject any mode bits other than KEEP_SIZE and PUNCH_HOLE */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	/* punching a hole requires FALLOC_FL_KEEP_SIZE to be set too */
	if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	/* it is not possible to punch a hole in an append-only file */
	if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
		return -EPERM;
	if (IS_IMMUTABLE(inode))
		return -EPERM;
	/*
	 * Revalidate write permission, in case security policy has
	 * changed since the file was opened.
	 */
	ret = security_file_permission(file, MAY_WRITE);
	if (ret)
		return ret;
	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;
	/*
	 * Let the individual filesystem decide whether it supports
	 * preallocation for directories or not.
	 */
	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
		return -ENODEV;
	/* check for wrap through zero too */
	if (((offset + len) > inode->i_sb->s_maxbytes) ||
	    ((offset + len) < 0))
		return -EFBIG;
	if (!file->f_op->fallocate)
		return -EOPNOTSUPP;

	/* block filesystem freezing while the filesystem allocates */
	sb_start_write(inode->i_sb);
	ret = file->f_op->fallocate(file, mode, offset, len);
	sb_end_write(inode->i_sb);
	return ret;
}
/*
 * Shared implementation of ftruncate()/ftruncate64(): resize the file
 * behind @fd to @length.  @small is non-zero when the caller entered
 * through a 32-bit syscall, in which case lengths beyond MAX_NON_LFS
 * are rejected unless the file was opened O_LARGEFILE.  This variant
 * passes the vfsmount through to do_truncate2().
 *
 * Returns 0 on success or a negative errno.
 */
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
	struct inode *inode;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct fd f;
	int error;

	error = -EINVAL;
	if (length < 0)
		goto out;
	error = -EBADF;
	f = fdget(fd);
	if (!f.file)
		goto out;

	/* explicitly opened as large or we are on 64-bit box */
	if (f.file->f_flags & O_LARGEFILE)
		small = 0;

	dentry = f.file->f_path.dentry;
	mnt = f.file->f_path.mnt;
	inode = dentry->d_inode;
	error = -EINVAL;
	/* only regular files opened for write may be truncated */
	if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE))
		goto out_putf;

	error = -EINVAL;
	/* Cannot ftruncate over 2^31 bytes without large file support */
	if (small && length > MAX_NON_LFS)
		goto out_putf;

	error = -EPERM;
	if (IS_APPEND(inode))
		goto out_putf;

	/* take freeze protection while the inode is modified */
	sb_start_write(inode->i_sb);
	error = locks_verify_truncate(inode, f.file, length);
	if (!error)
		error = security_path_truncate(&f.file->f_path);
	if (!error)
		error = do_truncate2(mnt, dentry, length,
				     ATTR_MTIME|ATTR_CTIME, f.file);
	sb_end_write(inode->i_sb);
out_putf:
	fdput(f);
out:
	return error;
}
/*
 * Shared implementation of ftruncate()/ftruncate64(): resize the file
 * behind @fd to @length.  @small is non-zero when the caller came in
 * through the 32-bit syscall, in which case lengths beyond MAX_NON_LFS
 * are rejected unless the file was opened O_LARGEFILE.
 *
 * Returns 0 on success or a negative errno.
 */
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
	struct dentry *dentry;
	struct inode *inode;
	struct fd f;
	int err;

	err = -EINVAL;
	if (length < 0)
		goto out;

	err = -EBADF;
	f = fdget(fd);
	if (!f.file)
		goto out;

	/* explicitly opened as large, or we are on a 64-bit box */
	if (f.file->f_flags & O_LARGEFILE)
		small = 0;

	dentry = f.file->f_path.dentry;
	inode = dentry->d_inode;

	err = -EINVAL;
	/* only regular files opened for write may be truncated ... */
	if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE))
		goto out_put;
	/* ... and not past 2^31 bytes without large-file support */
	if (small && length > MAX_NON_LFS)
		goto out_put;

	err = -EPERM;
	if (IS_APPEND(inode))
		goto out_put;

	/* take freeze protection while the inode is modified */
	sb_start_write(inode->i_sb);
	err = locks_verify_truncate(inode, f.file, length);
	if (!err)
		err = security_path_truncate(&f.file->f_path);
	if (!err)
		err = do_truncate(dentry, length, ATTR_MTIME | ATTR_CTIME,
				  f.file);
	sb_end_write(inode->i_sb);

out_put:
	fdput(f);
out:
	return err;
}
/**
 * Open, mmap and mlock the specified file to be able to read and
 * write to it in softirqs.
 * Use MAP_SHARED to synchronize the mapping with underlying file.
 *
 * The function must not be called from softirq!
 *
 * Returns 0 on success or a negative errno.  On success @db->filp and
 * @db->hdr are set; on failure they are left untouched.
 */
int tdb_file_open(TDB *db, unsigned long size)
{
	unsigned long ret, addr;
	struct file *filp;
	struct inode *inode;

	filp = filp_open(db->path, O_CREAT | O_RDWR, 0600);
	if (IS_ERR(filp)) {
		TDB_ERR("Cannot open db file %s\n", db->path);
		return PTR_ERR(filp);
	}
	BUG_ON(!filp || !filp->f_path.dentry);

	if (!filp->f_op->fallocate) {
		TDB_ERR("TDB requires filesystem with fallocate support\n");
		/*
		 * Close the file we just opened; db->filp is only
		 * assigned at the end, so it must not be used in the
		 * error paths.
		 */
		filp_close(filp, NULL);
		return -EBADF;
	}

	/* Allocate continuous extents, under freeze protection. */
	inode = file_inode(filp);
	sb_start_write(inode->i_sb);
	ret = filp->f_op->fallocate(filp, 0, 0, size);
	sb_end_write(inode->i_sb);
	if (ret) {
		TDB_ERR("Cannot fallocate file, %ld\n", ret);
		filp_close(filp, NULL);
		return ret;
	}

	addr = tempesta_map_file(filp, size, db->node);
	if (IS_ERR((void *)addr)) {
		TDB_ERR("Cannot map file\n");
		filp_close(filp, NULL);
		/* propagate the encoded errno, not the raw pointer bits */
		return PTR_ERR((void *)addr);
	}

	db->filp = filp;
	db->hdr = (TdbHdr *)addr;
	file_accessed(filp);

	return 0;
}
/*
 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
 * faster.
 *
 * Returns 0 when the buffer is up to date after the synchronous write,
 * 1 otherwise.
 */
static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
	struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);

	/*
	 * We protect against freezing so that we don't create dirty buffers
	 * on frozen filesystem.
	 */
	sb_start_write(sb);
	ext4_mmp_csum_set(sb, mmp);
	mark_buffer_dirty(bh);
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	/* extra reference is dropped by end_buffer_write_sync() */
	get_bh(bh);
	submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	sb_end_write(sb);
	if (unlikely(!buffer_uptodate(bh)))
		return 1;
	return 0;
}
/*
 * Common entry point for the fallocate(2) syscall family: validate the
 * requested @mode/@offset/@len combination, then hand the operation to
 * the filesystem's ->fallocate() under freeze protection.
 *
 * Returns 0 on success or a negative errno.
 */
int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	long err;

	if (offset < 0 || len <= 0)
		return -EINVAL;

	/* Reject any mode bit we do not understand. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	/* Hole punching and range zeroing cannot be combined. */
	if ((mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) ==
	    (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	/* Hole punching is only allowed together with KEEP_SIZE. */
	if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	/* Range collapsing must not be combined with any other flag. */
	if ((mode & FALLOC_FL_COLLAPSE_RANGE) &&
	    (mode & ~FALLOC_FL_COLLAPSE_RANGE))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	/* Append-only files admit nothing beyond plain preallocation. */
	if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode))
		return -EPERM;

	if (IS_IMMUTABLE(inode))
		return -EPERM;

	/* An active swap file must never be modified through fallocate. */
	if (IS_SWAPFILE(inode))
		return -ETXTBSY;

	/*
	 * Recheck write permission in case security policy changed after
	 * the file was opened.
	 */
	err = security_file_permission(file, MAY_WRITE);
	if (err)
		return err;

	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;

	/*
	 * Whether preallocation on directories is supported is left to
	 * the individual filesystem.
	 */
	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
		return -ENODEV;

	/* The second test also catches offset+len wrapping through zero. */
	if ((offset + len) > sb->s_maxbytes || (offset + len) < 0)
		return -EFBIG;

	if (!file->f_op->fallocate)
		return -EOPNOTSUPP;

	sb_start_write(sb);
	err = file->f_op->fallocate(file, mode, offset, len);
	sb_end_write(sb);
	return err;
}
/*
 * Almost copy of generic_file_splice_write() (added changed_begin/end,
 * tux3_iattrdirty()).
 *
 * Splice data from @pipe into @out at *@ppos, up to @len bytes.  Each
 * feed iteration runs under i_mutex with a tux3 change_begin()/
 * change_end_if_needed() bracket so the delta machinery sees every
 * ->write_end().  The whole operation holds freeze protection on the
 * superblock.  Returns the number of bytes spliced or a negative errno.
 */
static ssize_t tux3_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	if(DEBUG_MODE_K==1)
	{
		printk(KERN_INFO"%25s %25s %4d #in\n",__FILE__,__func__,__LINE__);
	}
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct sb *sb = tux_sb(inode->i_sb);
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;

	/* block filesystem freezing for the whole splice */
	sb_start_write(inode->i_sb);

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);

		/* For each ->write_end() calls change_end(). */
		change_begin(sb);
		/* For timestamp. FIXME: convert this to ->update_time
		 * handler? */
		tux3_iattrdirty(inode);
		ret = file_remove_suid(out);
		if (!ret) {
			ret = file_update_time(out);
			if (!ret)
				ret = splice_from_pipe_feed(pipe, &sd,
							    pipe_to_file);
		}
		change_end_if_needed(sb);
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	/* report bytes actually spliced, even if the last feed failed */
	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		int err;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;
		balance_dirty_pages_ratelimited(mapping);
	}
	sb_end_write(inode->i_sb);

	return ret;
}
static inline void dummy(void){ struct inode i; sb_start_write(i.i_sb); sb_end_write(i.i_sb); }