/*
 * v9fs_write_end - finalize a pagecache write
 * @filp:    file being written
 * @mapping: address space of the file
 * @pos:     byte offset at which the write started
 * @len:     length the caller asked write_begin for
 * @copied:  bytes actually copied from userspace (may be < @len)
 * @page:    locked pagecache page from write_begin
 * @fsdata:  opaque cookie from write_begin (unused)
 *
 * Returns the number of bytes accepted.  Returning 0 tells
 * generic_perform_write() to redo the copy with a shorter length.
 */
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct inode *inode = page->mapping->host;

	if (!PageUptodate(page)) {
		if (unlikely(copied < len)) {
			/*
			 * Short copy into a non-uptodate page: do NOT
			 * zero-fill the uncopied tail — the rest of the page
			 * was never read from the server, so zeroing would
			 * clobber data that still lives there.  Return 0 so
			 * the caller retries the copy.
			 */
			copied = 0;
			goto out;
		}
		/* The whole requested range was copied; the page is valid. */
		SetPageUptodate(page);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
	}
	set_page_dirty(page);
out:
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
/** * v9fs_file_write - write to a file * @filp: file pointer to write * @data: data buffer to write data from * @count: size of buffer * @offset: offset at which to write data * */ static ssize_t v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; ssize_t retval; loff_t origin; int err = 0; retval = generic_write_checks(iocb, from); if (retval <= 0) return retval; origin = iocb->ki_pos; retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err); if (retval > 0) { struct inode *inode = file_inode(file); loff_t i_size; unsigned long pg_start, pg_end; pg_start = origin >> PAGE_SHIFT; pg_end = (origin + retval - 1) >> PAGE_SHIFT; if (inode->i_mapping && inode->i_mapping->nrpages) invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); iocb->ki_pos += retval; i_size = i_size_read(inode); if (iocb->ki_pos > i_size) { inode_add_bytes(inode, iocb->ki_pos - i_size); i_size_write(inode, iocb->ki_pos); } return retval; } return err; }
/*
 * v9fs_file_write_internal - synchronous write helper
 * @inode:      inode being written
 * @fid:        9P fid to write through
 * @data:       userspace buffer to write from
 * @count:      number of bytes to write
 * @offset:     in/out: file position; advanced by the amount written
 * @invalidate: nonzero to drop cached pages over the written range
 *
 * Loops issuing p9_client_write() until all bytes are sent or an error
 * occurs, then (optionally) invalidates the affected pagecache range
 * and extends i_size if the file grew.
 *
 * NOTE(review): the function body appears truncated in this view — no
 * return statement or closing brace is visible; presumably it returns
 * `total` (or an error) after this point.  Confirm against the full file.
 */
ssize_t
v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
			 const char __user *data, size_t count,
			 loff_t *offset, int invalidate)
{
	int n;
	loff_t i_size;
	size_t total = 0;
	loff_t origin = *offset;
	unsigned long pg_start, pg_end;

	p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
		 data, (int)count, (int)*offset);

	/* Keep writing until everything is sent or the server errors out. */
	do {
		n = p9_client_write(fid, NULL, data+total, origin+total, count);
		if (n <= 0)
			break;
		count -= n;
		total += n;
	} while (count > 0);

	if (invalidate && (total > 0)) {
		/* Cached pages over the written range are now stale. */
		pg_start = origin >> PAGE_CACHE_SHIFT;
		pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		*offset += total;
		i_size = i_size_read(inode);
		if (*offset > i_size) {
			/* The write extended the file; grow i_size. */
			inode_add_bytes(inode, *offset - i_size);
			i_size_write(inode, *offset);
		}
	}
void nilfs_inode_add_blocks(struct inode *inode, int n) { struct nilfs_root *root = NILFS_I(inode)->i_root; inode_add_bytes(inode, (1 << inode->i_blkbits) * n); if (root) atomic_add(n, &root->blocks_count); }
void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n) { inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); if (NILFS_MDT(bmap->b_inode)) nilfs_mdt_mark_dirty(bmap->b_inode); else mark_inode_dirty(bmap->b_inode); }
/*
 * hfs_get_block - map a file block to a disk block for the buffer layer
 * @inode:     file whose block is being mapped
 * @block:     logical block index within the file (fs-blocksize units)
 * @bh_result: buffer head to fill in with the mapping
 * @create:    nonzero if the caller may allocate a new block
 *
 * Translates @block into an allocation block, extending the file when
 * appending exactly one block past EOF, then resolves it to a disk
 * block via the inode's extent records.  Returns 0 on success or a
 * negative errno.
 */
int hfs_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	u16 dblock, ablock;
	int res;

	sb = inode->i_sb;
	/* Convert inode block to disk allocation block */
	ablock = (u32)block / HFS_SB(sb)->fs_div;

	if (block >= HFS_I(inode)->fs_blocks) {
		/* Only an append of exactly one new block is allowed. */
		if (block > HFS_I(inode)->fs_blocks || !create)
			return -EIO;
		if (ablock >= HFS_I(inode)->alloc_blocks) {
			res = hfs_extend_file(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFS_I(inode)->first_blocks) {
		/* Block is covered by the extents cached in the inode. */
		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
		goto done;
	}

	/* Otherwise look the extent up in the extents B-tree. */
	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_ext_read_extent(inode, ablock);
	if (!res)
		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
					    ablock - HFS_I(inode)->cached_start);
	else {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFS_I(inode)->extents_lock);

done:
	/* Map: partition start + allocation block + offset within it. */
	map_bh(bh_result, sb,
	       HFS_SB(sb)->fs_start + dblock * HFS_SB(sb)->fs_div +
	       (u32)block % HFS_SB(sb)->fs_div);

	if (create) {
		/* Freshly allocated block: grow the inode's accounting. */
		set_buffer_new(bh_result);
		HFS_I(inode)->phys_size += sb->s_blocksize;
		HFS_I(inode)->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}
/** * v9fs_file_write - write to a file * @filp: file pointer to write * @data: data buffer to write data from * @count: size of buffer * @offset: offset at which to write data * */ static ssize_t v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; ssize_t retval = 0; loff_t origin = iocb->ki_pos; size_t count = iov_iter_count(from); int err = 0; retval = generic_write_checks(file, &origin, &count, 0); if (retval) return retval; iov_iter_truncate(from, count); if (!count) return 0; retval = p9_client_write(file->private_data, origin, from, &err); if (retval > 0) { struct inode *inode = file_inode(file); loff_t i_size; unsigned long pg_start, pg_end; pg_start = origin >> PAGE_CACHE_SHIFT; pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT; if (inode->i_mapping && inode->i_mapping->nrpages) invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); origin += retval; i_size = i_size_read(inode); iocb->ki_pos = origin; if (origin > i_size) { inode_add_bytes(inode, origin - i_size); i_size_write(inode, origin); } return retval; } return err; }
void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n) { inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); }
/* Get a block at iblock for inode, possibly allocating if create */
/*
 * hfsplus_get_block - map a file block to a disk sector
 * @inode:     file whose block is being mapped
 * @iblock:    logical block index within the file (fs-blocksize units)
 * @bh_result: buffer head to fill in with the mapping
 * @create:    nonzero if the caller may allocate a new block
 *
 * Returns 0 on success or a negative errno.  May mark the inode dirty
 * even on a pure read if reading the extent flushed a dirty cached
 * extent to the extents btree (see was_dirty below).
 */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;

	/* Convert inode block to disk allocation block */
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		/* Only an append of exactly one new block is allowed. */
		if (iblock > hip->fs_blocks || !create)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode, false);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		/* Block is covered by the extents cached in the inode. */
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	/* The extents file must never recurse into its own btree. */
	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree. In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	/* Allocation block -> sector, plus the offset inside the block. */
	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		 sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		/* Freshly allocated block: grow the inode's accounting. */
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}
/*
 * vzquota_alloc_space2 - account a space allocation against @inode
 * @inode:    inode being charged
 * @number:   number of bytes to add
 * @prealloc: preallocation flag (not consulted here)
 *
 * Always reports success; no quota limit is enforced in this path.
 */
static int vzquota_alloc_space2(struct inode *inode,
				qsize_t number, int prealloc)
{
	inode_add_bytes(inode, number);
	return QUOTA_OK;
}
/* Get a block at iblock for inode, possibly allocating if create */
/*
 * hfsplus_get_block (journaled variant) - map a file block to a disk
 * block, routing metadata writes through the journal handle.
 * @inode:     file whose block is being mapped
 * @iblock:    logical block index within the file (fs-blocksize units)
 * @bh_result: buffer head to fill in with the mapping
 * @create:    nonzero if the caller may allocate a new block
 *
 * Returns 0 on success, a negative errno, or -1 if marking the inode
 * dirty through the journal fails.
 */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	int res = -EIO;
	u32 ablock, dblock, mask;
	int shift;	/* NOTE(review): computed below but never used */
	hfsplus_handle_t *hfsplus_handle, tmp_hfsplus_handle;

	/* Fallback handle for the non-journaled path. */
	tmp_hfsplus_handle.journaled = !HFSPLUS_JOURNAL_PRESENT;
	tmp_hfsplus_handle.handle = NULL;

	sb = inode->i_sb;
	/* Journal device */
	if (HFSPLUS_SB(sb).jnl.journaled == HFSPLUS_JOURNAL_PRESENT) {
		/* Write Metadata */
		if (((inode->i_mapping->a_ops == &hfsplus_journalled_btree_aops) ||
		     (inode->i_mapping->a_ops == &hfsplus_journalled_aops)) && create) {
			/* Reuse the transaction already open on this task. */
			hfsplus_handle = hfsplus_jbd_current_handle();
			if (hfsplus_handle == NULL) {
				printk("hfsplus_handle is NULL\n");
				hfsplus_handle = &tmp_hfsplus_handle;
			}
		} else {
			hfsplus_handle = &tmp_hfsplus_handle;
		}
	}
	/* Non-journal device */
	else {
		hfsplus_handle = &tmp_hfsplus_handle;
	}

	/* Convert inode block to disk allocation block */
	shift = HFSPLUS_SB(sb).alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> HFSPLUS_SB(sb).fs_shift;

	if (iblock >= HFSPLUS_I(inode).fs_blocks) {
		/* Only an append of exactly one new block is allowed. */
		if (iblock > HFSPLUS_I(inode).fs_blocks || !create) {
			return -EIO;
		}
		if (ablock >= HFSPLUS_I(inode).alloc_blocks) {
			res = hfsplus_file_extend(hfsplus_handle, inode);
			if (res) {
				return res;
			}
		}
	} else
		create = 0;

	if (ablock < HFSPLUS_I(inode).first_blocks) {
		/* Block is covered by the extents cached in the inode. */
		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).first_extents, ablock);
		goto done;
	}

	/* Otherwise look the extent up in the extents B-tree. */
	mutex_lock(&HFSPLUS_I(inode).extents_lock);
	res = hfsplus_ext_read_extent(hfsplus_handle, inode, ablock);
	if (!res) {
		dblock = hfsplus_ext_find_block(HFSPLUS_I(inode).cached_extents,
						ablock - HFSPLUS_I(inode).cached_start);
	} else {
		mutex_unlock(&HFSPLUS_I(inode).extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFSPLUS_I(inode).extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
	       inode->i_ino, (long long)iblock, dblock);

	/* Allocation block -> device block, plus offset inside the block. */
	mask = (1 << HFSPLUS_SB(sb).fs_shift) - 1;
	map_bh(bh_result, sb,
	       (dblock << HFSPLUS_SB(sb).fs_shift) +
	       HFSPLUS_SB(sb).blockoffset + (iblock & mask));

	if (create) {
		/* Freshly allocated block: grow the inode's accounting. */
		set_buffer_new(bh_result);
		HFSPLUS_I(inode).phys_size += sb->s_blocksize;
		HFSPLUS_I(inode).fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		if (hfsplus_journalled_mark_inode_dirty(__FUNCTION__, hfsplus_handle, inode)) {
			printk("HFS+-fs: Error in %s()\n", __FUNCTION__);
			return -1;
		}
	}
	return 0;
}