/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return -EOPNOTSUPP;

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		if (len > EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = 0; i < len; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	ext4_lblk_t lblk = ex->ee_block;
	ext4_fsblk_t pblk = ext4_ext_pblock(ex);
	unsigned int len = ext4_ext_get_actual_len(ex);
	int err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector = pblk;
		err = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (err) {
			bio_put(bio);
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
/*
 * Zero out the blocks covered by the extent @ex on an encrypted inode by
 * writing encrypted zeroes through a bounce page, one block at a time.
 */
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t pblk = ext4_ext_pblock(ex);
	unsigned int len = ext4_ext_get_actual_len(ex);
	int ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		/* Encrypt a block's worth of zeroes into the bounce page. */
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
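/*
 * For context, a minimal sketch of the kind of caller that dispatches to
 * ext4_encrypted_zeroout(): a generic extent-zeroout helper that falls back
 * to a plain block-device zeroout when the inode is not encrypted. The
 * helper name ext4_ext_zeroout_sketch and the exact fallback used here are
 * illustrative assumptions, not part of the code above.
 */
static int ext4_ext_zeroout_sketch(struct inode *inode, struct ext4_extent *ex)
{
	/*
	 * Encrypted files must write encrypted zeroes through the bounce
	 * page path above; a raw zeroout would leave blocks on disk that do
	 * not decrypt to zeroes.
	 */
	if (ext4_encrypted_inode(inode))
		return ext4_encrypted_zeroout(inode, ex);

	/* Unencrypted files can zero the physical range directly. */
	return sb_issue_zeroout(inode->i_sb, ext4_ext_pblock(ex),
				ext4_ext_get_actual_len(ex), GFP_NOFS);
}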
/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_lblk_t start, end;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated, otherwise delayed allocation blocks may not
	 * be reflected and bypass the checks on extent header.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
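/*
 * For context, a minimal sketch of how ext4_ind_migrate() is typically
 * reached: the EXT4_IOC_SETFLAGS path migrates the inode mapping format
 * when userspace toggles EXT4_EXTENTS_FL. The helper name and the
 * simplified surroundings (no locking, no journalling of the flag change)
 * are illustrative assumptions, not part of the code above.
 */
static int ext4_migrate_on_setflags_sketch(struct inode *inode,
					   unsigned int new_flags)
{
	if (new_flags & EXT4_EXTENTS_FL)
		/* Indirect-mapped inode gaining the extents flag. */
		return ext4_ext_migrate(inode);

	/*
	 * Extent-mapped inode dropping the extents flag; this only succeeds
	 * for the simple single-extent layout checked above.
	 */
	return ext4_ind_migrate(inode);
}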