Example #1
/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR() value on failure.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
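
The kernel-doc above describes a calling convention rather than showing it: ext4_encrypt() hands back a locked bounce page that must go into the write bio, and ext4_restore_control_page() has to be called on that same page once the I/O is done. The sketch below only illustrates that pairing; it is not the actual ext4 writeback code, and the helper write_out_page() is a hypothetical stand-in for the real bio submission path.

/* Sketch of the ext4_encrypt()/ext4_restore_control_page() pairing. */
static int encrypt_and_write_one_page(struct inode *inode,
				      struct page *plaintext_page)
{
	struct page *ciphertext_page;
	int err;

	/* The plaintext page must already be locked by the caller. */
	ciphertext_page = ext4_encrypt(inode, plaintext_page);
	if (IS_ERR(ciphertext_page))
		return PTR_ERR(ciphertext_page);

	/* Hypothetical helper: write the bounce page to disk and wait. */
	err = write_out_page(inode, ciphertext_page);

	/* Unlocks the bounce page and releases the encryption context. */
	ext4_restore_control_page(ciphertext_page);
	return err;
}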
Example #2
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}
Example #3
/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
	int ret;

	struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = ext4_decrypt(ctx, page);
	ext4_release_crypto_ctx(ctx);
	return ret;
}
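
Because ext4_decrypt_one() bundles context allocation, decryption and context release into a single call, it is convenient for one-off decryption of a single page, for example after a read has completed. The caller below is only a sketch written under that assumption; it is not taken from ext4 itself.

/* Sketch: decrypt a page that was just read from disk, then unlock it. */
static void decrypt_after_read(struct inode *inode, struct page *page)
{
	int err = ext4_decrypt_one(inode, page);

	if (err)
		SetPageError(page);
	else
		SetPageUptodate(page);
	unlock_page(page);
}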
Example #4
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		/* bio_add_page() returns the number of bytes added. */
		err = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (err != inode->i_sb->s_blocksize) {
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
Example #5
/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
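
The comment inside ext4_get_crypto_ctx() implies a matching strategy on the release side: contexts allocated from the slab cache (EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL set) should be freed, while the others should go back onto ext4_free_crypto_ctxs for reuse. ext4_release_crypto_ctx() itself is not shown on this page; the function below is only a sketch of that assumed behaviour, not its actual implementation.

/* Sketch of a release path that mirrors the allocation logic above. */
static void release_crypto_ctx_sketch(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		/* Context came from the slab cache: free it outright. */
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		/* Context came from the free list: put it back for reuse. */
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}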
Example #6
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}