mirror of
https://github.com/torvalds/linux.git
synced 2024-12-16 16:12:52 +00:00
ffceeefb33
In __fscrypt_decrypt_bio(), only decrypt the blocks that actually
comprise the bio, rather than assuming blocksize == PAGE_SIZE and
decrypting the entirety of every page used in the bio.

This is in preparation for allowing encryption on ext4 filesystems with
blocksize != PAGE_SIZE.

This is based on work by Chandan Rajendra.

Reviewed-by: Chandan Rajendra <chandan@linux.ibm.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
121 lines
2.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

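/*
 * Decrypt the contents of a read bio in place.  Only the blocks that
 * actually comprise the bio are decrypted; each segment is handed to
 * fscrypt_decrypt_pagecache_blocks() with its own length and offset,
 * so blocksize != PAGE_SIZE is handled correctly (see the commit
 * message above).  If @done, read completion is finished here as well:
 * pages are marked up-to-date (or errored) and unlocked.
 */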
static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
							   bv->bv_offset);
		if (ret)
			SetPageError(page);
		else if (done)
			SetPageUptodate(page);
		if (done)
			unlock_page(page);
	}
}

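/*
 * Decrypt a bio synchronously in the caller's context, leaving page
 * flags and locks untouched; the caller completes the read itself.
 */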
void fscrypt_decrypt_bio(struct bio *bio)
{
	__fscrypt_decrypt_bio(bio, false);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);

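/*
 * Work function for deferred decryption: decrypt the bio, finish read
 * completion for its pages, then drop the context and the bio
 * reference.
 */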
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
	struct bio *bio = ctx->bio;

	__fscrypt_decrypt_bio(bio, true);
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

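/*
 * Queue a read bio for decryption via the fscrypt workqueue, with
 * completion_pages() above finishing the read.  Decryption is deferred
 * here rather than done in the bio completion handler, presumably
 * because the crypto operations may sleep.
 */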
void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->work, completion_pages);
	ctx->bio = bio;
	fscrypt_enqueue_decrypt_work(&ctx->work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);

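/*
 * Zero out (on disk) the plaintext of @len filesystem blocks starting
 * at logical block @lblk / physical block @pblk of @inode.  Because
 * file contents are stored encrypted, a zero block must be written as
 * the encryption of zeroes, hence the bounce page.
 */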
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	struct page *ciphertext_page;
	struct bio *bio;
	int ret, err = 0;

	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
	if (!ciphertext_page)
		return -ENOMEM;

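	/*
	 * Write out the range one block at a time, reusing the single
	 * bounce page: encrypt a block of zeroes into it, then submit a
	 * one-segment write bio and wait for it to complete.
	 */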
	while (len--) {
		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
					  ZERO_PAGE(0), ciphertext_page,
					  blocksize, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
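		/* bi_sector is in 512-byte sectors, hence the "- 9" */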
		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
		if (WARN_ON(ret != blocksize)) {
			/* should never happen! */
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_free_bounce_page(ciphertext_page);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);