Mirror of https://github.com/torvalds/linux.git
crypto: adiantum - add fast path for single-page messages
When the source scatterlist is a single page, optimize the first hash
step of adiantum to use crypto_shash_digest() instead of
init/update/final, and use the same local kmap for both hashing the bulk
part and loading the narrow part of the source data.

Likewise, when the destination scatterlist is a single page, optimize
the second hash step of adiantum to use crypto_shash_digest() instead of
init/update/final, and use the same local kmap for both hashing the bulk
part and storing the narrow part of the destination data.

In some cases these optimizations improve performance significantly.

Note: ideally, for optimal performance each architecture should
implement the full "adiantum(xchacha12,aes)" algorithm and fully
optimize the contiguous buffer case to use no indirect calls.  That's
not something I've gotten around to doing, though.  This commit just
makes a relatively small change that provides some benefit with the
existing template-based approach.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit dadf5e56c9
parent 01aed663e6
crypto/adiantum.c
@@ -245,10 +245,9 @@ static void adiantum_hash_header(struct skcipher_request *req)
 
 /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
 static int adiantum_hash_message(struct skcipher_request *req,
-				 struct scatterlist *sgl, le128 *digest)
+				 struct scatterlist *sgl, unsigned int nents,
+				 le128 *digest)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
 	struct shash_desc *hash_desc = &rctx->u.hash_desc;
@@ -256,14 +255,11 @@ static int adiantum_hash_message(struct skcipher_request *req,
 	unsigned int i, n;
 	int err;
 
-	hash_desc->tfm = tctx->hash;
-
 	err = crypto_shash_init(hash_desc);
 	if (err)
 		return err;
 
-	sg_miter_start(&miter, sgl, sg_nents(sgl),
-		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
 	for (i = 0; i < bulk_len; i += n) {
 		sg_miter_next(&miter);
 		n = min_t(unsigned int, miter.length, bulk_len - i);
@@ -285,6 +281,8 @@ static int adiantum_finish(struct skcipher_request *req)
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+	struct scatterlist *dst = req->dst;
+	const unsigned int dst_nents = sg_nents(dst);
 	le128 digest;
 	int err;
 
@@ -298,13 +296,30 @@ static int adiantum_finish(struct skcipher_request *req)
 	 * enc: C_R = C_M - H_{K_H}(T, C_L)
	 * dec: P_R = P_M - H_{K_H}(T, P_L)
	 */
-	err = adiantum_hash_message(req, req->dst, &digest);
-	if (err)
-		return err;
-	le128_add(&digest, &digest, &rctx->header_hash);
-	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
-	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
-				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
+	rctx->u.hash_desc.tfm = tctx->hash;
+	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
+	if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
+		/* Fast path for single-page destination */
+		void *virt = kmap_local_page(sg_page(dst)) + dst->offset;
+
+		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
+					  (u8 *)&digest);
+		if (err) {
+			kunmap_local(virt);
+			return err;
+		}
+		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
+		kunmap_local(virt);
+	} else {
+		/* Slow path that works for any destination scatterlist */
+		err = adiantum_hash_message(req, dst, dst_nents, &digest);
+		if (err)
+			return err;
+		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+		scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
+					 bulk_len, sizeof(le128), 1);
+	}
 	return 0;
 }
 
@@ -324,6 +339,8 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+	struct scatterlist *src = req->src;
+	const unsigned int src_nents = sg_nents(src);
 	unsigned int stream_len;
 	le128 digest;
 	int err;
@@ -339,12 +356,24 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	 * dec: C_M = C_R + H_{K_H}(T, C_L)
	 */
 	adiantum_hash_header(req);
-	err = adiantum_hash_message(req, req->src, &digest);
+	rctx->u.hash_desc.tfm = tctx->hash;
+	if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
+		/* Fast path for single-page source */
+		void *virt = kmap_local_page(sg_page(src)) + src->offset;
+
+		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
+					  (u8 *)&digest);
+		memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
+		kunmap_local(virt);
+	} else {
+		/* Slow path that works for any source scatterlist */
+		err = adiantum_hash_message(req, src, src_nents, &digest);
+		scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
+					 bulk_len, sizeof(le128), 0);
+	}
 	if (err)
 		return err;
-	le128_add(&digest, &digest, &rctx->header_hash);
-	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
-				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
+	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
 	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
 
 	/* If encrypting, encrypt P_M with the block cipher to get C_M */
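Usage note (not part of the patch): the new fast paths are taken when the source or destination scatterlist has a single entry and the entry's offset plus req->cryptlen fits within one page. The sketch below is a minimal, hypothetical kernel-side example of a request shaped to hit that path; the function name adiantum_single_page_example() and the one-page buffer are assumptions for illustration, not code from this commit.

/* Illustrative sketch only: not part of this commit. */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

static int adiantum_single_page_example(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[32];            /* 32-byte Adiantum user key */
	u8 iv[32] = { 0 };     /* 32-byte Adiantum tweak */
	void *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	/* One page at offset 0: sg_nents() == 1 and offset + cryptlen <= PAGE_SIZE */
	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out;
	}
	sg_init_one(&sg, buf, PAGE_SIZE);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, PAGE_SIZE, iv);

	/* Single-page src and dst, so both new fast paths are taken */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	if (buf)
		free_page((unsigned long)buf);
	crypto_free_skcipher(tfm);
	return err;
}

Requests whose scatterlists span multiple pages, or whose data crosses a page boundary, still work; they simply fall back to the adiantum_hash_message() slow path shown in the diff.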