crypto: powerpc/p10-aes-gcm - Register modules as SIMD

This patch fixes a data mismatch that may occur when SIMD is not
usable.  The fix is to register the algs as SIMD modules so that the
stitched implementation is only executed when SIMD instructions are
usable, and to call gcm_update() to generate the final digest when
needed (see the wrapper sketch below).

A new rfc4106(gcm(aes)) algorithm is also added.
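
For reference, the reason SIMD registration fixes the mismatch: the
simd wrapper keeps the public "gcm(aes)" name and only calls the
internal "__aes_gcm_p10" alg when vector state may be used; otherwise
it hands the request to cryptd, which redoes it from process context.
A minimal sketch of that dispatch, paraphrased and simplified from
crypto/simd.c (not part of this patch):

	struct simd_aead_ctx {
		struct cryptd_aead *cryptd_tfm;
	};

	/* Simplified from crypto/simd.c: run the internal alg only
	 * when SIMD is usable, else defer to cryptd. */
	static int simd_aead_encrypt(struct aead_request *req)
	{
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
		struct aead_request *subreq = aead_request_ctx(req);
		struct crypto_aead *child;

		*subreq = *req;

		if (!crypto_simd_usable() ||
		    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
			child = &ctx->cryptd_tfm->base;	/* defer */
		else
			child = cryptd_aead_child(ctx->cryptd_tfm);

		aead_request_set_tfm(subreq, child);
		return crypto_aead_encrypt(subreq);
	}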

Fixes: cdcecfd999 ("crypto: p10-aes-gcm - Glue code for AES/GCM stitched implementation")

Signed-off-by: Danny Tsen <dtsen@linux.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

@@ -8,6 +8,7 @@
 #include <asm/unaligned.h>
 #include <asm/simd.h>
 #include <asm/switch_to.h>
+#include <crypto/gcm.h>
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <crypto/b128ops.h>
@@ -24,6 +25,7 @@
 #define PPC_ALIGN 16
 #define GCM_IV_SIZE 12
+#define RFC4106_NONCE_SIZE 4
 
 MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
 MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
 
@@ -40,6 +42,7 @@ asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
 asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
 asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
 		unsigned char *aad, unsigned int alen);
+asmlinkage void gcm_update(u8 *iv, void *Xi);
 
 struct aes_key {
 	u8 key[AES_MAX_KEYLENGTH];
@@ -52,6 +55,7 @@ struct gcm_ctx {
 	u8 aad_hash[16];
 	u64 aadLen;
 	u64 Plen;	/* offset 56 - used in aes_p10_gcm_{en/de}crypt */
+	u8 pblock[16];
 };
 struct Hash_ctx {
 	u8 H[16];	/* subkey */
@@ -60,17 +64,20 @@ struct Hash_ctx {
 
 struct p10_aes_gcm_ctx {
 	struct aes_key enc_key;
+	u8 nonce[RFC4106_NONCE_SIZE];
 };
 
 static void vsx_begin(void)
 {
 	preempt_disable();
+	pagefault_disable();
 	enable_kernel_vsx();
 }
 
 static void vsx_end(void)
 {
 	disable_kernel_vsx();
+	pagefault_enable();
 	preempt_enable();
 }
 
@@ -198,7 +205,8 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	return ret ? -EINVAL : 0;
 }
 
-static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
+static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
+			     int assoclen, int enc)
 {
 	struct crypto_tfm *tfm = req->base.tfm;
 	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -210,7 +218,6 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
 	struct skcipher_walk walk;
 	u8 *assocmem = NULL;
 	u8 *assoc;
-	unsigned int assoclen = req->assoclen;
 	unsigned int cryptlen = req->cryptlen;
 	unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
 	unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
@@ -218,11 +225,12 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
 	unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
 	u8 otag[16];
 	int total_processed = 0;
+	int nbytes;
 
 	memset(databuf, 0, sizeof(databuf));
 	memset(hashbuf, 0, sizeof(hashbuf));
 	memset(ivbuf, 0, sizeof(ivbuf));
-	memcpy(iv, req->iv, GCM_IV_SIZE);
+	memcpy(iv, riv, GCM_IV_SIZE);
 
 	/* Linearize assoc, if not already linear */
 	if (req->src->length >= assoclen && req->src->length) {
@@ -257,19 +265,25 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
 	if (ret)
 		return ret;
 
-	while (walk.nbytes > 0 && ret == 0) {
+	while ((nbytes = walk.nbytes) > 0 && ret == 0) {
+		u8 *src = walk.src.virt.addr;
+		u8 *dst = walk.dst.virt.addr;
+		u8 buf[AES_BLOCK_SIZE];
+
+		if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
+			src = dst = memcpy(buf, src, nbytes);
 
 		vsx_begin();
 		if (enc)
-			aes_p10_gcm_encrypt(walk.src.virt.addr,
-					    walk.dst.virt.addr,
-					    walk.nbytes,
+			aes_p10_gcm_encrypt(src, dst, nbytes,
 					    &ctx->enc_key, gctx->iv, hash->Htable);
 		else
-			aes_p10_gcm_decrypt(walk.src.virt.addr,
-					    walk.dst.virt.addr,
-					    walk.nbytes,
+			aes_p10_gcm_decrypt(src, dst, nbytes,
 					    &ctx->enc_key, gctx->iv, hash->Htable);
+
+		if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
+			memcpy(walk.dst.virt.addr, buf, nbytes);
+
 		vsx_end();
 
 		total_processed += walk.nbytes;
@@ -281,6 +295,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
 
 	/* Finalize hash */
 	vsx_begin();
+	gcm_update(gctx->iv, hash->Htable);
 	finish_tag(gctx, hash, total_processed);
 	vsx_end();
 
@@ -302,17 +317,63 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
 	return 0;
 }
 
+static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey,
+			  unsigned int keylen)
+{
+	struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+	int err;
+
+	keylen -= RFC4106_NONCE_SIZE;
+	err = p10_aes_gcm_setkey(tfm, inkey, keylen);
+	if (err)
+		return err;
+
+	memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE);
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	return crypto_rfc4106_check_authsize(authsize);
+}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
+	u8 iv[AES_BLOCK_SIZE];
+
+	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
+	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
+
+	return crypto_ipsec_check_assoclen(req->assoclen) ?:
+	       p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 1);
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
+	u8 iv[AES_BLOCK_SIZE];
+
+	memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
+	memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
+
+	return crypto_ipsec_check_assoclen(req->assoclen) ?:
+	       p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 0);
+}
+
 static int p10_aes_gcm_encrypt(struct aead_request *req)
 {
-	return p10_aes_gcm_crypt(req, 1);
+	return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 1);
 }
 
 static int p10_aes_gcm_decrypt(struct aead_request *req)
 {
-	return p10_aes_gcm_crypt(req, 0);
+	return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 0);
 }
 
-static struct aead_alg gcm_aes_alg = {
+static struct aead_alg gcm_aes_algs[] = {{
 	.ivsize = GCM_IV_SIZE,
 	.maxauthsize = 16,
 
@@ -321,23 +382,57 @@ static struct aead_alg gcm_aes_alg = {
 	.encrypt = p10_aes_gcm_encrypt,
 	.decrypt = p10_aes_gcm_decrypt,
 
-	.base.cra_name = "gcm(aes)",
-	.base.cra_driver_name = "aes_gcm_p10",
+	.base.cra_name = "__gcm(aes)",
+	.base.cra_driver_name = "__aes_gcm_p10",
 	.base.cra_priority = 2100,
 	.base.cra_blocksize = 1,
-	.base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx),
+	.base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx)+
+			    4 * sizeof(u64[2]),
 	.base.cra_module = THIS_MODULE,
-};
+	.base.cra_flags = CRYPTO_ALG_INTERNAL,
+}, {
+	.ivsize = GCM_RFC4106_IV_SIZE,
+	.maxauthsize = 16,
+	.setkey = rfc4106_setkey,
+	.setauthsize = rfc4106_setauthsize,
+	.encrypt = rfc4106_encrypt,
+	.decrypt = rfc4106_decrypt,
+
+	.base.cra_name = "__rfc4106(gcm(aes))",
+	.base.cra_driver_name = "__rfc4106_aes_gcm_p10",
+	.base.cra_priority = 2100,
+	.base.cra_blocksize = 1,
+	.base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx) +
+			    4 * sizeof(u64[2]),
+	.base.cra_module = THIS_MODULE,
+	.base.cra_flags = CRYPTO_ALG_INTERNAL,
+}};
+
+static struct simd_aead_alg *p10_simd_aeads[ARRAY_SIZE(gcm_aes_algs)];
 
 static int __init p10_init(void)
 {
-	return crypto_register_aead(&gcm_aes_alg);
+	int ret;
+
+	if (!cpu_has_feature(PPC_FEATURE2_ARCH_3_1))
+		return 0;
+
+	ret = simd_register_aeads_compat(gcm_aes_algs,
+					 ARRAY_SIZE(gcm_aes_algs),
+					 p10_simd_aeads);
+	if (ret) {
+		simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
+				      p10_simd_aeads);
+		return ret;
+	}
+	return 0;
 }
 
 static void __exit p10_exit(void)
 {
-	crypto_unregister_aead(&gcm_aes_alg);
+	simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
+			      p10_simd_aeads);
 }
 
-module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
+module_init(p10_init);
 module_exit(p10_exit);
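
Callers are unaffected by the "__" rename: requesting "gcm(aes)" or
"rfc4106(gcm(aes))" resolves to the simd wrappers (which reuse the old
driver names, e.g. "aes_gcm_p10"), while CRYPTO_ALG_INTERNAL hides the
raw P10 algs from normal lookups.  A hypothetical caller, for
illustration only (p10_gcm_demo is not part of this patch):

	#include <linux/err.h>
	#include <crypto/aead.h>

	static int p10_gcm_demo(void)
	{
		struct crypto_aead *tfm;

		/* On POWER10 this resolves to the simd wrapper around
		 * __rfc4106_aes_gcm_p10; the internal alg is never
		 * named directly. */
		tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* ... crypto_aead_setkey(), aead_request setup ... */

		crypto_free_aead(tfm);
		return 0;
	}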