Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 04:02:20 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (125 commits)
  [CRYPTO] twofish: Merge common glue code
  [CRYPTO] hifn_795x: Fixup container_of() usage
  [CRYPTO] cast6: inline bloat--
  [CRYPTO] api: Set default CRYPTO_MINALIGN to unsigned long long
  [CRYPTO] tcrypt: Make xcbc available as a standalone test
  [CRYPTO] xcbc: Remove bogus hash/cipher test
  [CRYPTO] xcbc: Fix algorithm leak when block size check fails
  [CRYPTO] tcrypt: Zero axbuf in the right function
  [CRYPTO] padlock: Only reset the key once for each CBC and ECB operation
  [CRYPTO] api: Include sched.h for cond_resched in scatterwalk.h
  [CRYPTO] salsa20-asm: Remove unnecessary dependency on CRYPTO_SALSA20
  [CRYPTO] tcrypt: Add select of AEAD
  [CRYPTO] salsa20: Add x86-64 assembly version
  [CRYPTO] salsa20_i586: Salsa20 stream cipher algorithm (i586 version)
  [CRYPTO] gcm: Introduce rfc4106
  [CRYPTO] api: Show async type
  [CRYPTO] chainiv: Avoid lock spinning where possible
  [CRYPTO] seqiv: Add select AEAD in Kconfig
  [CRYPTO] scatterwalk: Handle zero nbytes in scatterwalk_map_and_copy
  [CRYPTO] null: Allow setkey on digest_null
  ...
commit eba0e319c1
Documentation/crypto/api-intro.txt
@@ -33,9 +33,16 @@ The idea is to make the user interface and algorithm registration API
 very simple, while hiding the core logic from both. Many good ideas
 from existing APIs such as Cryptoapi and Nettle have been adapted for this.
 
-The API currently supports three types of transforms: Ciphers, Digests and
-Compressors. The compression algorithms especially seem to be performing
-very well so far.
+The API currently supports five main types of transforms: AEAD (Authenticated
+Encryption with Associated Data), Block Ciphers, Ciphers, Compressors and
+Hashes.
+
+Please note that Block Ciphers is somewhat of a misnomer. It is in fact
+meant to support all ciphers including stream ciphers. The difference
+between Block Ciphers and Ciphers is that the latter operates on exactly
+one block while the former can operate on an arbitrary amount of data,
+subject to block size requirements (i.e., non-stream ciphers can only
+process multiples of blocks).
 
 Support for hardware crypto devices via an asynchronous interface is
 under development.
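[Editorial aside, not part of the commit.] To make the Block Cipher / Cipher distinction concrete, here is a minimal sketch of driving a block cipher transform through the synchronous blkcipher interface of this kernel generation. The function name example_cbc_aes(), the choice of "cbc(aes)" and the fixed 16-byte buffer are illustrative assumptions; crypto_alloc_blkcipher(), crypto_blkcipher_setkey(), crypto_blkcipher_set_iv() and crypto_blkcipher_encrypt() are existing entry points of this API.

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Encrypt one 16-byte block of buf in place with cbc(aes).
     * Allocation and setkey may sleep, so call this from user context only. */
    static int example_cbc_aes(u8 *buf, const u8 *key, const u8 *iv)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc.tfm = tfm;
            desc.flags = 0;

            err = crypto_blkcipher_setkey(tfm, key, 16);
            if (!err) {
                    crypto_blkcipher_set_iv(tfm, iv, 16);
                    sg_init_one(&sg, buf, 16);
                    /* dst == src: in-place encryption of a single block */
                    err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
            }

            crypto_free_blkcipher(tfm);
            return err;
    }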
@@ -69,29 +76,12 @@ Here's an example of how to use the API:
 Many real examples are available in the regression test module (tcrypt.c).
 
 
-CONFIGURATION NOTES
-
-As Triple DES is part of the DES module, for those using modular builds,
-add the following line to /etc/modprobe.conf:
-
-alias des3_ede des
-
-The Null algorithms reside in the crypto_null module, so these lines
-should also be added:
-
-alias cipher_null crypto_null
-alias digest_null crypto_null
-alias compress_null crypto_null
-
-The SHA384 algorithm shares code within the SHA512 module, so you'll
-also need:
-alias sha384 sha512
-
-
 DEVELOPER NOTES
 
 Transforms may only be allocated in user context, and cryptographic
-methods may only be called from softirq and user contexts.
+methods may only be called from softirq and user contexts. For
+transforms with a setkey method it too should only be called from
+user context.
 
 When using the API for ciphers, performance will be optimal if each
 scatterlist contains data which is a multiple of the cipher's block
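[Editorial aside, not part of the commit.] The single-block Cipher interface is the counterpart to the blkcipher sketch above and follows the context rules just described: allocation and setkey may sleep and belong in user context, after which the per-block call may also run from softirq context. Names prefixed example_ are made up for illustration; crypto_alloc_cipher() and crypto_cipher_encrypt_one() are the same calls the s390 fallback code in this patch uses.

    #include <linux/crypto.h>
    #include <linux/err.h>

    static struct crypto_cipher *example_tfm;

    /* User context (e.g. module init): allocation and setkey may sleep. */
    static int example_cipher_setup(const u8 *key, unsigned int keylen)
    {
            example_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(example_tfm))
                    return PTR_ERR(example_tfm);
            return crypto_cipher_setkey(example_tfm, key, keylen);
    }

    /* Safe from softirq or user context once setup has completed. */
    static void example_encrypt_block(u8 *dst, const u8 *src)
    {
            crypto_cipher_encrypt_one(example_tfm, dst, src);
    }

    /* User context again for teardown. */
    static void example_cipher_teardown(void)
    {
            crypto_free_cipher(example_tfm);
    }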
@@ -130,8 +120,9 @@ might already be working on.
 BUGS
 
 Send bug reports to:
-Herbert Xu <herbert@gondor.apana.org.au>
-Cc: David S. Miller <davem@redhat.com>
+linux-crypto@vger.kernel.org
+Cc: Herbert Xu <herbert@gondor.apana.org.au>,
+    David S. Miller <davem@redhat.com>
 
 
 FURTHER INFORMATION
arch/s390/crypto/aes_s390.c
@@ -6,6 +6,7 @@
|
||||
* s390 Version:
|
||||
* Copyright IBM Corp. 2005,2007
|
||||
* Author(s): Jan Glauber (jang@de.ibm.com)
|
||||
* Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
|
||||
*
|
||||
* Derived from "crypto/aes_generic.c"
|
||||
*
|
||||
@ -16,17 +17,13 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include "crypt_s390.h"
|
||||
|
||||
#define AES_MIN_KEY_SIZE 16
|
||||
#define AES_MAX_KEY_SIZE 32
|
||||
|
||||
/* data block size for all key lengths */
|
||||
#define AES_BLOCK_SIZE 16
|
||||
|
||||
#define AES_KEYLEN_128 1
|
||||
#define AES_KEYLEN_192 2
|
||||
#define AES_KEYLEN_256 4
|
||||
@ -39,45 +36,89 @@ struct s390_aes_ctx {
|
||||
long enc;
|
||||
long dec;
|
||||
int key_len;
|
||||
union {
|
||||
struct crypto_blkcipher *blk;
|
||||
struct crypto_cipher *cip;
|
||||
} fallback;
|
||||
};
|
||||
|
||||
/*
|
||||
* Check if the key_len is supported by the HW.
|
||||
* Returns 0 if it is, a positive number if it is not and software fallback is
|
||||
* required or a negative number in case the key size is not valid
|
||||
*/
|
||||
static int need_fallback(unsigned int key_len)
|
||||
{
|
||||
switch (key_len) {
|
||||
case 16:
|
||||
if (!(keylen_flag & AES_KEYLEN_128))
|
||||
return 1;
|
||||
break;
|
||||
case 24:
|
||||
if (!(keylen_flag & AES_KEYLEN_192))
|
||||
return 1;
|
||||
break;
|
||||
case 32:
|
||||
if (!(keylen_flag & AES_KEYLEN_256))
|
||||
return 1;
|
||||
break;
|
||||
default:
|
||||
return -1;
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
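/*
 * Editorial sketch, not part of the patch: need_fallback() is tri-state.
 * 0 means the CPACF hardware handles this key length, a positive value
 * means the key is valid but must take the software fallback, and a
 * negative value means the key length itself is invalid.  The setkey
 * paths below dispatch on it roughly like this illustrative helper
 * (example_classify_key is a made-up name):
 */
static int example_classify_key(unsigned int key_len)
{
	int ret = need_fallback(key_len);

	if (ret < 0)
		return -EINVAL;	/* reject: unsupported key length */
	return ret;		/* 0: hardware path, 1: software fallback */
}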
|
||||
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
int ret;
|
||||
|
||||
sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
|
||||
sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
|
||||
if (ret) {
|
||||
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
|
||||
tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
int ret;
|
||||
|
||||
switch (key_len) {
|
||||
case 16:
|
||||
if (!(keylen_flag & AES_KEYLEN_128))
|
||||
goto fail;
|
||||
break;
|
||||
case 24:
|
||||
if (!(keylen_flag & AES_KEYLEN_192))
|
||||
goto fail;
|
||||
|
||||
break;
|
||||
case 32:
|
||||
if (!(keylen_flag & AES_KEYLEN_256))
|
||||
goto fail;
|
||||
break;
|
||||
default:
|
||||
goto fail;
|
||||
break;
|
||||
ret = need_fallback(key_len);
|
||||
if (ret < 0) {
|
||||
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sctx->key_len = key_len;
|
||||
memcpy(sctx->key, in_key, key_len);
|
||||
return 0;
|
||||
fail:
|
||||
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
if (!ret) {
|
||||
memcpy(sctx->key, in_key, key_len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return setkey_fallback_cip(tfm, in_key, key_len);
|
||||
}
|
||||
|
||||
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len))) {
|
||||
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (sctx->key_len) {
|
||||
case 16:
|
||||
crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
|
||||
@ -98,6 +139,11 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len))) {
|
||||
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
|
||||
return;
|
||||
}
|
||||
|
||||
switch (sctx->key_len) {
|
||||
case 16:
|
||||
crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
|
||||
@ -114,6 +160,29 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
}
|
||||
}
|
||||
|
||||
static int fallback_init_cip(struct crypto_tfm *tfm)
|
||||
{
|
||||
const char *name = tfm->__crt_alg->cra_name;
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
sctx->fallback.cip = crypto_alloc_cipher(name, 0,
|
||||
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
|
||||
|
||||
if (IS_ERR(sctx->fallback.cip)) {
|
||||
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
|
||||
return PTR_ERR(sctx->fallback.blk);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fallback_exit_cip(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_cipher(sctx->fallback.cip);
|
||||
sctx->fallback.cip = NULL;
|
||||
}
|
||||
|
||||
static struct crypto_alg aes_alg = {
|
||||
.cra_name = "aes",
|
||||
@ -125,6 +194,8 @@ static struct crypto_alg aes_alg = {
|
||||
.cra_ctxsize = sizeof(struct s390_aes_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
|
||||
.cra_init = fallback_init_cip,
|
||||
.cra_exit = fallback_exit_cip,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_MIN_KEY_SIZE,
|
||||
@ -136,10 +207,70 @@ static struct crypto_alg aes_alg = {
|
||||
}
|
||||
};
|
||||
|
||||
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
unsigned int ret;
|
||||
|
||||
sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
|
||||
sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
|
||||
if (ret) {
|
||||
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
|
||||
tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int fallback_blk_dec(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
unsigned int ret;
|
||||
struct crypto_blkcipher *tfm;
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
tfm = desc->tfm;
|
||||
desc->tfm = sctx->fallback.blk;
|
||||
|
||||
ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
|
||||
|
||||
desc->tfm = tfm;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int fallback_blk_enc(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
unsigned int ret;
|
||||
struct crypto_blkcipher *tfm;
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
tfm = desc->tfm;
|
||||
desc->tfm = sctx->fallback.blk;
|
||||
|
||||
ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
|
||||
|
||||
desc->tfm = tfm;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
int ret;
|
||||
|
||||
ret = need_fallback(key_len);
|
||||
if (ret > 0) {
|
||||
sctx->key_len = key_len;
|
||||
return setkey_fallback_blk(tfm, in_key, key_len);
|
||||
}
|
||||
|
||||
switch (key_len) {
|
||||
case 16:
|
||||
@ -188,6 +319,9 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len)))
|
||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
|
||||
}
|
||||
@ -199,10 +333,37 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len)))
|
||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
|
||||
}
|
||||
|
||||
static int fallback_init_blk(struct crypto_tfm *tfm)
|
||||
{
|
||||
const char *name = tfm->__crt_alg->cra_name;
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
|
||||
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
|
||||
|
||||
if (IS_ERR(sctx->fallback.blk)) {
|
||||
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
|
||||
return PTR_ERR(sctx->fallback.blk);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fallback_exit_blk(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_blkcipher(sctx->fallback.blk);
|
||||
sctx->fallback.blk = NULL;
|
||||
}
|
||||
|
||||
static struct crypto_alg ecb_aes_alg = {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-aes-s390",
|
||||
@ -214,6 +375,8 @@ static struct crypto_alg ecb_aes_alg = {
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
|
||||
.cra_init = fallback_init_blk,
|
||||
.cra_exit = fallback_exit_blk,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
@ -229,6 +392,13 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
int ret;
|
||||
|
||||
ret = need_fallback(key_len);
|
||||
if (ret > 0) {
|
||||
sctx->key_len = key_len;
|
||||
return setkey_fallback_blk(tfm, in_key, key_len);
|
||||
}
|
||||
|
||||
switch (key_len) {
|
||||
case 16:
|
||||
@ -283,6 +453,9 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len)))
|
||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
|
||||
}
|
||||
@ -294,6 +467,9 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
|
||||
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
if (unlikely(need_fallback(sctx->key_len)))
|
||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
|
||||
}
|
||||
@ -309,6 +485,8 @@ static struct crypto_alg cbc_aes_alg = {
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
|
||||
.cra_init = fallback_init_blk,
|
||||
.cra_exit = fallback_exit_blk,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
@ -336,14 +514,10 @@ static int __init aes_init(void)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* z9 109 and z9 BC/EC only support 128 bit key length */
|
||||
if (keylen_flag == AES_KEYLEN_128) {
|
||||
aes_alg.cra_u.cipher.cia_max_keysize = AES_MIN_KEY_SIZE;
|
||||
ecb_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
|
||||
cbc_aes_alg.cra_u.blkcipher.max_keysize = AES_MIN_KEY_SIZE;
|
||||
if (keylen_flag == AES_KEYLEN_128)
|
||||
printk(KERN_INFO
|
||||
"aes_s390: hardware acceleration only available for"
|
||||
"128 bit keys\n");
|
||||
}
|
||||
|
||||
ret = crypto_register_alg(&aes_alg);
|
||||
if (ret)
|
||||
@ -382,4 +556,3 @@ MODULE_ALIAS("aes");
|
||||
|
||||
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
arch/x86/crypto/Makefile
@@ -4,12 +4,16 @@
|
||||
|
||||
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
|
||||
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
|
||||
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
|
||||
|
||||
aes-i586-y := aes-i586-asm_32.o aes_32.o
|
||||
twofish-i586-y := twofish-i586-asm_32.o twofish_32.o
|
||||
aes-i586-y := aes-i586-asm_32.o aes_glue.o
|
||||
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
|
||||
salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
|
||||
|
||||
aes-x86_64-y := aes-x86_64-asm_64.o aes_64.o
|
||||
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_64.o
|
||||
aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
|
||||
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
|
||||
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
|
||||
|
arch/x86/crypto/aes-i586-asm_32.S
@@ -46,9 +46,9 @@
|
||||
#define in_blk 16
|
||||
|
||||
/* offsets in crypto_tfm structure */
|
||||
#define ekey (crypto_tfm_ctx_offset + 0)
|
||||
#define nrnd (crypto_tfm_ctx_offset + 256)
|
||||
#define dkey (crypto_tfm_ctx_offset + 260)
|
||||
#define klen (crypto_tfm_ctx_offset + 0)
|
||||
#define ekey (crypto_tfm_ctx_offset + 4)
|
||||
#define dkey (crypto_tfm_ctx_offset + 244)
|
||||
|
||||
// register mapping for encrypt and decrypt subroutines
|
||||
|
||||
@ -221,8 +221,8 @@
|
||||
|
||||
.global aes_enc_blk
|
||||
|
||||
.extern ft_tab
|
||||
.extern fl_tab
|
||||
.extern crypto_ft_tab
|
||||
.extern crypto_fl_tab
|
||||
|
||||
.align 4
|
||||
|
||||
@ -236,7 +236,7 @@ aes_enc_blk:
|
||||
1: push %ebx
|
||||
mov in_blk+4(%esp),%r2
|
||||
push %esi
|
||||
mov nrnd(%ebp),%r3 // number of rounds
|
||||
mov klen(%ebp),%r3 // key size
|
||||
push %edi
|
||||
#if ekey != 0
|
||||
lea ekey(%ebp),%ebp // key pointer
|
||||
@ -255,26 +255,26 @@ aes_enc_blk:
|
||||
|
||||
sub $8,%esp // space for register saves on stack
|
||||
add $16,%ebp // increment to next round key
|
||||
cmp $12,%r3
|
||||
cmp $24,%r3
|
||||
jb 4f // 10 rounds for 128-bit key
|
||||
lea 32(%ebp),%ebp
|
||||
je 3f // 12 rounds for 192-bit key
|
||||
lea 32(%ebp),%ebp
|
||||
|
||||
2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 256-bit key
|
||||
fwd_rnd2( -48(%ebp) ,ft_tab)
|
||||
3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 192-bit key
|
||||
fwd_rnd2( -16(%ebp) ,ft_tab)
|
||||
4: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key
|
||||
fwd_rnd2( +16(%ebp) ,ft_tab)
|
||||
fwd_rnd1( +32(%ebp) ,ft_tab)
|
||||
fwd_rnd2( +48(%ebp) ,ft_tab)
|
||||
fwd_rnd1( +64(%ebp) ,ft_tab)
|
||||
fwd_rnd2( +80(%ebp) ,ft_tab)
|
||||
fwd_rnd1( +96(%ebp) ,ft_tab)
|
||||
fwd_rnd2(+112(%ebp) ,ft_tab)
|
||||
fwd_rnd1(+128(%ebp) ,ft_tab)
|
||||
fwd_rnd2(+144(%ebp) ,fl_tab) // last round uses a different table
|
||||
2: fwd_rnd1( -64(%ebp), crypto_ft_tab) // 14 rounds for 256-bit key
|
||||
fwd_rnd2( -48(%ebp), crypto_ft_tab)
|
||||
3: fwd_rnd1( -32(%ebp), crypto_ft_tab) // 12 rounds for 192-bit key
|
||||
fwd_rnd2( -16(%ebp), crypto_ft_tab)
|
||||
4: fwd_rnd1( (%ebp), crypto_ft_tab) // 10 rounds for 128-bit key
|
||||
fwd_rnd2( +16(%ebp), crypto_ft_tab)
|
||||
fwd_rnd1( +32(%ebp), crypto_ft_tab)
|
||||
fwd_rnd2( +48(%ebp), crypto_ft_tab)
|
||||
fwd_rnd1( +64(%ebp), crypto_ft_tab)
|
||||
fwd_rnd2( +80(%ebp), crypto_ft_tab)
|
||||
fwd_rnd1( +96(%ebp), crypto_ft_tab)
|
||||
fwd_rnd2(+112(%ebp), crypto_ft_tab)
|
||||
fwd_rnd1(+128(%ebp), crypto_ft_tab)
|
||||
fwd_rnd2(+144(%ebp), crypto_fl_tab) // last round uses a different table
|
||||
|
||||
// move final values to the output array. CAUTION: the
|
||||
// order of these assigns rely on the register mappings
|
||||
@ -297,8 +297,8 @@ aes_enc_blk:
|
||||
|
||||
.global aes_dec_blk
|
||||
|
||||
.extern it_tab
|
||||
.extern il_tab
|
||||
.extern crypto_it_tab
|
||||
.extern crypto_il_tab
|
||||
|
||||
.align 4
|
||||
|
||||
@ -312,14 +312,11 @@ aes_dec_blk:
|
||||
1: push %ebx
|
||||
mov in_blk+4(%esp),%r2
|
||||
push %esi
|
||||
mov nrnd(%ebp),%r3 // number of rounds
|
||||
mov klen(%ebp),%r3 // key size
|
||||
push %edi
|
||||
#if dkey != 0
|
||||
lea dkey(%ebp),%ebp // key pointer
|
||||
#endif
|
||||
mov %r3,%r0
|
||||
shl $4,%r0
|
||||
add %r0,%ebp
|
||||
|
||||
// input four columns and xor in first round key
|
||||
|
||||
@ -333,27 +330,27 @@ aes_dec_blk:
|
||||
xor 12(%ebp),%r5
|
||||
|
||||
sub $8,%esp // space for register saves on stack
|
||||
sub $16,%ebp // increment to next round key
|
||||
cmp $12,%r3
|
||||
add $16,%ebp // increment to next round key
|
||||
cmp $24,%r3
|
||||
jb 4f // 10 rounds for 128-bit key
|
||||
lea -32(%ebp),%ebp
|
||||
lea 32(%ebp),%ebp
|
||||
je 3f // 12 rounds for 192-bit key
|
||||
lea -32(%ebp),%ebp
|
||||
lea 32(%ebp),%ebp
|
||||
|
||||
2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 256-bit key
|
||||
inv_rnd2( +48(%ebp), it_tab)
|
||||
3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 192-bit key
|
||||
inv_rnd2( +16(%ebp), it_tab)
|
||||
4: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key
|
||||
inv_rnd2( -16(%ebp), it_tab)
|
||||
inv_rnd1( -32(%ebp), it_tab)
|
||||
inv_rnd2( -48(%ebp), it_tab)
|
||||
inv_rnd1( -64(%ebp), it_tab)
|
||||
inv_rnd2( -80(%ebp), it_tab)
|
||||
inv_rnd1( -96(%ebp), it_tab)
|
||||
inv_rnd2(-112(%ebp), it_tab)
|
||||
inv_rnd1(-128(%ebp), it_tab)
|
||||
inv_rnd2(-144(%ebp), il_tab) // last round uses a different table
|
||||
2: inv_rnd1( -64(%ebp), crypto_it_tab) // 14 rounds for 256-bit key
|
||||
inv_rnd2( -48(%ebp), crypto_it_tab)
|
||||
3: inv_rnd1( -32(%ebp), crypto_it_tab) // 12 rounds for 192-bit key
|
||||
inv_rnd2( -16(%ebp), crypto_it_tab)
|
||||
4: inv_rnd1( (%ebp), crypto_it_tab) // 10 rounds for 128-bit key
|
||||
inv_rnd2( +16(%ebp), crypto_it_tab)
|
||||
inv_rnd1( +32(%ebp), crypto_it_tab)
|
||||
inv_rnd2( +48(%ebp), crypto_it_tab)
|
||||
inv_rnd1( +64(%ebp), crypto_it_tab)
|
||||
inv_rnd2( +80(%ebp), crypto_it_tab)
|
||||
inv_rnd1( +96(%ebp), crypto_it_tab)
|
||||
inv_rnd2(+112(%ebp), crypto_it_tab)
|
||||
inv_rnd1(+128(%ebp), crypto_it_tab)
|
||||
inv_rnd2(+144(%ebp), crypto_il_tab) // last round uses a different table
|
||||
|
||||
// move final values to the output array. CAUTION: the
|
||||
// order of these assigns rely on the register mappings
|
||||
|
arch/x86/crypto/aes-x86_64-asm_64.S
@@ -8,10 +8,10 @@
|
||||
* including this sentence is retained in full.
|
||||
*/
|
||||
|
||||
.extern aes_ft_tab
|
||||
.extern aes_it_tab
|
||||
.extern aes_fl_tab
|
||||
.extern aes_il_tab
|
||||
.extern crypto_ft_tab
|
||||
.extern crypto_it_tab
|
||||
.extern crypto_fl_tab
|
||||
.extern crypto_il_tab
|
||||
|
||||
.text
|
||||
|
||||
@ -56,13 +56,13 @@
|
||||
.align 8; \
|
||||
FUNC: movq r1,r2; \
|
||||
movq r3,r4; \
|
||||
leaq BASE+KEY+52(r8),r9; \
|
||||
leaq BASE+KEY+48+4(r8),r9; \
|
||||
movq r10,r11; \
|
||||
movl (r7),r5 ## E; \
|
||||
movl 4(r7),r1 ## E; \
|
||||
movl 8(r7),r6 ## E; \
|
||||
movl 12(r7),r7 ## E; \
|
||||
movl BASE(r8),r10 ## E; \
|
||||
movl BASE+0(r8),r10 ## E; \
|
||||
xorl -48(r9),r5 ## E; \
|
||||
xorl -44(r9),r1 ## E; \
|
||||
xorl -40(r9),r6 ## E; \
|
||||
@ -154,37 +154,37 @@ FUNC: movq r1,r2; \
|
||||
/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
|
||||
|
||||
entry(aes_enc_blk,0,enc128,enc192)
|
||||
encrypt_round(aes_ft_tab,-96)
|
||||
encrypt_round(aes_ft_tab,-80)
|
||||
enc192: encrypt_round(aes_ft_tab,-64)
|
||||
encrypt_round(aes_ft_tab,-48)
|
||||
enc128: encrypt_round(aes_ft_tab,-32)
|
||||
encrypt_round(aes_ft_tab,-16)
|
||||
encrypt_round(aes_ft_tab, 0)
|
||||
encrypt_round(aes_ft_tab, 16)
|
||||
encrypt_round(aes_ft_tab, 32)
|
||||
encrypt_round(aes_ft_tab, 48)
|
||||
encrypt_round(aes_ft_tab, 64)
|
||||
encrypt_round(aes_ft_tab, 80)
|
||||
encrypt_round(aes_ft_tab, 96)
|
||||
encrypt_final(aes_fl_tab,112)
|
||||
encrypt_round(crypto_ft_tab,-96)
|
||||
encrypt_round(crypto_ft_tab,-80)
|
||||
enc192: encrypt_round(crypto_ft_tab,-64)
|
||||
encrypt_round(crypto_ft_tab,-48)
|
||||
enc128: encrypt_round(crypto_ft_tab,-32)
|
||||
encrypt_round(crypto_ft_tab,-16)
|
||||
encrypt_round(crypto_ft_tab, 0)
|
||||
encrypt_round(crypto_ft_tab, 16)
|
||||
encrypt_round(crypto_ft_tab, 32)
|
||||
encrypt_round(crypto_ft_tab, 48)
|
||||
encrypt_round(crypto_ft_tab, 64)
|
||||
encrypt_round(crypto_ft_tab, 80)
|
||||
encrypt_round(crypto_ft_tab, 96)
|
||||
encrypt_final(crypto_fl_tab,112)
|
||||
return
|
||||
|
||||
/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */
|
||||
|
||||
entry(aes_dec_blk,240,dec128,dec192)
|
||||
decrypt_round(aes_it_tab,-96)
|
||||
decrypt_round(aes_it_tab,-80)
|
||||
dec192: decrypt_round(aes_it_tab,-64)
|
||||
decrypt_round(aes_it_tab,-48)
|
||||
dec128: decrypt_round(aes_it_tab,-32)
|
||||
decrypt_round(aes_it_tab,-16)
|
||||
decrypt_round(aes_it_tab, 0)
|
||||
decrypt_round(aes_it_tab, 16)
|
||||
decrypt_round(aes_it_tab, 32)
|
||||
decrypt_round(aes_it_tab, 48)
|
||||
decrypt_round(aes_it_tab, 64)
|
||||
decrypt_round(aes_it_tab, 80)
|
||||
decrypt_round(aes_it_tab, 96)
|
||||
decrypt_final(aes_il_tab,112)
|
||||
decrypt_round(crypto_it_tab,-96)
|
||||
decrypt_round(crypto_it_tab,-80)
|
||||
dec192: decrypt_round(crypto_it_tab,-64)
|
||||
decrypt_round(crypto_it_tab,-48)
|
||||
dec128: decrypt_round(crypto_it_tab,-32)
|
||||
decrypt_round(crypto_it_tab,-16)
|
||||
decrypt_round(crypto_it_tab, 0)
|
||||
decrypt_round(crypto_it_tab, 16)
|
||||
decrypt_round(crypto_it_tab, 32)
|
||||
decrypt_round(crypto_it_tab, 48)
|
||||
decrypt_round(crypto_it_tab, 64)
|
||||
decrypt_round(crypto_it_tab, 80)
|
||||
decrypt_round(crypto_it_tab, 96)
|
||||
decrypt_final(crypto_il_tab,112)
|
||||
return
|
||||
|
arch/x86/crypto/aes_32.c (deleted)
@@ -1,515 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Glue Code for optimized 586 assembler version of AES
|
||||
*
|
||||
* Copyright (c) 2002, Dr Brian Gladman <>, Worcester, UK.
|
||||
* All rights reserved.
|
||||
*
|
||||
* LICENSE TERMS
|
||||
*
|
||||
* The free distribution and use of this software in both source and binary
|
||||
* form is allowed (with or without changes) provided that:
|
||||
*
|
||||
* 1. distributions of this source code include the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
*
|
||||
* 2. distributions in binary form include the above copyright
|
||||
* notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other associated materials;
|
||||
*
|
||||
* 3. the copyright holder's name is not used to endorse products
|
||||
* built using this software without specific written permission.
|
||||
*
|
||||
* ALTERNATIVELY, provided that this notice is retained in full, this product
|
||||
* may be distributed under the terms of the GNU General Public License (GPL),
|
||||
* in which case the provisions of the GPL apply INSTEAD OF those given above.
|
||||
*
|
||||
* DISCLAIMER
|
||||
*
|
||||
* This software is provided 'as is' with no explicit or implied warranties
|
||||
* in respect of its properties, including, but not limited to, correctness
|
||||
* and/or fitness for purpose.
|
||||
*
|
||||
* Copyright (c) 2003, Adam J. Richter <adam@yggdrasil.com> (conversion to
|
||||
* 2.5 API).
|
||||
* Copyright (c) 2003, 2004 Fruhwirth Clemens <clemens@endorphin.org>
|
||||
* Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
||||
asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
||||
|
||||
#define AES_MIN_KEY_SIZE 16
|
||||
#define AES_MAX_KEY_SIZE 32
|
||||
#define AES_BLOCK_SIZE 16
|
||||
#define AES_KS_LENGTH 4 * AES_BLOCK_SIZE
|
||||
#define RC_LENGTH 29
|
||||
|
||||
struct aes_ctx {
|
||||
u32 ekey[AES_KS_LENGTH];
|
||||
u32 rounds;
|
||||
u32 dkey[AES_KS_LENGTH];
|
||||
};
|
||||
|
||||
#define WPOLY 0x011b
|
||||
#define bytes2word(b0, b1, b2, b3) \
|
||||
(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
|
||||
|
||||
/* define the finite field multiplies required for Rijndael */
|
||||
#define f2(x) ((x) ? pow[log[x] + 0x19] : 0)
|
||||
#define f3(x) ((x) ? pow[log[x] + 0x01] : 0)
|
||||
#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0)
|
||||
#define fb(x) ((x) ? pow[log[x] + 0x68] : 0)
|
||||
#define fd(x) ((x) ? pow[log[x] + 0xee] : 0)
|
||||
#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0)
|
||||
#define fi(x) ((x) ? pow[255 - log[x]]: 0)
|
||||
|
||||
static inline u32 upr(u32 x, int n)
|
||||
{
|
||||
return (x << 8 * n) | (x >> (32 - 8 * n));
|
||||
}
|
||||
|
||||
static inline u8 bval(u32 x, int n)
|
||||
{
|
||||
return x >> 8 * n;
|
||||
}
|
||||
|
||||
/* The forward and inverse affine transformations used in the S-box */
|
||||
#define fwd_affine(x) \
|
||||
(w = (u32)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(u8)(w^(w>>8)))
|
||||
|
||||
#define inv_affine(x) \
|
||||
(w = (u32)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(u8)(w^(w>>8)))
|
||||
|
||||
static u32 rcon_tab[RC_LENGTH];
|
||||
|
||||
u32 ft_tab[4][256];
|
||||
u32 fl_tab[4][256];
|
||||
static u32 im_tab[4][256];
|
||||
u32 il_tab[4][256];
|
||||
u32 it_tab[4][256];
|
||||
|
||||
static void gen_tabs(void)
|
||||
{
|
||||
u32 i, w;
|
||||
u8 pow[512], log[256];
|
||||
|
||||
/*
|
||||
* log and power tables for GF(2^8) finite field with
|
||||
* WPOLY as modular polynomial - the simplest primitive
|
||||
* root is 0x03, used here to generate the tables.
|
||||
*/
|
||||
i = 0; w = 1;
|
||||
|
||||
do {
|
||||
pow[i] = (u8)w;
|
||||
pow[i + 255] = (u8)w;
|
||||
log[w] = (u8)i++;
|
||||
w ^= (w << 1) ^ (w & 0x80 ? WPOLY : 0);
|
||||
} while (w != 1);
|
||||
|
||||
for(i = 0, w = 1; i < RC_LENGTH; ++i) {
|
||||
rcon_tab[i] = bytes2word(w, 0, 0, 0);
|
||||
w = f2(w);
|
||||
}
|
||||
|
||||
for(i = 0; i < 256; ++i) {
|
||||
u8 b;
|
||||
|
||||
b = fwd_affine(fi((u8)i));
|
||||
w = bytes2word(f2(b), b, b, f3(b));
|
||||
|
||||
/* tables for a normal encryption round */
|
||||
ft_tab[0][i] = w;
|
||||
ft_tab[1][i] = upr(w, 1);
|
||||
ft_tab[2][i] = upr(w, 2);
|
||||
ft_tab[3][i] = upr(w, 3);
|
||||
w = bytes2word(b, 0, 0, 0);
|
||||
|
||||
/*
|
||||
* tables for last encryption round
|
||||
* (may also be used in the key schedule)
|
||||
*/
|
||||
fl_tab[0][i] = w;
|
||||
fl_tab[1][i] = upr(w, 1);
|
||||
fl_tab[2][i] = upr(w, 2);
|
||||
fl_tab[3][i] = upr(w, 3);
|
||||
|
||||
b = fi(inv_affine((u8)i));
|
||||
w = bytes2word(fe(b), f9(b), fd(b), fb(b));
|
||||
|
||||
/* tables for the inverse mix column operation */
|
||||
im_tab[0][b] = w;
|
||||
im_tab[1][b] = upr(w, 1);
|
||||
im_tab[2][b] = upr(w, 2);
|
||||
im_tab[3][b] = upr(w, 3);
|
||||
|
||||
/* tables for a normal decryption round */
|
||||
it_tab[0][i] = w;
|
||||
it_tab[1][i] = upr(w,1);
|
||||
it_tab[2][i] = upr(w,2);
|
||||
it_tab[3][i] = upr(w,3);
|
||||
|
||||
w = bytes2word(b, 0, 0, 0);
|
||||
|
||||
/* tables for last decryption round */
|
||||
il_tab[0][i] = w;
|
||||
il_tab[1][i] = upr(w,1);
|
||||
il_tab[2][i] = upr(w,2);
|
||||
il_tab[3][i] = upr(w,3);
|
||||
}
|
||||
}
|
||||
|
||||
#define four_tables(x,tab,vf,rf,c) \
|
||||
( tab[0][bval(vf(x,0,c),rf(0,c))] ^ \
|
||||
tab[1][bval(vf(x,1,c),rf(1,c))] ^ \
|
||||
tab[2][bval(vf(x,2,c),rf(2,c))] ^ \
|
||||
tab[3][bval(vf(x,3,c),rf(3,c))] \
|
||||
)
|
||||
|
||||
#define vf1(x,r,c) (x)
|
||||
#define rf1(r,c) (r)
|
||||
#define rf2(r,c) ((r-c)&3)
|
||||
|
||||
#define inv_mcol(x) four_tables(x,im_tab,vf1,rf1,0)
|
||||
#define ls_box(x,c) four_tables(x,fl_tab,vf1,rf2,c)
|
||||
|
||||
#define ff(x) inv_mcol(x)
|
||||
|
||||
#define ke4(k,i) \
|
||||
{ \
|
||||
k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \
|
||||
k[4*(i)+5] = ss[1] ^= ss[0]; \
|
||||
k[4*(i)+6] = ss[2] ^= ss[1]; \
|
||||
k[4*(i)+7] = ss[3] ^= ss[2]; \
|
||||
}
|
||||
|
||||
#define kel4(k,i) \
|
||||
{ \
|
||||
k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \
|
||||
k[4*(i)+5] = ss[1] ^= ss[0]; \
|
||||
k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2]; \
|
||||
}
|
||||
|
||||
#define ke6(k,i) \
|
||||
{ \
|
||||
k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
|
||||
k[6*(i)+ 7] = ss[1] ^= ss[0]; \
|
||||
k[6*(i)+ 8] = ss[2] ^= ss[1]; \
|
||||
k[6*(i)+ 9] = ss[3] ^= ss[2]; \
|
||||
k[6*(i)+10] = ss[4] ^= ss[3]; \
|
||||
k[6*(i)+11] = ss[5] ^= ss[4]; \
|
||||
}
|
||||
|
||||
#define kel6(k,i) \
|
||||
{ \
|
||||
k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
|
||||
k[6*(i)+ 7] = ss[1] ^= ss[0]; \
|
||||
k[6*(i)+ 8] = ss[2] ^= ss[1]; \
|
||||
k[6*(i)+ 9] = ss[3] ^= ss[2]; \
|
||||
}
|
||||
|
||||
#define ke8(k,i) \
|
||||
{ \
|
||||
k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
|
||||
k[8*(i)+ 9] = ss[1] ^= ss[0]; \
|
||||
k[8*(i)+10] = ss[2] ^= ss[1]; \
|
||||
k[8*(i)+11] = ss[3] ^= ss[2]; \
|
||||
k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \
|
||||
k[8*(i)+13] = ss[5] ^= ss[4]; \
|
||||
k[8*(i)+14] = ss[6] ^= ss[5]; \
|
||||
k[8*(i)+15] = ss[7] ^= ss[6]; \
|
||||
}
|
||||
|
||||
#define kel8(k,i) \
|
||||
{ \
|
||||
k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
|
||||
k[8*(i)+ 9] = ss[1] ^= ss[0]; \
|
||||
k[8*(i)+10] = ss[2] ^= ss[1]; \
|
||||
k[8*(i)+11] = ss[3] ^= ss[2]; \
|
||||
}
|
||||
|
||||
#define kdf4(k,i) \
|
||||
{ \
|
||||
ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \
|
||||
ss[1] = ss[1] ^ ss[3]; \
|
||||
ss[2] = ss[2] ^ ss[3]; \
|
||||
ss[3] = ss[3]; \
|
||||
ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
|
||||
ss[i % 4] ^= ss[4]; \
|
||||
ss[4] ^= k[4*(i)]; \
|
||||
k[4*(i)+4] = ff(ss[4]); \
|
||||
ss[4] ^= k[4*(i)+1]; \
|
||||
k[4*(i)+5] = ff(ss[4]); \
|
||||
ss[4] ^= k[4*(i)+2]; \
|
||||
k[4*(i)+6] = ff(ss[4]); \
|
||||
ss[4] ^= k[4*(i)+3]; \
|
||||
k[4*(i)+7] = ff(ss[4]); \
|
||||
}
|
||||
|
||||
#define kd4(k,i) \
|
||||
{ \
|
||||
ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
|
||||
ss[i % 4] ^= ss[4]; \
|
||||
ss[4] = ff(ss[4]); \
|
||||
k[4*(i)+4] = ss[4] ^= k[4*(i)]; \
|
||||
k[4*(i)+5] = ss[4] ^= k[4*(i)+1]; \
|
||||
k[4*(i)+6] = ss[4] ^= k[4*(i)+2]; \
|
||||
k[4*(i)+7] = ss[4] ^= k[4*(i)+3]; \
|
||||
}
|
||||
|
||||
#define kdl4(k,i) \
|
||||
{ \
|
||||
ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \
|
||||
ss[i % 4] ^= ss[4]; \
|
||||
k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \
|
||||
k[4*(i)+5] = ss[1] ^ ss[3]; \
|
||||
k[4*(i)+6] = ss[0]; \
|
||||
k[4*(i)+7] = ss[1]; \
|
||||
}
|
||||
|
||||
#define kdf6(k,i) \
|
||||
{ \
|
||||
ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
|
||||
k[6*(i)+ 6] = ff(ss[0]); \
|
||||
ss[1] ^= ss[0]; \
|
||||
k[6*(i)+ 7] = ff(ss[1]); \
|
||||
ss[2] ^= ss[1]; \
|
||||
k[6*(i)+ 8] = ff(ss[2]); \
|
||||
ss[3] ^= ss[2]; \
|
||||
k[6*(i)+ 9] = ff(ss[3]); \
|
||||
ss[4] ^= ss[3]; \
|
||||
k[6*(i)+10] = ff(ss[4]); \
|
||||
ss[5] ^= ss[4]; \
|
||||
k[6*(i)+11] = ff(ss[5]); \
|
||||
}
|
||||
|
||||
#define kd6(k,i) \
|
||||
{ \
|
||||
ss[6] = ls_box(ss[5],3) ^ rcon_tab[i]; \
|
||||
ss[0] ^= ss[6]; ss[6] = ff(ss[6]); \
|
||||
k[6*(i)+ 6] = ss[6] ^= k[6*(i)]; \
|
||||
ss[1] ^= ss[0]; \
|
||||
k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1]; \
|
||||
ss[2] ^= ss[1]; \
|
||||
k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2]; \
|
||||
ss[3] ^= ss[2]; \
|
||||
k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3]; \
|
||||
ss[4] ^= ss[3]; \
|
||||
k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4]; \
|
||||
ss[5] ^= ss[4]; \
|
||||
k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5]; \
|
||||
}
|
||||
|
||||
#define kdl6(k,i) \
|
||||
{ \
|
||||
ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \
|
||||
k[6*(i)+ 6] = ss[0]; \
|
||||
ss[1] ^= ss[0]; \
|
||||
k[6*(i)+ 7] = ss[1]; \
|
||||
ss[2] ^= ss[1]; \
|
||||
k[6*(i)+ 8] = ss[2]; \
|
||||
ss[3] ^= ss[2]; \
|
||||
k[6*(i)+ 9] = ss[3]; \
|
||||
}
|
||||
|
||||
#define kdf8(k,i) \
|
||||
{ \
|
||||
ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
|
||||
k[8*(i)+ 8] = ff(ss[0]); \
|
||||
ss[1] ^= ss[0]; \
|
||||
k[8*(i)+ 9] = ff(ss[1]); \
|
||||
ss[2] ^= ss[1]; \
|
||||
k[8*(i)+10] = ff(ss[2]); \
|
||||
ss[3] ^= ss[2]; \
|
||||
k[8*(i)+11] = ff(ss[3]); \
|
||||
ss[4] ^= ls_box(ss[3],0); \
|
||||
k[8*(i)+12] = ff(ss[4]); \
|
||||
ss[5] ^= ss[4]; \
|
||||
k[8*(i)+13] = ff(ss[5]); \
|
||||
ss[6] ^= ss[5]; \
|
||||
k[8*(i)+14] = ff(ss[6]); \
|
||||
ss[7] ^= ss[6]; \
|
||||
k[8*(i)+15] = ff(ss[7]); \
|
||||
}
|
||||
|
||||
#define kd8(k,i) \
|
||||
{ \
|
||||
u32 __g = ls_box(ss[7],3) ^ rcon_tab[i]; \
|
||||
ss[0] ^= __g; \
|
||||
__g = ff(__g); \
|
||||
k[8*(i)+ 8] = __g ^= k[8*(i)]; \
|
||||
ss[1] ^= ss[0]; \
|
||||
k[8*(i)+ 9] = __g ^= k[8*(i)+ 1]; \
|
||||
ss[2] ^= ss[1]; \
|
||||
k[8*(i)+10] = __g ^= k[8*(i)+ 2]; \
|
||||
ss[3] ^= ss[2]; \
|
||||
k[8*(i)+11] = __g ^= k[8*(i)+ 3]; \
|
||||
__g = ls_box(ss[3],0); \
|
||||
ss[4] ^= __g; \
|
||||
__g = ff(__g); \
|
||||
k[8*(i)+12] = __g ^= k[8*(i)+ 4]; \
|
||||
ss[5] ^= ss[4]; \
|
||||
k[8*(i)+13] = __g ^= k[8*(i)+ 5]; \
|
||||
ss[6] ^= ss[5]; \
|
||||
k[8*(i)+14] = __g ^= k[8*(i)+ 6]; \
|
||||
ss[7] ^= ss[6]; \
|
||||
k[8*(i)+15] = __g ^= k[8*(i)+ 7]; \
|
||||
}
|
||||
|
||||
#define kdl8(k,i) \
|
||||
{ \
|
||||
ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \
|
||||
k[8*(i)+ 8] = ss[0]; \
|
||||
ss[1] ^= ss[0]; \
|
||||
k[8*(i)+ 9] = ss[1]; \
|
||||
ss[2] ^= ss[1]; \
|
||||
k[8*(i)+10] = ss[2]; \
|
||||
ss[3] ^= ss[2]; \
|
||||
k[8*(i)+11] = ss[3]; \
|
||||
}
|
||||
|
||||
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
int i;
|
||||
u32 ss[8];
|
||||
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
const __le32 *key = (const __le32 *)in_key;
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
|
||||
/* encryption schedule */
|
||||
|
||||
ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
|
||||
ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
|
||||
ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
|
||||
ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);
|
||||
|
||||
switch(key_len) {
|
||||
case 16:
|
||||
for (i = 0; i < 9; i++)
|
||||
ke4(ctx->ekey, i);
|
||||
kel4(ctx->ekey, 9);
|
||||
ctx->rounds = 10;
|
||||
break;
|
||||
|
||||
case 24:
|
||||
ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
|
||||
ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
|
||||
for (i = 0; i < 7; i++)
|
||||
ke6(ctx->ekey, i);
|
||||
kel6(ctx->ekey, 7);
|
||||
ctx->rounds = 12;
|
||||
break;
|
||||
|
||||
case 32:
|
||||
ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
|
||||
ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
|
||||
ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
|
||||
ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
|
||||
for (i = 0; i < 6; i++)
|
||||
ke8(ctx->ekey, i);
|
||||
kel8(ctx->ekey, 6);
|
||||
ctx->rounds = 14;
|
||||
break;
|
||||
|
||||
default:
|
||||
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* decryption schedule */
|
||||
|
||||
ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
|
||||
ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
|
||||
ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
|
||||
ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);
|
||||
|
||||
switch (key_len) {
|
||||
case 16:
|
||||
kdf4(ctx->dkey, 0);
|
||||
for (i = 1; i < 9; i++)
|
||||
kd4(ctx->dkey, i);
|
||||
kdl4(ctx->dkey, 9);
|
||||
break;
|
||||
|
||||
case 24:
|
||||
ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
|
||||
ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
|
||||
kdf6(ctx->dkey, 0);
|
||||
for (i = 1; i < 7; i++)
|
||||
kd6(ctx->dkey, i);
|
||||
kdl6(ctx->dkey, 7);
|
||||
break;
|
||||
|
||||
case 32:
|
||||
ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
|
||||
ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
|
||||
ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
|
||||
ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
|
||||
kdf8(ctx->dkey, 0);
|
||||
for (i = 1; i < 6; i++)
|
||||
kd8(ctx->dkey, i);
|
||||
kdl8(ctx->dkey, 6);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
aes_enc_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
aes_dec_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static struct crypto_alg aes_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "aes-i586",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct aes_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = AES_MAX_KEY_SIZE,
|
||||
.cia_setkey = aes_set_key,
|
||||
.cia_encrypt = aes_encrypt,
|
||||
.cia_decrypt = aes_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init aes_init(void)
|
||||
{
|
||||
gen_tabs();
|
||||
return crypto_register_alg(&aes_alg);
|
||||
}
|
||||
|
||||
static void __exit aes_fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&aes_alg);
|
||||
}
|
||||
|
||||
module_init(aes_init);
|
||||
module_exit(aes_fini);
|
||||
|
||||
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, i586 asm optimized");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_AUTHOR("Fruhwirth Clemens, James Morris, Brian Gladman, Adam Richter");
|
||||
MODULE_ALIAS("aes");
|
arch/x86/crypto/aes_64.c (deleted)
@@ -1,336 +0,0 @@
|
||||
/*
|
||||
* Cryptographic API.
|
||||
*
|
||||
* AES Cipher Algorithm.
|
||||
*
|
||||
* Based on Brian Gladman's code.
|
||||
*
|
||||
* Linux developers:
|
||||
* Alexander Kjeldaas <astor@fast.no>
|
||||
* Herbert Valerio Riedel <hvr@hvrlab.org>
|
||||
* Kyle McMartin <kyle@debian.org>
|
||||
* Adam J. Richter <adam@yggdrasil.com> (conversion to 2.5 API).
|
||||
* Andreas Steinmetz <ast@domdv.de> (adapted to x86_64 assembler)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* ---------------------------------------------------------------------------
|
||||
* Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
|
||||
* All rights reserved.
|
||||
*
|
||||
* LICENSE TERMS
|
||||
*
|
||||
* The free distribution and use of this software in both source and binary
|
||||
* form is allowed (with or without changes) provided that:
|
||||
*
|
||||
* 1. distributions of this source code include the above copyright
|
||||
* notice, this list of conditions and the following disclaimer;
|
||||
*
|
||||
* 2. distributions in binary form include the above copyright
|
||||
* notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other associated materials;
|
||||
*
|
||||
* 3. the copyright holder's name is not used to endorse products
|
||||
* built using this software without specific written permission.
|
||||
*
|
||||
* ALTERNATIVELY, provided that this notice is retained in full, this product
|
||||
* may be distributed under the terms of the GNU General Public License (GPL),
|
||||
* in which case the provisions of the GPL apply INSTEAD OF those given above.
|
||||
*
|
||||
* DISCLAIMER
|
||||
*
|
||||
* This software is provided 'as is' with no explicit or implied warranties
|
||||
* in respect of its properties, including, but not limited to, correctness
|
||||
* and/or fitness for purpose.
|
||||
* ---------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/* Some changes from the Gladman version:
|
||||
s/RIJNDAEL(e_key)/E_KEY/g
|
||||
s/RIJNDAEL(d_key)/D_KEY/g
|
||||
*/
|
||||
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define AES_MIN_KEY_SIZE 16
|
||||
#define AES_MAX_KEY_SIZE 32
|
||||
|
||||
#define AES_BLOCK_SIZE 16
|
||||
|
||||
/*
|
||||
* #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
|
||||
*/
|
||||
static inline u8 byte(const u32 x, const unsigned n)
|
||||
{
|
||||
return x >> (n << 3);
|
||||
}
|
||||
|
||||
struct aes_ctx
|
||||
{
|
||||
u32 key_length;
|
||||
u32 buf[120];
|
||||
};
|
||||
|
||||
#define E_KEY (&ctx->buf[0])
|
||||
#define D_KEY (&ctx->buf[60])
|
||||
|
||||
static u8 pow_tab[256] __initdata;
|
||||
static u8 log_tab[256] __initdata;
|
||||
static u8 sbx_tab[256] __initdata;
|
||||
static u8 isb_tab[256] __initdata;
|
||||
static u32 rco_tab[10];
|
||||
u32 aes_ft_tab[4][256];
|
||||
u32 aes_it_tab[4][256];
|
||||
|
||||
u32 aes_fl_tab[4][256];
|
||||
u32 aes_il_tab[4][256];
|
||||
|
||||
static inline u8 f_mult(u8 a, u8 b)
|
||||
{
|
||||
u8 aa = log_tab[a], cc = aa + log_tab[b];
|
||||
|
||||
return pow_tab[cc + (cc < aa ? 1 : 0)];
|
||||
}
|
||||
|
||||
#define ff_mult(a, b) (a && b ? f_mult(a, b) : 0)
|
||||
|
||||
#define ls_box(x) \
|
||||
(aes_fl_tab[0][byte(x, 0)] ^ \
|
||||
aes_fl_tab[1][byte(x, 1)] ^ \
|
||||
aes_fl_tab[2][byte(x, 2)] ^ \
|
||||
aes_fl_tab[3][byte(x, 3)])
|
||||
|
||||
static void __init gen_tabs(void)
|
||||
{
|
||||
u32 i, t;
|
||||
u8 p, q;
|
||||
|
||||
/* log and power tables for GF(2**8) finite field with
|
||||
0x011b as modular polynomial - the simplest primitive
|
||||
root is 0x03, used here to generate the tables */
|
||||
|
||||
for (i = 0, p = 1; i < 256; ++i) {
|
||||
pow_tab[i] = (u8)p;
|
||||
log_tab[p] = (u8)i;
|
||||
|
||||
p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
|
||||
}
|
||||
|
||||
log_tab[1] = 0;
|
||||
|
||||
for (i = 0, p = 1; i < 10; ++i) {
|
||||
rco_tab[i] = p;
|
||||
|
||||
p = (p << 1) ^ (p & 0x80 ? 0x01b : 0);
|
||||
}
|
||||
|
||||
for (i = 0; i < 256; ++i) {
|
||||
p = (i ? pow_tab[255 - log_tab[i]] : 0);
|
||||
q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
|
||||
p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
|
||||
sbx_tab[i] = p;
|
||||
isb_tab[p] = (u8)i;
|
||||
}
|
||||
|
||||
for (i = 0; i < 256; ++i) {
|
||||
p = sbx_tab[i];
|
||||
|
||||
t = p;
|
||||
aes_fl_tab[0][i] = t;
|
||||
aes_fl_tab[1][i] = rol32(t, 8);
|
||||
aes_fl_tab[2][i] = rol32(t, 16);
|
||||
aes_fl_tab[3][i] = rol32(t, 24);
|
||||
|
||||
t = ((u32)ff_mult(2, p)) |
|
||||
((u32)p << 8) |
|
||||
((u32)p << 16) | ((u32)ff_mult(3, p) << 24);
|
||||
|
||||
aes_ft_tab[0][i] = t;
|
||||
aes_ft_tab[1][i] = rol32(t, 8);
|
||||
aes_ft_tab[2][i] = rol32(t, 16);
|
||||
aes_ft_tab[3][i] = rol32(t, 24);
|
||||
|
||||
p = isb_tab[i];
|
||||
|
||||
t = p;
|
||||
aes_il_tab[0][i] = t;
|
||||
aes_il_tab[1][i] = rol32(t, 8);
|
||||
aes_il_tab[2][i] = rol32(t, 16);
|
||||
aes_il_tab[3][i] = rol32(t, 24);
|
||||
|
||||
t = ((u32)ff_mult(14, p)) |
|
||||
((u32)ff_mult(9, p) << 8) |
|
||||
((u32)ff_mult(13, p) << 16) |
|
||||
((u32)ff_mult(11, p) << 24);
|
||||
|
||||
aes_it_tab[0][i] = t;
|
||||
aes_it_tab[1][i] = rol32(t, 8);
|
||||
aes_it_tab[2][i] = rol32(t, 16);
|
||||
aes_it_tab[3][i] = rol32(t, 24);
|
||||
}
|
||||
}
|
||||
|
||||
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
|
||||
|
||||
#define imix_col(y, x) \
|
||||
u = star_x(x); \
|
||||
v = star_x(u); \
|
||||
w = star_x(v); \
|
||||
t = w ^ (x); \
|
||||
(y) = u ^ v ^ w; \
|
||||
(y) ^= ror32(u ^ t, 8) ^ \
|
||||
ror32(v ^ t, 16) ^ \
|
||||
ror32(t, 24)
|
||||
|
||||
/* initialise the key schedule from the user supplied key */
|
||||
|
||||
#define loop4(i) \
|
||||
{ \
|
||||
t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \
|
||||
t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \
|
||||
t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \
|
||||
t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \
|
||||
}
|
||||
|
||||
#define loop6(i) \
|
||||
{ \
|
||||
t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \
|
||||
t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \
|
||||
t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \
|
||||
t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \
|
||||
t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \
|
||||
t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \
|
||||
}
|
||||
|
||||
#define loop8(i) \
|
||||
{ \
|
||||
t = ror32(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \
|
||||
t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \
|
||||
t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \
|
||||
t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \
|
||||
t = E_KEY[8 * i + 4] ^ ls_box(t); \
|
||||
E_KEY[8 * i + 12] = t; \
|
||||
t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \
|
||||
t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \
|
||||
t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
|
||||
}
|
||||
|
||||
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
const __le32 *key = (const __le32 *)in_key;
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
u32 i, j, t, u, v, w;
|
||||
|
||||
if (key_len % 8) {
|
||||
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx->key_length = key_len;
|
||||
|
||||
D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]);
|
||||
D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]);
|
||||
D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]);
|
||||
D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]);
|
||||
|
||||
switch (key_len) {
|
||||
case 16:
|
||||
t = E_KEY[3];
|
||||
for (i = 0; i < 10; ++i)
|
||||
loop4(i);
|
||||
break;
|
||||
|
||||
case 24:
|
||||
E_KEY[4] = le32_to_cpu(key[4]);
|
||||
t = E_KEY[5] = le32_to_cpu(key[5]);
|
||||
for (i = 0; i < 8; ++i)
|
||||
loop6 (i);
|
||||
break;
|
||||
|
||||
case 32:
|
||||
E_KEY[4] = le32_to_cpu(key[4]);
|
||||
E_KEY[5] = le32_to_cpu(key[5]);
|
||||
E_KEY[6] = le32_to_cpu(key[6]);
|
||||
t = E_KEY[7] = le32_to_cpu(key[7]);
|
||||
for (i = 0; i < 7; ++i)
|
||||
loop8(i);
|
||||
break;
|
||||
}
|
||||
|
||||
D_KEY[0] = E_KEY[key_len + 24];
|
||||
D_KEY[1] = E_KEY[key_len + 25];
|
||||
D_KEY[2] = E_KEY[key_len + 26];
|
||||
D_KEY[3] = E_KEY[key_len + 27];
|
||||
|
||||
for (i = 4; i < key_len + 24; ++i) {
|
||||
j = key_len + 24 - (i & ~3) + (i & 3);
|
||||
imix_col(D_KEY[j], E_KEY[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
|
||||
asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
|
||||
|
||||
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
aes_enc_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
aes_dec_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static struct crypto_alg aes_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "aes-x86_64",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct aes_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = AES_MAX_KEY_SIZE,
|
||||
.cia_setkey = aes_set_key,
|
||||
.cia_encrypt = aes_encrypt,
|
||||
.cia_decrypt = aes_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init aes_init(void)
|
||||
{
|
||||
gen_tabs();
|
||||
return crypto_register_alg(&aes_alg);
|
||||
}
|
||||
|
||||
static void __exit aes_fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&aes_alg);
|
||||
}
|
||||
|
||||
module_init(aes_init);
|
||||
module_exit(aes_fini);
|
||||
|
||||
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("aes");
|
arch/x86/crypto/aes_glue.c (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
/*
|
||||
* Glue Code for the asm optimized version of the AES Cipher Algorithm
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/aes.h>
|
||||
|
||||
asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
|
||||
asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in);
|
||||
|
||||
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
aes_enc_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
aes_dec_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static struct crypto_alg aes_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "aes-asm",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = AES_MAX_KEY_SIZE,
|
||||
.cia_setkey = crypto_aes_set_key,
|
||||
.cia_encrypt = aes_encrypt,
|
||||
.cia_decrypt = aes_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init aes_init(void)
|
||||
{
|
||||
return crypto_register_alg(&aes_alg);
|
||||
}
|
||||
|
||||
static void __exit aes_fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&aes_alg);
|
||||
}
|
||||
|
||||
module_init(aes_init);
|
||||
module_exit(aes_fini);
|
||||
|
||||
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("aes");
|
||||
MODULE_ALIAS("aes-asm");
|
arch/x86/crypto/salsa20-i586-asm_32.S (new file, 1114 lines)
File diff suppressed because it is too large

arch/x86/crypto/salsa20-x86_64-asm_64.S (new file, 920 lines)
@@ -0,0 +1,920 @@
|
||||
# enter ECRYPT_encrypt_bytes
|
||||
.text
|
||||
.p2align 5
|
||||
.globl ECRYPT_encrypt_bytes
|
||||
ECRYPT_encrypt_bytes:
|
||||
mov %rsp,%r11
|
||||
and $31,%r11
|
||||
add $256,%r11
|
||||
sub %r11,%rsp
|
||||
# x = arg1
|
||||
mov %rdi,%r8
|
||||
# m = arg2
|
||||
mov %rsi,%rsi
|
||||
# out = arg3
|
||||
mov %rdx,%rdi
|
||||
# bytes = arg4
|
||||
mov %rcx,%rdx
|
||||
# unsigned>? bytes - 0
|
||||
cmp $0,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto done if !unsigned>
|
||||
jbe ._done
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# start:
|
||||
._start:
|
||||
# r11_stack = r11
|
||||
movq %r11,0(%rsp)
|
||||
# r12_stack = r12
|
||||
movq %r12,8(%rsp)
|
||||
# r13_stack = r13
|
||||
movq %r13,16(%rsp)
|
||||
# r14_stack = r14
|
||||
movq %r14,24(%rsp)
|
||||
# r15_stack = r15
|
||||
movq %r15,32(%rsp)
|
||||
# rbx_stack = rbx
|
||||
movq %rbx,40(%rsp)
|
||||
# rbp_stack = rbp
|
||||
movq %rbp,48(%rsp)
|
||||
# in0 = *(uint64 *) (x + 0)
|
||||
movq 0(%r8),%rcx
|
||||
# in2 = *(uint64 *) (x + 8)
|
||||
movq 8(%r8),%r9
|
||||
# in4 = *(uint64 *) (x + 16)
|
||||
movq 16(%r8),%rax
|
||||
# in6 = *(uint64 *) (x + 24)
|
||||
movq 24(%r8),%r10
|
||||
# in8 = *(uint64 *) (x + 32)
|
||||
movq 32(%r8),%r11
|
||||
# in10 = *(uint64 *) (x + 40)
|
||||
movq 40(%r8),%r12
|
||||
# in12 = *(uint64 *) (x + 48)
|
||||
movq 48(%r8),%r13
|
||||
# in14 = *(uint64 *) (x + 56)
|
||||
movq 56(%r8),%r14
|
||||
# j0 = in0
|
||||
movq %rcx,56(%rsp)
|
||||
# j2 = in2
|
||||
movq %r9,64(%rsp)
|
||||
# j4 = in4
|
||||
movq %rax,72(%rsp)
|
||||
# j6 = in6
|
||||
movq %r10,80(%rsp)
|
||||
# j8 = in8
|
||||
movq %r11,88(%rsp)
|
||||
# j10 = in10
|
||||
movq %r12,96(%rsp)
|
||||
# j12 = in12
|
||||
movq %r13,104(%rsp)
|
||||
# j14 = in14
|
||||
movq %r14,112(%rsp)
|
||||
# x_backup = x
|
||||
movq %r8,120(%rsp)
|
||||
# bytesatleast1:
|
||||
._bytesatleast1:
|
||||
# unsigned<? bytes - 64
|
||||
cmp $64,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto nocopy if !unsigned<
|
||||
jae ._nocopy
|
||||
# ctarget = out
|
||||
movq %rdi,128(%rsp)
|
||||
# out = &tmp
|
||||
leaq 192(%rsp),%rdi
|
||||
# i = bytes
|
||||
mov %rdx,%rcx
|
||||
# while (i) { *out++ = *m++; --i }
|
||||
rep movsb
|
||||
# out = &tmp
|
||||
leaq 192(%rsp),%rdi
|
||||
# m = &tmp
|
||||
leaq 192(%rsp),%rsi
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# nocopy:
|
||||
._nocopy:
|
||||
# out_backup = out
|
||||
movq %rdi,136(%rsp)
|
||||
# m_backup = m
|
||||
movq %rsi,144(%rsp)
|
||||
# bytes_backup = bytes
|
||||
movq %rdx,152(%rsp)
|
||||
# x1 = j0
|
||||
movq 56(%rsp),%rdi
|
||||
# x0 = x1
|
||||
mov %rdi,%rdx
|
||||
# (uint64) x1 >>= 32
|
||||
shr $32,%rdi
|
||||
# x3 = j2
|
||||
movq 64(%rsp),%rsi
|
||||
# x2 = x3
|
||||
mov %rsi,%rcx
|
||||
# (uint64) x3 >>= 32
|
||||
shr $32,%rsi
|
||||
# x5 = j4
|
||||
movq 72(%rsp),%r8
|
||||
# x4 = x5
|
||||
mov %r8,%r9
|
||||
# (uint64) x5 >>= 32
|
||||
shr $32,%r8
|
||||
# x5_stack = x5
|
||||
movq %r8,160(%rsp)
|
||||
# x7 = j6
|
||||
movq 80(%rsp),%r8
|
||||
# x6 = x7
|
||||
mov %r8,%rax
|
||||
# (uint64) x7 >>= 32
|
||||
shr $32,%r8
|
||||
# x9 = j8
|
||||
movq 88(%rsp),%r10
|
||||
# x8 = x9
|
||||
mov %r10,%r11
|
||||
# (uint64) x9 >>= 32
|
||||
shr $32,%r10
|
||||
# x11 = j10
|
||||
movq 96(%rsp),%r12
|
||||
# x10 = x11
|
||||
mov %r12,%r13
|
||||
# x10_stack = x10
|
||||
movq %r13,168(%rsp)
|
||||
# (uint64) x11 >>= 32
|
||||
shr $32,%r12
|
||||
# x13 = j12
|
||||
movq 104(%rsp),%r13
|
||||
# x12 = x13
|
||||
mov %r13,%r14
|
||||
# (uint64) x13 >>= 32
|
||||
shr $32,%r13
|
||||
# x15 = j14
|
||||
movq 112(%rsp),%r15
|
||||
# x14 = x15
|
||||
mov %r15,%rbx
|
||||
# (uint64) x15 >>= 32
|
||||
shr $32,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# i = 20
|
||||
mov $20,%r15
|
||||
# mainloop:
|
||||
._mainloop:
|
||||
# i_backup = i
|
||||
movq %r15,184(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x12 + x0
|
||||
lea (%r14,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x4 ^= a
|
||||
xor %rbp,%r9
|
||||
# b = x1 + x5
|
||||
lea (%rdi,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x9 ^= b
|
||||
xor %rbp,%r10
|
||||
# a = x0 + x4
|
||||
lea (%rdx,%r9),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x8 ^= a
|
||||
xor %rbp,%r11
|
||||
# b = x5 + x9
|
||||
lea (%r15,%r10),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= b
|
||||
xor %rbp,%r13
|
||||
# a = x4 + x8
|
||||
lea (%r9,%r11),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x12 ^= a
|
||||
xor %rbp,%r14
|
||||
# b = x9 + x13
|
||||
lea (%r10,%r13),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x1 ^= b
|
||||
xor %rbp,%rdi
|
||||
# a = x8 + x12
|
||||
lea (%r11,%r14),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x13 + x1
|
||||
lea (%r13,%rdi),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x6 + x10
|
||||
lea (%rax,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x14 ^= c
|
||||
xor %r15,%rbx
|
||||
# c = x10 + x14
|
||||
lea (%rbp,%rbx),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x2 ^= c
|
||||
xor %r15,%rcx
|
||||
# c = x14 + x2
|
||||
lea (%rbx,%rcx),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x6 ^= c
|
||||
xor %r15,%rax
|
||||
# c = x2 + x6
|
||||
lea (%rcx,%rax),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x11 + x15
|
||||
lea (%r12,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x3 ^= d
|
||||
xor %rbp,%rsi
|
||||
# d = x15 + x3
|
||||
lea (%r15,%rsi),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= d
|
||||
xor %rbp,%r8
|
||||
# d = x3 + x7
|
||||
lea (%rsi,%r8),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x11 ^= d
|
||||
xor %rbp,%r12
|
||||
# d = x7 + x11
|
||||
lea (%r8,%r12),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x3 + x0
|
||||
lea (%rsi,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x1 ^= a
|
||||
xor %rbp,%rdi
|
||||
# b = x4 + x5
|
||||
lea (%r9,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x6 ^= b
|
||||
xor %rbp,%rax
|
||||
# a = x0 + x1
|
||||
lea (%rdx,%rdi),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x2 ^= a
|
||||
xor %rbp,%rcx
|
||||
# b = x5 + x6
|
||||
lea (%r15,%rax),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= b
|
||||
xor %rbp,%r8
|
||||
# a = x1 + x2
|
||||
lea (%rdi,%rcx),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x3 ^= a
|
||||
xor %rbp,%rsi
|
||||
# b = x6 + x7
|
||||
lea (%rax,%r8),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x4 ^= b
|
||||
xor %rbp,%r9
|
||||
# a = x2 + x3
|
||||
lea (%rcx,%rsi),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x7 + x4
|
||||
lea (%r8,%r9),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x9 + x10
|
||||
lea (%r10,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x11 ^= c
|
||||
xor %r15,%r12
|
||||
# c = x10 + x11
|
||||
lea (%rbp,%r12),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x8 ^= c
|
||||
xor %r15,%r11
|
||||
# c = x11 + x8
|
||||
lea (%r12,%r11),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x9 ^= c
|
||||
xor %r15,%r10
|
||||
# c = x8 + x9
|
||||
lea (%r11,%r10),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x14 + x15
|
||||
lea (%rbx,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x12 ^= d
|
||||
xor %rbp,%r14
|
||||
# d = x15 + x12
|
||||
lea (%r15,%r14),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= d
|
||||
xor %rbp,%r13
|
||||
# d = x12 + x13
|
||||
lea (%r14,%r13),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x14 ^= d
|
||||
xor %rbp,%rbx
|
||||
# d = x13 + x14
|
||||
lea (%r13,%rbx),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x12 + x0
|
||||
lea (%r14,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x4 ^= a
|
||||
xor %rbp,%r9
|
||||
# b = x1 + x5
|
||||
lea (%rdi,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x9 ^= b
|
||||
xor %rbp,%r10
|
||||
# a = x0 + x4
|
||||
lea (%rdx,%r9),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x8 ^= a
|
||||
xor %rbp,%r11
|
||||
# b = x5 + x9
|
||||
lea (%r15,%r10),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= b
|
||||
xor %rbp,%r13
|
||||
# a = x4 + x8
|
||||
lea (%r9,%r11),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x12 ^= a
|
||||
xor %rbp,%r14
|
||||
# b = x9 + x13
|
||||
lea (%r10,%r13),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x1 ^= b
|
||||
xor %rbp,%rdi
|
||||
# a = x8 + x12
|
||||
lea (%r11,%r14),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x13 + x1
|
||||
lea (%r13,%rdi),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x6 + x10
|
||||
lea (%rax,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x14 ^= c
|
||||
xor %r15,%rbx
|
||||
# c = x10 + x14
|
||||
lea (%rbp,%rbx),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x2 ^= c
|
||||
xor %r15,%rcx
|
||||
# c = x14 + x2
|
||||
lea (%rbx,%rcx),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x6 ^= c
|
||||
xor %r15,%rax
|
||||
# c = x2 + x6
|
||||
lea (%rcx,%rax),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x11 + x15
|
||||
lea (%r12,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x3 ^= d
|
||||
xor %rbp,%rsi
|
||||
# d = x15 + x3
|
||||
lea (%r15,%rsi),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= d
|
||||
xor %rbp,%r8
|
||||
# d = x3 + x7
|
||||
lea (%rsi,%r8),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x11 ^= d
|
||||
xor %rbp,%r12
|
||||
# d = x7 + x11
|
||||
lea (%r8,%r12),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x3 + x0
|
||||
lea (%rsi,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x1 ^= a
|
||||
xor %rbp,%rdi
|
||||
# b = x4 + x5
|
||||
lea (%r9,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x6 ^= b
|
||||
xor %rbp,%rax
|
||||
# a = x0 + x1
|
||||
lea (%rdx,%rdi),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x2 ^= a
|
||||
xor %rbp,%rcx
|
||||
# b = x5 + x6
|
||||
lea (%r15,%rax),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= b
|
||||
xor %rbp,%r8
|
||||
# a = x1 + x2
|
||||
lea (%rdi,%rcx),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x3 ^= a
|
||||
xor %rbp,%rsi
|
||||
# b = x6 + x7
|
||||
lea (%rax,%r8),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x4 ^= b
|
||||
xor %rbp,%r9
|
||||
# a = x2 + x3
|
||||
lea (%rcx,%rsi),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x7 + x4
|
||||
lea (%r8,%r9),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x9 + x10
|
||||
lea (%r10,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x11 ^= c
|
||||
xor %r15,%r12
|
||||
# c = x10 + x11
|
||||
lea (%rbp,%r12),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x8 ^= c
|
||||
xor %r15,%r11
|
||||
# c = x11 + x8
|
||||
lea (%r12,%r11),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x9 ^= c
|
||||
xor %r15,%r10
|
||||
# c = x8 + x9
|
||||
lea (%r11,%r10),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x14 + x15
|
||||
lea (%rbx,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x12 ^= d
|
||||
xor %rbp,%r14
|
||||
# d = x15 + x12
|
||||
lea (%r15,%r14),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= d
|
||||
xor %rbp,%r13
|
||||
# d = x12 + x13
|
||||
lea (%r14,%r13),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x14 ^= d
|
||||
xor %rbp,%rbx
|
||||
# d = x13 + x14
|
||||
lea (%r13,%rbx),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# i = i_backup
|
||||
movq 184(%rsp),%r15
|
||||
# unsigned>? i -= 4
|
||||
sub $4,%r15
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto mainloop if unsigned>
|
||||
ja ._mainloop
|
||||
# (uint32) x2 += j2
|
||||
addl 64(%rsp),%ecx
|
||||
# x3 <<= 32
|
||||
shl $32,%rsi
|
||||
# x3 += j2
|
||||
addq 64(%rsp),%rsi
|
||||
# (uint64) x3 >>= 32
|
||||
shr $32,%rsi
|
||||
# x3 <<= 32
|
||||
shl $32,%rsi
|
||||
# x2 += x3
|
||||
add %rsi,%rcx
|
||||
# (uint32) x6 += j6
|
||||
addl 80(%rsp),%eax
|
||||
# x7 <<= 32
|
||||
shl $32,%r8
|
||||
# x7 += j6
|
||||
addq 80(%rsp),%r8
|
||||
# (uint64) x7 >>= 32
|
||||
shr $32,%r8
|
||||
# x7 <<= 32
|
||||
shl $32,%r8
|
||||
# x6 += x7
|
||||
add %r8,%rax
|
||||
# (uint32) x8 += j8
|
||||
addl 88(%rsp),%r11d
|
||||
# x9 <<= 32
|
||||
shl $32,%r10
|
||||
# x9 += j8
|
||||
addq 88(%rsp),%r10
|
||||
# (uint64) x9 >>= 32
|
||||
shr $32,%r10
|
||||
# x9 <<= 32
|
||||
shl $32,%r10
|
||||
# x8 += x9
|
||||
add %r10,%r11
|
||||
# (uint32) x12 += j12
|
||||
addl 104(%rsp),%r14d
|
||||
# x13 <<= 32
|
||||
shl $32,%r13
|
||||
# x13 += j12
|
||||
addq 104(%rsp),%r13
|
||||
# (uint64) x13 >>= 32
|
||||
shr $32,%r13
|
||||
# x13 <<= 32
|
||||
shl $32,%r13
|
||||
# x12 += x13
|
||||
add %r13,%r14
|
||||
# (uint32) x0 += j0
|
||||
addl 56(%rsp),%edx
|
||||
# x1 <<= 32
|
||||
shl $32,%rdi
|
||||
# x1 += j0
|
||||
addq 56(%rsp),%rdi
|
||||
# (uint64) x1 >>= 32
|
||||
shr $32,%rdi
|
||||
# x1 <<= 32
|
||||
shl $32,%rdi
|
||||
# x0 += x1
|
||||
add %rdi,%rdx
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%rdi
|
||||
# (uint32) x4 += j4
|
||||
addl 72(%rsp),%r9d
|
||||
# x5 <<= 32
|
||||
shl $32,%rdi
|
||||
# x5 += j4
|
||||
addq 72(%rsp),%rdi
|
||||
# (uint64) x5 >>= 32
|
||||
shr $32,%rdi
|
||||
# x5 <<= 32
|
||||
shl $32,%rdi
|
||||
# x4 += x5
|
||||
add %rdi,%r9
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%r8
|
||||
# (uint32) x10 += j10
|
||||
addl 96(%rsp),%r8d
|
||||
# x11 <<= 32
|
||||
shl $32,%r12
|
||||
# x11 += j10
|
||||
addq 96(%rsp),%r12
|
||||
# (uint64) x11 >>= 32
|
||||
shr $32,%r12
|
||||
# x11 <<= 32
|
||||
shl $32,%r12
|
||||
# x10 += x11
|
||||
add %r12,%r8
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%rdi
|
||||
# (uint32) x14 += j14
|
||||
addl 112(%rsp),%ebx
|
||||
# x15 <<= 32
|
||||
shl $32,%rdi
|
||||
# x15 += j14
|
||||
addq 112(%rsp),%rdi
|
||||
# (uint64) x15 >>= 32
|
||||
shr $32,%rdi
|
||||
# x15 <<= 32
|
||||
shl $32,%rdi
|
||||
# x14 += x15
|
||||
add %rdi,%rbx
|
||||
# out = out_backup
|
||||
movq 136(%rsp),%rdi
|
||||
# m = m_backup
|
||||
movq 144(%rsp),%rsi
|
||||
# x0 ^= *(uint64 *) (m + 0)
|
||||
xorq 0(%rsi),%rdx
|
||||
# *(uint64 *) (out + 0) = x0
|
||||
movq %rdx,0(%rdi)
|
||||
# x2 ^= *(uint64 *) (m + 8)
|
||||
xorq 8(%rsi),%rcx
|
||||
# *(uint64 *) (out + 8) = x2
|
||||
movq %rcx,8(%rdi)
|
||||
# x4 ^= *(uint64 *) (m + 16)
|
||||
xorq 16(%rsi),%r9
|
||||
# *(uint64 *) (out + 16) = x4
|
||||
movq %r9,16(%rdi)
|
||||
# x6 ^= *(uint64 *) (m + 24)
|
||||
xorq 24(%rsi),%rax
|
||||
# *(uint64 *) (out + 24) = x6
|
||||
movq %rax,24(%rdi)
|
||||
# x8 ^= *(uint64 *) (m + 32)
|
||||
xorq 32(%rsi),%r11
|
||||
# *(uint64 *) (out + 32) = x8
|
||||
movq %r11,32(%rdi)
|
||||
# x10 ^= *(uint64 *) (m + 40)
|
||||
xorq 40(%rsi),%r8
|
||||
# *(uint64 *) (out + 40) = x10
|
||||
movq %r8,40(%rdi)
|
||||
# x12 ^= *(uint64 *) (m + 48)
|
||||
xorq 48(%rsi),%r14
|
||||
# *(uint64 *) (out + 48) = x12
|
||||
movq %r14,48(%rdi)
|
||||
# x14 ^= *(uint64 *) (m + 56)
|
||||
xorq 56(%rsi),%rbx
|
||||
# *(uint64 *) (out + 56) = x14
|
||||
movq %rbx,56(%rdi)
|
||||
# bytes = bytes_backup
|
||||
movq 152(%rsp),%rdx
|
||||
# in8 = j8
|
||||
movq 88(%rsp),%rcx
|
||||
# in8 += 1
|
||||
add $1,%rcx
|
||||
# j8 = in8
|
||||
movq %rcx,88(%rsp)
|
||||
# unsigned>? unsigned<? bytes - 64
|
||||
cmp $64,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto bytesatleast65 if unsigned>
|
||||
ja ._bytesatleast65
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto bytesatleast64 if !unsigned<
|
||||
jae ._bytesatleast64
|
||||
# m = out
|
||||
mov %rdi,%rsi
|
||||
# out = ctarget
|
||||
movq 128(%rsp),%rdi
|
||||
# i = bytes
|
||||
mov %rdx,%rcx
|
||||
# while (i) { *out++ = *m++; --i }
|
||||
rep movsb
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# bytesatleast64:
|
||||
._bytesatleast64:
|
||||
# x = x_backup
|
||||
movq 120(%rsp),%rdi
|
||||
# in8 = j8
|
||||
movq 88(%rsp),%rsi
|
||||
# *(uint64 *) (x + 32) = in8
|
||||
movq %rsi,32(%rdi)
|
||||
# r11 = r11_stack
|
||||
movq 0(%rsp),%r11
|
||||
# r12 = r12_stack
|
||||
movq 8(%rsp),%r12
|
||||
# r13 = r13_stack
|
||||
movq 16(%rsp),%r13
|
||||
# r14 = r14_stack
|
||||
movq 24(%rsp),%r14
|
||||
# r15 = r15_stack
|
||||
movq 32(%rsp),%r15
|
||||
# rbx = rbx_stack
|
||||
movq 40(%rsp),%rbx
|
||||
# rbp = rbp_stack
|
||||
movq 48(%rsp),%rbp
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# done:
|
||||
._done:
|
||||
# leave
|
||||
add %r11,%rsp
|
||||
mov %rdi,%rax
|
||||
mov %rsi,%rdx
|
||||
ret
|
||||
# bytesatleast65:
|
||||
._bytesatleast65:
|
||||
# bytes -= 64
|
||||
sub $64,%rdx
|
||||
# out += 64
|
||||
add $64,%rdi
|
||||
# m += 64
|
||||
add $64,%rsi
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto bytesatleast1
|
||||
jmp ._bytesatleast1
|
||||
# enter ECRYPT_keysetup
|
||||
.text
|
||||
.p2align 5
|
||||
.globl ECRYPT_keysetup
|
||||
ECRYPT_keysetup:
|
||||
mov %rsp,%r11
|
||||
and $31,%r11
|
||||
add $256,%r11
|
||||
sub %r11,%rsp
|
||||
# k = arg2
|
||||
mov %rsi,%rsi
|
||||
# kbits = arg3
|
||||
mov %rdx,%rdx
|
||||
# x = arg1
|
||||
mov %rdi,%rdi
|
||||
# in0 = *(uint64 *) (k + 0)
|
||||
movq 0(%rsi),%r8
|
||||
# in2 = *(uint64 *) (k + 8)
|
||||
movq 8(%rsi),%r9
|
||||
# *(uint64 *) (x + 4) = in0
|
||||
movq %r8,4(%rdi)
|
||||
# *(uint64 *) (x + 12) = in2
|
||||
movq %r9,12(%rdi)
|
||||
# unsigned<? kbits - 256
|
||||
cmp $256,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto kbits128 if unsigned<
|
||||
jb ._kbits128
|
||||
# kbits256:
|
||||
._kbits256:
|
||||
# in10 = *(uint64 *) (k + 16)
|
||||
movq 16(%rsi),%rdx
|
||||
# in12 = *(uint64 *) (k + 24)
|
||||
movq 24(%rsi),%rsi
|
||||
# *(uint64 *) (x + 44) = in10
|
||||
movq %rdx,44(%rdi)
|
||||
# *(uint64 *) (x + 52) = in12
|
||||
movq %rsi,52(%rdi)
|
||||
# in0 = 1634760805
|
||||
mov $1634760805,%rsi
|
||||
# in4 = 857760878
|
||||
mov $857760878,%rdx
|
||||
# in10 = 2036477234
|
||||
mov $2036477234,%rcx
|
||||
# in14 = 1797285236
|
||||
mov $1797285236,%r8
|
||||
# *(uint32 *) (x + 0) = in0
|
||||
movl %esi,0(%rdi)
|
||||
# *(uint32 *) (x + 20) = in4
|
||||
movl %edx,20(%rdi)
|
||||
# *(uint32 *) (x + 40) = in10
|
||||
movl %ecx,40(%rdi)
|
||||
# *(uint32 *) (x + 60) = in14
|
||||
movl %r8d,60(%rdi)
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto keysetupdone
|
||||
jmp ._keysetupdone
|
||||
# kbits128:
|
||||
._kbits128:
|
||||
# in10 = *(uint64 *) (k + 0)
|
||||
movq 0(%rsi),%rdx
|
||||
# in12 = *(uint64 *) (k + 8)
|
||||
movq 8(%rsi),%rsi
|
||||
# *(uint64 *) (x + 44) = in10
|
||||
movq %rdx,44(%rdi)
|
||||
# *(uint64 *) (x + 52) = in12
|
||||
movq %rsi,52(%rdi)
|
||||
# in0 = 1634760805
|
||||
mov $1634760805,%rsi
|
||||
# in4 = 824206446
|
||||
mov $824206446,%rdx
|
||||
# in10 = 2036477238
|
||||
mov $2036477238,%rcx
|
||||
# in14 = 1797285236
|
||||
mov $1797285236,%r8
|
||||
# *(uint32 *) (x + 0) = in0
|
||||
movl %esi,0(%rdi)
|
||||
# *(uint32 *) (x + 20) = in4
|
||||
movl %edx,20(%rdi)
|
||||
# *(uint32 *) (x + 40) = in10
|
||||
movl %ecx,40(%rdi)
|
||||
# *(uint32 *) (x + 60) = in14
|
||||
movl %r8d,60(%rdi)
|
||||
# keysetupdone:
|
||||
._keysetupdone:
|
||||
# leave
|
||||
add %r11,%rsp
|
||||
mov %rdi,%rax
|
||||
mov %rsi,%rdx
|
||||
ret
|
||||
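# Editor's note, not in the original source: the immediate constants loaded in
# ECRYPT_keysetup above are the standard Salsa20 diagonal words, i.e. the
# ASCII string "expand 32-byte k" (0x61707865, 0x3320646e, 0x79622d32,
# 0x6b206574) for 256-bit keys and "expand 16-byte k" for 128-bit keys,
# stored little-endian into state words 0, 5, 10 and 15.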
# enter ECRYPT_ivsetup
|
||||
.text
|
||||
.p2align 5
|
||||
.globl ECRYPT_ivsetup
|
||||
ECRYPT_ivsetup:
|
||||
mov %rsp,%r11
|
||||
and $31,%r11
|
||||
add $256,%r11
|
||||
sub %r11,%rsp
|
||||
# iv = arg2
|
||||
mov %rsi,%rsi
|
||||
# x = arg1
|
||||
mov %rdi,%rdi
|
||||
# in6 = *(uint64 *) (iv + 0)
|
||||
movq 0(%rsi),%rsi
|
||||
# in8 = 0
|
||||
mov $0,%r8
|
||||
# *(uint64 *) (x + 24) = in6
|
||||
movq %rsi,24(%rdi)
|
||||
# *(uint64 *) (x + 32) = in8
|
||||
movq %r8,32(%rdi)
|
||||
# leave
|
||||
add %r11,%rsp
|
||||
mov %rdi,%rax
|
||||
mov %rsi,%rdx
|
||||
ret
|
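The long run of add/rotate/xor steps in ECRYPT_encrypt_bytes above is the Salsa20 core with pairs of 32-bit state words packed into 64-bit registers; the loop counter starts at 20 and drops by 4 per pass because each pass performs two double-rounds (a column round followed by a row round, twice). As a reading aid only, and not part of this commit, the quarter-round those steps implement can be written in C roughly as below; rotl32 is a hypothetical helper.

	#include <stdint.h>

	/* 32-bit left rotation, the "<<<" in the assembly comments. */
	static inline uint32_t rotl32(uint32_t v, int c)
	{
		return (v << c) | (v >> (32 - c));
	}

	/* One Salsa20 quarter-round: the asm's "a = x + y; a <<<= r; z ^= a" steps. */
	static void quarterround(uint32_t *y0, uint32_t *y1, uint32_t *y2, uint32_t *y3)
	{
		*y1 ^= rotl32(*y0 + *y3, 7);
		*y2 ^= rotl32(*y1 + *y0, 9);
		*y3 ^= rotl32(*y2 + *y1, 13);
		*y0 ^= rotl32(*y3 + *y2, 18);
	}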
129	arch/x86/crypto/salsa20_glue.c	(new file)
@ -0,0 +1,129 @@
|
||||
/*
|
||||
* Glue code for optimized assembly version of Salsa20.
|
||||
*
|
||||
* Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com>
|
||||
*
|
||||
* The assembly codes are public domain assembly codes written by Daniel J.
|
||||
* Bernstein <djb@cr.yp.to>. The codes are modified to include indentation
|
||||
* and to remove extraneous comments and functions that are not needed.
|
||||
* - i586 version, renamed as salsa20-i586-asm_32.S
|
||||
* available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s>
|
||||
* - x86-64 version, renamed as salsa20-x86_64-asm_64.S
|
||||
* available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/crypto.h>
|
||||
|
||||
#define SALSA20_IV_SIZE 8U
|
||||
#define SALSA20_MIN_KEY_SIZE 16U
|
||||
#define SALSA20_MAX_KEY_SIZE 32U
|
||||
|
||||
// use the ECRYPT_* function names
|
||||
#define salsa20_keysetup ECRYPT_keysetup
|
||||
#define salsa20_ivsetup ECRYPT_ivsetup
|
||||
#define salsa20_encrypt_bytes ECRYPT_encrypt_bytes
|
||||
|
||||
struct salsa20_ctx
|
||||
{
|
||||
u32 input[16];
|
||||
};
|
||||
|
||||
asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k,
|
||||
u32 keysize, u32 ivsize);
|
||||
asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv);
|
||||
asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx,
|
||||
const u8 *src, u8 *dst, u32 bytes);
|
||||
|
||||
static int setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keysize)
|
||||
{
|
||||
struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, 64);
|
||||
|
||||
salsa20_ivsetup(ctx, walk.iv);
|
||||
|
||||
if (likely(walk.nbytes == nbytes))
|
||||
{
|
||||
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
|
||||
walk.dst.virt.addr, nbytes);
|
||||
return blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
while (walk.nbytes >= 64) {
|
||||
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
walk.nbytes - (walk.nbytes % 64));
|
||||
err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
|
||||
walk.dst.virt.addr, walk.nbytes);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "salsa20",
|
||||
.cra_driver_name = "salsa20-asm",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct salsa20_ctx),
|
||||
.cra_alignmask = 3,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.setkey = setkey,
|
||||
.encrypt = encrypt,
|
||||
.decrypt = encrypt,
|
||||
.min_keysize = SALSA20_MIN_KEY_SIZE,
|
||||
.max_keysize = SALSA20_MAX_KEY_SIZE,
|
||||
.ivsize = SALSA20_IV_SIZE,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
module_exit(fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
|
||||
MODULE_ALIAS("salsa20");
|
||||
MODULE_ALIAS("salsa20-asm");
|
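For orientation only, not part of this diff: a blkcipher registered as above is driven through this kernel generation's synchronous blkcipher interface. A rough, hypothetical caller, with invented key, nonce and buffer handling, might look like this:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	static int salsa20_demo(u8 *buf, unsigned int len)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		u8 key[32] = { 0 };	/* hypothetical key */
		u8 iv[8] = { 0 };	/* hypothetical nonce */
		int err;

		tfm = crypto_alloc_blkcipher("salsa20", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;

		err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
		if (!err) {
			crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
			sg_init_one(&sg, buf, len);
			/* Stream cipher: encryption and decryption are the same operation. */
			err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
		}

		crypto_free_blkcipher(tfm);
		return err;
	}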
@ -1,97 +0,0 @@
|
||||
/*
|
||||
* Glue Code for optimized x86_64 assembler version of TWOFISH
|
||||
*
|
||||
* Originally Twofish for GPG
|
||||
* By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
|
||||
* 256-bit key length added March 20, 1999
|
||||
* Some modifications to reduce the text size by Werner Koch, April, 1998
|
||||
* Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
|
||||
* Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
|
||||
*
|
||||
* The original author has disclaimed all copyright interest in this
|
||||
* code and thus put it in the public domain. The subsequent authors
|
||||
* have put this under the GNU General Public License.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
|
||||
* USA
|
||||
*
|
||||
* This code is a "clean room" implementation, written from the paper
|
||||
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
|
||||
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
|
||||
* through http://www.counterpane.com/twofish.html
|
||||
*
|
||||
* For background information on multiplication in finite fields, used for
|
||||
* the matrix operations in the key schedule, see the book _Contemporary
|
||||
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
|
||||
* Third Edition.
|
||||
*/
|
||||
|
||||
#include <crypto/twofish.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
||||
asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
||||
|
||||
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
twofish_enc_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
twofish_dec_blk(tfm, dst, src);
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "twofish",
|
||||
.cra_driver_name = "twofish-x86_64",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.cra_alignmask = 3,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(alg.cra_list),
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = TF_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = TF_MAX_KEY_SIZE,
|
||||
.cia_setkey = twofish_setkey,
|
||||
.cia_encrypt = twofish_encrypt,
|
||||
.cia_decrypt = twofish_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
module_exit(fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized");
|
||||
MODULE_ALIAS("twofish");
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Glue Code for optimized 586 assembler version of TWOFISH
|
||||
* Glue Code for assembler optimized version of TWOFISH
|
||||
*
|
||||
* Originally Twofish for GPG
|
||||
* By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
|
||||
@ -44,7 +44,6 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
|
||||
asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
||||
asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
|
||||
|
||||
@ -60,7 +59,7 @@ static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "twofish",
|
||||
.cra_driver_name = "twofish-i586",
|
||||
.cra_driver_name = "twofish-asm",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
@ -93,5 +92,6 @@ module_init(init);
|
||||
module_exit(fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized");
|
||||
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
|
||||
MODULE_ALIAS("twofish");
|
||||
MODULE_ALIAS("twofish-asm");
|
@ -24,10 +24,6 @@ config CRYPTO_ALGAPI
|
||||
help
|
||||
This option provides the API for cryptographic algorithms.
|
||||
|
||||
config CRYPTO_ABLKCIPHER
|
||||
tristate
|
||||
select CRYPTO_BLKCIPHER
|
||||
|
||||
config CRYPTO_AEAD
|
||||
tristate
|
||||
select CRYPTO_ALGAPI
|
||||
@ -36,6 +32,15 @@ config CRYPTO_BLKCIPHER
|
||||
tristate
|
||||
select CRYPTO_ALGAPI
|
||||
|
||||
config CRYPTO_SEQIV
|
||||
tristate "Sequence Number IV Generator"
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_BLKCIPHER
|
||||
help
|
||||
This IV generator generates an IV based on a sequence number by
|
||||
xoring it with a salt. This algorithm is mainly useful for CTR
|
||||
and similar modes.
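	# Editor's note, illustration only and not part of the kernel Kconfig text:
	# a seqiv-style generator effectively computes, per request,
	#     IV = salt XOR sequence_number
	# which guarantees a fresh IV for every request under a given key. CTR-like
	# modes only need uniqueness, not unpredictability, so this is sufficient.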
|
||||
|
||||
config CRYPTO_HASH
|
||||
tristate
|
||||
select CRYPTO_ALGAPI
|
||||
@ -91,7 +96,7 @@ config CRYPTO_SHA1
|
||||
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
|
||||
|
||||
config CRYPTO_SHA256
|
||||
tristate "SHA256 digest algorithm"
|
||||
tristate "SHA224 and SHA256 digest algorithm"
|
||||
select CRYPTO_ALGAPI
|
||||
help
|
||||
SHA256 secure hash standard (DFIPS 180-2).
|
||||
@ -99,6 +104,9 @@ config CRYPTO_SHA256
|
||||
This version of SHA implements a 256 bit hash with 128 bits of
|
||||
security against collision attacks.
|
||||
|
||||
This code also includes SHA-224, a 224 bit hash with 112 bits
|
||||
of security against collision attacks.
|
||||
|
||||
config CRYPTO_SHA512
|
||||
tristate "SHA384 and SHA512 digest algorithms"
|
||||
select CRYPTO_ALGAPI
|
||||
@ -195,9 +203,34 @@ config CRYPTO_XTS
|
||||
key size 256, 384 or 512 bits. This implementation currently
|
||||
can't handle a sectorsize which is not a multiple of 16 bytes.
|
||||
|
||||
config CRYPTO_CTR
|
||||
tristate "CTR support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SEQIV
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
CTR: Counter mode
|
||||
This block cipher algorithm is required for IPSec.
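	# Editor's note, illustration only and not part of the kernel Kconfig text:
	# CTR turns a block cipher E into a stream cipher by computing
	#     C[i] = P[i] XOR E(K, counter_block + i)
	# where the counter block starts from the supplied IV and is incremented
	# once per block, so no padding is required and blocks are independent.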
|
||||
|
||||
config CRYPTO_GCM
|
||||
tristate "GCM/GMAC support"
|
||||
select CRYPTO_CTR
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_GF128MUL
|
||||
help
|
||||
Support for Galois/Counter Mode (GCM) and Galois Message
|
||||
Authentication Code (GMAC). Required for IPSec.
|
||||
|
||||
config CRYPTO_CCM
|
||||
tristate "CCM support"
|
||||
select CRYPTO_CTR
|
||||
select CRYPTO_AEAD
|
||||
help
|
||||
Support for Counter with CBC MAC. Required for IPsec.
|
||||
|
||||
config CRYPTO_CRYPTD
|
||||
tristate "Software async crypto daemon"
|
||||
select CRYPTO_ABLKCIPHER
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
This is a generic software asynchronous crypto daemon that
|
||||
@ -320,6 +353,7 @@ config CRYPTO_AES_586
|
||||
tristate "AES cipher algorithms (i586)"
|
||||
depends on (X86 || UML_X86) && !64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_AES
|
||||
help
|
||||
AES cipher algorithms (FIPS-197). AES uses the Rijndael
|
||||
algorithm.
|
||||
@ -341,6 +375,7 @@ config CRYPTO_AES_X86_64
|
||||
tristate "AES cipher algorithms (x86_64)"
|
||||
depends on (X86 || UML_X86) && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_AES
|
||||
help
|
||||
AES cipher algorithms (FIPS-197). AES uses the Rijndael
|
||||
algorithm.
|
||||
@ -441,6 +476,46 @@ config CRYPTO_SEED
|
||||
See also:
|
||||
<http://www.kisa.or.kr/kisa/seed/jsp/seed_eng.jsp>
|
||||
|
||||
config CRYPTO_SALSA20
|
||||
tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL
|
||||
select CRYPTO_BLKCIPHER
|
||||
help
|
||||
Salsa20 stream cipher algorithm.
|
||||
|
||||
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
|
||||
Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
|
||||
|
||||
The Salsa20 stream cipher algorithm is designed by Daniel J.
|
||||
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
|
||||
|
||||
config CRYPTO_SALSA20_586
|
||||
tristate "Salsa20 stream cipher algorithm (i586) (EXPERIMENTAL)"
|
||||
depends on (X86 || UML_X86) && !64BIT
|
||||
depends on EXPERIMENTAL
|
||||
select CRYPTO_BLKCIPHER
|
||||
help
|
||||
Salsa20 stream cipher algorithm.
|
||||
|
||||
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
|
||||
Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
|
||||
|
||||
The Salsa20 stream cipher algorithm is designed by Daniel J.
|
||||
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
|
||||
|
||||
config CRYPTO_SALSA20_X86_64
|
||||
tristate "Salsa20 stream cipher algorithm (x86_64) (EXPERIMENTAL)"
|
||||
depends on (X86 || UML_X86) && 64BIT
|
||||
depends on EXPERIMENTAL
|
||||
select CRYPTO_BLKCIPHER
|
||||
help
|
||||
Salsa20 stream cipher algorithm.
|
||||
|
||||
Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
|
||||
Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>
|
||||
|
||||
The Salsa20 stream cipher algorithm is designed by Daniel J.
|
||||
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
|
||||
|
||||
config CRYPTO_DEFLATE
|
||||
tristate "Deflate compression algorithm"
|
||||
@ -491,6 +566,7 @@ config CRYPTO_TEST
|
||||
tristate "Testing module"
|
||||
depends on m
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_AEAD
|
||||
help
|
||||
Quick & dirty crypto test module.
|
||||
|
||||
@ -498,10 +574,19 @@ config CRYPTO_AUTHENC
|
||||
tristate "Authenc support"
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_MANAGER
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
Authenc: Combined mode wrapper for IPsec.
|
||||
This is required for IPSec.
|
||||
|
||||
config CRYPTO_LZO
|
||||
tristate "LZO compression algorithm"
|
||||
select CRYPTO_ALGAPI
|
||||
select LZO_COMPRESS
|
||||
select LZO_DECOMPRESS
|
||||
help
|
||||
This is the LZO algorithm.
|
||||
|
||||
source "drivers/crypto/Kconfig"
|
||||
|
||||
endif # if CRYPTO
|
||||
|
@ -8,9 +8,14 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o
|
||||
crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
|
||||
obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o
|
||||
obj-$(CONFIG_CRYPTO_AEAD) += aead.o
|
||||
obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
|
||||
|
||||
crypto_blkcipher-objs := ablkcipher.o
|
||||
crypto_blkcipher-objs += blkcipher.o
|
||||
obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
|
||||
obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o
|
||||
obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o
|
||||
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
|
||||
|
||||
crypto_hash-objs := hash.o
|
||||
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
|
||||
@ -32,6 +37,9 @@ obj-$(CONFIG_CRYPTO_CBC) += cbc.o
|
||||
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
|
||||
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
|
||||
obj-$(CONFIG_CRYPTO_XTS) += xts.o
|
||||
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
|
||||
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
|
||||
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
|
||||
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
|
||||
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
|
||||
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
|
||||
@ -48,10 +56,12 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
|
||||
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
|
||||
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
|
||||
obj-$(CONFIG_CRYPTO_SEED) += seed.o
|
||||
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
|
||||
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
|
||||
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
|
||||
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
|
||||
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
|
||||
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
|
||||
|
||||
|
@ -13,14 +13,18 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/errno.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
@ -66,6 +70,16 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
|
||||
return alg->cra_ctxsize;
|
||||
}
|
||||
|
||||
int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
return crypto_ablkcipher_encrypt(&req->creq);
|
||||
}
|
||||
|
||||
int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
return crypto_ablkcipher_decrypt(&req->creq);
|
||||
}
|
||||
|
||||
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
@ -78,6 +92,11 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
|
||||
crt->setkey = setkey;
|
||||
crt->encrypt = alg->encrypt;
|
||||
crt->decrypt = alg->decrypt;
|
||||
if (!alg->ivsize) {
|
||||
crt->givencrypt = skcipher_null_givencrypt;
|
||||
crt->givdecrypt = skcipher_null_givdecrypt;
|
||||
}
|
||||
crt->base = __crypto_ablkcipher_cast(tfm);
|
||||
crt->ivsize = alg->ivsize;
|
||||
|
||||
return 0;
|
||||
@ -90,10 +109,13 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
|
||||
|
||||
seq_printf(m, "type : ablkcipher\n");
|
||||
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
||||
"yes" : "no");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
|
||||
seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
|
||||
seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
|
||||
seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: "<default>");
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_ablkcipher_type = {
|
||||
@ -105,5 +127,220 @@ const struct crypto_type crypto_ablkcipher_type = {
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
|
||||
|
||||
static int no_givdecrypt(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
|
||||
struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
|
||||
|
||||
if (alg->ivsize > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
|
||||
alg->setkey : setkey;
|
||||
crt->encrypt = alg->encrypt;
|
||||
crt->decrypt = alg->decrypt;
|
||||
crt->givencrypt = alg->givencrypt;
|
||||
crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
|
||||
crt->base = __crypto_ablkcipher_cast(tfm);
|
||||
crt->ivsize = alg->ivsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
__attribute__ ((unused));
|
||||
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
{
|
||||
struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
|
||||
|
||||
seq_printf(m, "type : givcipher\n");
|
||||
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
||||
"yes" : "no");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
|
||||
seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
|
||||
seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
|
||||
seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: "<built-in>");
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_givcipher_type = {
|
||||
.ctxsize = crypto_ablkcipher_ctxsize,
|
||||
.init = crypto_init_givcipher_ops,
|
||||
#ifdef CONFIG_PROC_FS
|
||||
.show = crypto_givcipher_show,
|
||||
#endif
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_givcipher_type);
|
||||
|
||||
const char *crypto_default_geniv(const struct crypto_alg *alg)
|
||||
{
|
||||
return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv";
|
||||
}
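	/*
	 * Editor's note, not in the original patch: "chainiv" reuses the IV left
	 * behind by the previous request (for CBC, the last ciphertext block) and
	 * therefore has to serialise requests, which only suits synchronous
	 * users; asynchronous algorithms default to "eseqiv", which derives the
	 * IV from a sequence number and encrypts it alongside the request.
	 */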
|
||||
|
||||
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
|
||||
{
|
||||
struct rtattr *tb[3];
|
||||
struct {
|
||||
struct rtattr attr;
|
||||
struct crypto_attr_type data;
|
||||
} ptype;
|
||||
struct {
|
||||
struct rtattr attr;
|
||||
struct crypto_attr_alg data;
|
||||
} palg;
|
||||
struct crypto_template *tmpl;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *larval;
|
||||
const char *geniv;
|
||||
int err;
|
||||
|
||||
larval = crypto_larval_lookup(alg->cra_driver_name,
|
||||
CRYPTO_ALG_TYPE_GIVCIPHER,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
err = PTR_ERR(larval);
|
||||
if (IS_ERR(larval))
|
||||
goto out;
|
||||
|
||||
err = -EAGAIN;
|
||||
if (!crypto_is_larval(larval))
|
||||
goto drop_larval;
|
||||
|
||||
ptype.attr.rta_len = sizeof(ptype);
|
||||
ptype.attr.rta_type = CRYPTOA_TYPE;
|
||||
ptype.data.type = type | CRYPTO_ALG_GENIV;
|
||||
/* GENIV tells the template that we're making a default geniv. */
|
||||
ptype.data.mask = mask | CRYPTO_ALG_GENIV;
|
||||
tb[0] = &ptype.attr;
|
||||
|
||||
palg.attr.rta_len = sizeof(palg);
|
||||
palg.attr.rta_type = CRYPTOA_ALG;
|
||||
/* Must use the exact name to locate ourselves. */
|
||||
memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
|
||||
tb[1] = &palg.attr;
|
||||
|
||||
tb[2] = NULL;
|
||||
|
||||
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
|
||||
CRYPTO_ALG_TYPE_BLKCIPHER)
|
||||
geniv = alg->cra_blkcipher.geniv;
|
||||
else
|
||||
geniv = alg->cra_ablkcipher.geniv;
|
||||
|
||||
if (!geniv)
|
||||
geniv = crypto_default_geniv(alg);
|
||||
|
||||
tmpl = crypto_lookup_template(geniv);
|
||||
err = -ENOENT;
|
||||
if (!tmpl)
|
||||
goto kill_larval;
|
||||
|
||||
inst = tmpl->alloc(tb);
|
||||
err = PTR_ERR(inst);
|
||||
if (IS_ERR(inst))
|
||||
goto put_tmpl;
|
||||
|
||||
if ((err = crypto_register_instance(tmpl, inst))) {
|
||||
tmpl->free(inst);
|
||||
goto put_tmpl;
|
||||
}
|
||||
|
||||
/* Redo the lookup to use the instance we just registered. */
|
||||
err = -EAGAIN;
|
||||
|
||||
put_tmpl:
|
||||
crypto_tmpl_put(tmpl);
|
||||
kill_larval:
|
||||
crypto_larval_kill(larval);
|
||||
drop_larval:
|
||||
crypto_mod_put(larval);
|
||||
out:
|
||||
crypto_mod_put(alg);
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
|
||||
alg = crypto_alg_mod_lookup(name, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return alg;
|
||||
|
||||
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
|
||||
CRYPTO_ALG_TYPE_GIVCIPHER)
|
||||
return alg;
|
||||
|
||||
if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
|
||||
CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
|
||||
alg->cra_ablkcipher.ivsize))
|
||||
return alg;
|
||||
|
||||
return ERR_PTR(crypto_givcipher_default(alg, type, mask));
|
||||
}
|
||||
|
||||
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
int err;
|
||||
|
||||
type = crypto_skcipher_type(type);
|
||||
mask = crypto_skcipher_mask(mask);
|
||||
|
||||
alg = crypto_lookup_skcipher(name, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return PTR_ERR(alg);
|
||||
|
||||
err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
|
||||
crypto_mod_put(alg);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
|
||||
|
||||
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_tfm *tfm;
|
||||
int err;
|
||||
|
||||
type = crypto_skcipher_type(type);
|
||||
mask = crypto_skcipher_mask(mask);
|
||||
|
||||
for (;;) {
|
||||
struct crypto_alg *alg;
|
||||
|
||||
alg = crypto_lookup_skcipher(alg_name, type, mask);
|
||||
if (IS_ERR(alg)) {
|
||||
err = PTR_ERR(alg);
|
||||
goto err;
|
||||
}
|
||||
|
||||
tfm = __crypto_alloc_tfm(alg, type, mask);
|
||||
if (!IS_ERR(tfm))
|
||||
return __crypto_ablkcipher_cast(tfm);
|
||||
|
||||
crypto_mod_put(alg);
|
||||
err = PTR_ERR(tfm);
|
||||
|
||||
err:
|
||||
if (err != -EAGAIN)
|
||||
break;
|
||||
if (signal_pending(current)) {
|
||||
err = -EINTR;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Asynchronous block chaining cipher type");
|
||||
|
400	crypto/aead.c
@ -12,14 +12,17 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/errno.h>
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
@ -53,25 +56,54 @@ static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
|
||||
return aead->setkey(tfm, key, keylen);
|
||||
}
|
||||
|
||||
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
|
||||
{
|
||||
struct aead_tfm *crt = crypto_aead_crt(tfm);
|
||||
int err;
|
||||
|
||||
if (authsize > crypto_aead_alg(tfm)->maxauthsize)
|
||||
return -EINVAL;
|
||||
|
||||
if (crypto_aead_alg(tfm)->setauthsize) {
|
||||
err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
crypto_aead_crt(crt->base)->authsize = authsize;
|
||||
crt->authsize = authsize;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
|
||||
|
||||
static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
return alg->cra_ctxsize;
|
||||
}
|
||||
|
||||
static int no_givcrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
|
||||
{
|
||||
struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
|
||||
struct aead_tfm *crt = &tfm->crt_aead;
|
||||
|
||||
if (max(alg->authsize, alg->ivsize) > PAGE_SIZE / 8)
|
||||
if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
crt->setkey = setkey;
|
||||
crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
|
||||
alg->setkey : setkey;
|
||||
crt->encrypt = alg->encrypt;
|
||||
crt->decrypt = alg->decrypt;
|
||||
crt->givencrypt = alg->givencrypt ?: no_givcrypt;
|
||||
crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
|
||||
crt->base = __crypto_aead_cast(tfm);
|
||||
crt->ivsize = alg->ivsize;
|
||||
crt->authsize = alg->authsize;
|
||||
crt->authsize = alg->maxauthsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -83,9 +115,12 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
struct aead_alg *aead = &alg->cra_aead;
|
||||
|
||||
seq_printf(m, "type : aead\n");
|
||||
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
||||
"yes" : "no");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "ivsize : %u\n", aead->ivsize);
|
||||
seq_printf(m, "authsize : %u\n", aead->authsize);
|
||||
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
|
||||
seq_printf(m, "geniv : %s\n", aead->geniv ?: "<built-in>");
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_aead_type = {
|
||||
@ -97,5 +132,358 @@ const struct crypto_type crypto_aead_type = {
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_aead_type);
|
||||
|
||||
static int aead_null_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
return crypto_aead_encrypt(&req->areq);
|
||||
}
|
||||
|
||||
static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
return crypto_aead_decrypt(&req->areq);
|
||||
}
|
||||
|
||||
static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
|
||||
{
|
||||
struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
|
||||
struct aead_tfm *crt = &tfm->crt_aead;
|
||||
|
||||
if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
crt->setkey = setkey;
|
||||
crt->encrypt = alg->encrypt;
|
||||
crt->decrypt = alg->decrypt;
|
||||
if (!alg->ivsize) {
|
||||
crt->givencrypt = aead_null_givencrypt;
|
||||
crt->givdecrypt = aead_null_givdecrypt;
|
||||
}
|
||||
crt->base = __crypto_aead_cast(tfm);
|
||||
crt->ivsize = alg->ivsize;
|
||||
crt->authsize = alg->maxauthsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
__attribute__ ((unused));
|
||||
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
{
|
||||
struct aead_alg *aead = &alg->cra_aead;
|
||||
|
||||
seq_printf(m, "type : nivaead\n");
|
||||
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
|
||||
"yes" : "no");
|
||||
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
|
||||
seq_printf(m, "ivsize : %u\n", aead->ivsize);
|
||||
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
|
||||
seq_printf(m, "geniv : %s\n", aead->geniv);
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_nivaead_type = {
|
||||
.ctxsize = crypto_aead_ctxsize,
|
||||
.init = crypto_init_nivaead_ops,
|
||||
#ifdef CONFIG_PROC_FS
|
||||
.show = crypto_nivaead_show,
|
||||
#endif
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_nivaead_type);
|
||||
|
||||
static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
|
||||
const char *name, u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
int err;
|
||||
|
||||
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
type |= CRYPTO_ALG_TYPE_AEAD;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV;
|
||||
|
||||
alg = crypto_alg_mod_lookup(name, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return PTR_ERR(alg);
|
||||
|
||||
err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
|
||||
crypto_mod_put(alg);
|
||||
return err;
|
||||
}
|
||||
|
||||
struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
const char *name;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *alg;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
|
||||
algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(name);
|
||||
if (IS_ERR(name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spawn = crypto_instance_ctx(inst);
|
||||
|
||||
/* Ignore async algorithms if necessary. */
|
||||
mask |= crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
crypto_set_aead_spawn(spawn, inst);
|
||||
err = crypto_grab_nivaead(spawn, name, type, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_aead_spawn_alg(spawn);
|
||||
|
||||
err = -EINVAL;
|
||||
if (!alg->cra_aead.ivsize)
|
||||
goto err_drop_alg;
|
||||
|
||||
/*
|
||||
* This is only true if we're constructing an algorithm with its
|
||||
* default IV generator. For the default generator we elide the
|
||||
* template name and double-check the IV generator.
|
||||
*/
|
||||
if (algt->mask & CRYPTO_ALG_GENIV) {
|
||||
if (strcmp(tmpl->name, alg->cra_aead.geniv))
|
||||
goto err_drop_alg;
|
||||
|
||||
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
|
||||
memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
|
||||
CRYPTO_MAX_ALG_NAME);
|
||||
} else {
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
}
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV;
|
||||
inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = alg->cra_blocksize;
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_aead_type;
|
||||
|
||||
inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
|
||||
inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
|
||||
inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
|
||||
|
||||
inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
|
||||
inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
|
||||
inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt;
|
||||
inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
err_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_alloc);
|
||||
|
||||
void aead_geniv_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_aead(crypto_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_free);
|
||||
|
||||
int aead_geniv_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_aead *aead;
|
||||
|
||||
aead = crypto_spawn_aead(crypto_instance_ctx(inst));
|
||||
if (IS_ERR(aead))
|
||||
return PTR_ERR(aead);
|
||||
|
||||
tfm->crt_aead.base = aead;
|
||||
tfm->crt_aead.reqsize += crypto_aead_reqsize(aead);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_init);
|
||||
|
||||
void aead_geniv_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
crypto_free_aead(tfm->crt_aead.base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(aead_geniv_exit);
|
||||
|
||||
static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
|
||||
{
|
||||
struct rtattr *tb[3];
|
||||
struct {
|
||||
struct rtattr attr;
|
||||
struct crypto_attr_type data;
|
||||
} ptype;
|
||||
struct {
|
||||
struct rtattr attr;
|
||||
struct crypto_attr_alg data;
|
||||
} palg;
|
||||
struct crypto_template *tmpl;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *larval;
|
||||
const char *geniv;
|
||||
int err;
|
||||
|
||||
larval = crypto_larval_lookup(alg->cra_driver_name,
|
||||
CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
|
||||
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
err = PTR_ERR(larval);
|
||||
if (IS_ERR(larval))
|
||||
goto out;
|
||||
|
||||
err = -EAGAIN;
|
||||
if (!crypto_is_larval(larval))
|
||||
goto drop_larval;
|
||||
|
||||
ptype.attr.rta_len = sizeof(ptype);
|
||||
ptype.attr.rta_type = CRYPTOA_TYPE;
|
||||
ptype.data.type = type | CRYPTO_ALG_GENIV;
|
||||
/* GENIV tells the template that we're making a default geniv. */
|
||||
ptype.data.mask = mask | CRYPTO_ALG_GENIV;
|
||||
tb[0] = &ptype.attr;
|
||||
|
||||
palg.attr.rta_len = sizeof(palg);
|
||||
palg.attr.rta_type = CRYPTOA_ALG;
|
||||
/* Must use the exact name to locate ourselves. */
|
||||
memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
|
||||
tb[1] = &palg.attr;
|
||||
|
||||
tb[2] = NULL;
|
||||
|
||||
geniv = alg->cra_aead.geniv;
|
||||
|
||||
tmpl = crypto_lookup_template(geniv);
|
||||
err = -ENOENT;
|
||||
if (!tmpl)
|
||||
goto kill_larval;
|
||||
|
||||
inst = tmpl->alloc(tb);
|
||||
err = PTR_ERR(inst);
|
||||
if (IS_ERR(inst))
|
||||
goto put_tmpl;
|
||||
|
||||
if ((err = crypto_register_instance(tmpl, inst))) {
|
||||
tmpl->free(inst);
|
||||
goto put_tmpl;
|
||||
}
|
||||
|
||||
/* Redo the lookup to use the instance we just registered. */
|
||||
err = -EAGAIN;
|
||||
|
||||
put_tmpl:
|
||||
crypto_tmpl_put(tmpl);
|
||||
kill_larval:
|
||||
crypto_larval_kill(larval);
|
||||
drop_larval:
|
||||
crypto_mod_put(larval);
|
||||
out:
|
||||
crypto_mod_put(alg);
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg *crypto_lookup_aead(const char *name, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
|
||||
alg = crypto_alg_mod_lookup(name, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return alg;
|
||||
|
||||
if (alg->cra_type == &crypto_aead_type)
|
||||
return alg;
|
||||
|
||||
if (!alg->cra_aead.ivsize)
|
||||
return alg;
|
||||
|
||||
return ERR_PTR(crypto_nivaead_default(alg, type, mask));
|
||||
}
|
||||
|
||||
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
int err;
|
||||
|
||||
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
type |= CRYPTO_ALG_TYPE_AEAD;
|
||||
mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
mask |= CRYPTO_ALG_TYPE_MASK;
|
||||
|
||||
alg = crypto_lookup_aead(name, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return PTR_ERR(alg);
|
||||
|
||||
err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
|
||||
crypto_mod_put(alg);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_grab_aead);
|
||||
|
||||
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_tfm *tfm;
|
||||
int err;
|
||||
|
||||
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
type |= CRYPTO_ALG_TYPE_AEAD;
|
||||
mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
mask |= CRYPTO_ALG_TYPE_MASK;
|
||||
|
||||
for (;;) {
|
||||
struct crypto_alg *alg;
|
||||
|
||||
alg = crypto_lookup_aead(alg_name, type, mask);
|
||||
if (IS_ERR(alg)) {
|
||||
err = PTR_ERR(alg);
|
||||
goto err;
|
||||
}
|
||||
|
||||
tfm = __crypto_alloc_tfm(alg, type, mask);
|
||||
if (!IS_ERR(tfm))
|
||||
return __crypto_aead_cast(tfm);
|
||||
|
||||
crypto_mod_put(alg);
|
||||
err = PTR_ERR(tfm);
|
||||
|
||||
err:
|
||||
if (err != -EAGAIN)
|
||||
break;
|
||||
if (signal_pending(current)) {
|
||||
err = -EINTR;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)");
|
||||
|
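The allocation loop above retries on -EAGAIN so that a default IV generator instance built by crypto_nivaead_default() can be picked up on the next lookup. As a rough usage sketch, not taken from this patch set: the algorithm name is only an example, the key buffer is a placeholder, and with the new authenc code the key would have to be packed in the rtattr format described further down.

	/* Illustrative only: allocate an AEAD transform, set tag size and key. */
	static int example_alloc_aead(const u8 *key, unsigned int keylen)
	{
		struct crypto_aead *tfm;
		int err;

		tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_aead_setauthsize(tfm, 12);	/* truncated 96-bit tag */
		if (!err)
			err = crypto_aead_setkey(tfm, key, keylen);

		crypto_free_aead(tfm);
		return err;
	}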
@ -47,11 +47,7 @@
|
||||
* ---------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/* Some changes from the Gladman version:
|
||||
s/RIJNDAEL(e_key)/E_KEY/g
|
||||
s/RIJNDAEL(d_key)/D_KEY/g
|
||||
*/
|
||||
|
||||
#include <crypto/aes.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
@ -59,88 +55,46 @@
|
||||
#include <linux/crypto.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
#define AES_MIN_KEY_SIZE 16
|
||||
#define AES_MAX_KEY_SIZE 32
|
||||
|
||||
#define AES_BLOCK_SIZE 16
|
||||
|
||||
/*
|
||||
* #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
|
||||
*/
|
||||
static inline u8
|
||||
byte(const u32 x, const unsigned n)
|
||||
static inline u8 byte(const u32 x, const unsigned n)
|
||||
{
|
||||
return x >> (n << 3);
|
||||
}
|
||||
|
||||
struct aes_ctx {
|
||||
int key_length;
|
||||
u32 buf[120];
|
||||
};
|
||||
|
||||
#define E_KEY (&ctx->buf[0])
|
||||
#define D_KEY (&ctx->buf[60])
|
||||
|
||||
static u8 pow_tab[256] __initdata;
|
||||
static u8 log_tab[256] __initdata;
|
||||
static u8 sbx_tab[256] __initdata;
|
||||
static u8 isb_tab[256] __initdata;
|
||||
static u32 rco_tab[10];
|
||||
static u32 ft_tab[4][256];
|
||||
static u32 it_tab[4][256];
|
||||
|
||||
static u32 fl_tab[4][256];
|
||||
static u32 il_tab[4][256];
|
||||
u32 crypto_ft_tab[4][256];
|
||||
u32 crypto_fl_tab[4][256];
|
||||
u32 crypto_it_tab[4][256];
|
||||
u32 crypto_il_tab[4][256];
|
||||
|
||||
static inline u8 __init
|
||||
f_mult (u8 a, u8 b)
|
||||
EXPORT_SYMBOL_GPL(crypto_ft_tab);
|
||||
EXPORT_SYMBOL_GPL(crypto_fl_tab);
|
||||
EXPORT_SYMBOL_GPL(crypto_it_tab);
|
||||
EXPORT_SYMBOL_GPL(crypto_il_tab);
|
||||
|
||||
static inline u8 __init f_mult(u8 a, u8 b)
|
||||
{
|
||||
u8 aa = log_tab[a], cc = aa + log_tab[b];
|
||||
|
||||
return pow_tab[cc + (cc < aa ? 1 : 0)];
|
||||
}
|
||||
|
||||
#define ff_mult(a,b) (a && b ? f_mult(a, b) : 0)
|
||||
#define ff_mult(a, b) (a && b ? f_mult(a, b) : 0)
|
||||
|
||||
#define f_rn(bo, bi, n, k) \
|
||||
bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
|
||||
ft_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
|
||||
ft_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
|
||||
ft_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
|
||||
|
||||
#define i_rn(bo, bi, n, k) \
|
||||
bo[n] = it_tab[0][byte(bi[n],0)] ^ \
|
||||
it_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
|
||||
it_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
|
||||
it_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
|
||||
|
||||
#define ls_box(x) \
|
||||
( fl_tab[0][byte(x, 0)] ^ \
|
||||
fl_tab[1][byte(x, 1)] ^ \
|
||||
fl_tab[2][byte(x, 2)] ^ \
|
||||
fl_tab[3][byte(x, 3)] )
|
||||
|
||||
#define f_rl(bo, bi, n, k) \
|
||||
bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
|
||||
fl_tab[1][byte(bi[(n + 1) & 3],1)] ^ \
|
||||
fl_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
|
||||
fl_tab[3][byte(bi[(n + 3) & 3],3)] ^ *(k + n)
|
||||
|
||||
#define i_rl(bo, bi, n, k) \
|
||||
bo[n] = il_tab[0][byte(bi[n],0)] ^ \
|
||||
il_tab[1][byte(bi[(n + 3) & 3],1)] ^ \
|
||||
il_tab[2][byte(bi[(n + 2) & 3],2)] ^ \
|
||||
il_tab[3][byte(bi[(n + 1) & 3],3)] ^ *(k + n)
|
||||
|
||||
static void __init
|
||||
gen_tabs (void)
|
||||
static void __init gen_tabs(void)
|
||||
{
|
||||
u32 i, t;
|
||||
u8 p, q;
|
||||
|
||||
/* log and power tables for GF(2**8) finite field with
|
||||
0x011b as modular polynomial - the simplest primitive
|
||||
root is 0x03, used here to generate the tables */
|
||||
/*
|
||||
* log and power tables for GF(2**8) finite field with
|
||||
* 0x011b as modular polynomial - the simplest primitive
|
||||
* root is 0x03, used here to generate the tables
|
||||
*/
|
||||
|
||||
for (i = 0, p = 1; i < 256; ++i) {
|
||||
pow_tab[i] = (u8) p;
|
||||
@ -169,92 +123,119 @@ gen_tabs (void)
|
||||
p = sbx_tab[i];
|
||||
|
||||
t = p;
|
||||
fl_tab[0][i] = t;
|
||||
fl_tab[1][i] = rol32(t, 8);
|
||||
fl_tab[2][i] = rol32(t, 16);
|
||||
fl_tab[3][i] = rol32(t, 24);
|
||||
crypto_fl_tab[0][i] = t;
|
||||
crypto_fl_tab[1][i] = rol32(t, 8);
|
||||
crypto_fl_tab[2][i] = rol32(t, 16);
|
||||
crypto_fl_tab[3][i] = rol32(t, 24);
|
||||
|
||||
t = ((u32) ff_mult (2, p)) |
|
||||
t = ((u32) ff_mult(2, p)) |
|
||||
((u32) p << 8) |
|
||||
((u32) p << 16) | ((u32) ff_mult (3, p) << 24);
|
||||
((u32) p << 16) | ((u32) ff_mult(3, p) << 24);
|
||||
|
||||
ft_tab[0][i] = t;
|
||||
ft_tab[1][i] = rol32(t, 8);
|
||||
ft_tab[2][i] = rol32(t, 16);
|
||||
ft_tab[3][i] = rol32(t, 24);
|
||||
crypto_ft_tab[0][i] = t;
|
||||
crypto_ft_tab[1][i] = rol32(t, 8);
|
||||
crypto_ft_tab[2][i] = rol32(t, 16);
|
||||
crypto_ft_tab[3][i] = rol32(t, 24);
|
||||
|
||||
p = isb_tab[i];
|
||||
|
||||
t = p;
|
||||
il_tab[0][i] = t;
|
||||
il_tab[1][i] = rol32(t, 8);
|
||||
il_tab[2][i] = rol32(t, 16);
|
||||
il_tab[3][i] = rol32(t, 24);
|
||||
crypto_il_tab[0][i] = t;
|
||||
crypto_il_tab[1][i] = rol32(t, 8);
|
||||
crypto_il_tab[2][i] = rol32(t, 16);
|
||||
crypto_il_tab[3][i] = rol32(t, 24);
|
||||
|
||||
t = ((u32) ff_mult (14, p)) |
|
||||
((u32) ff_mult (9, p) << 8) |
|
||||
((u32) ff_mult (13, p) << 16) |
|
||||
((u32) ff_mult (11, p) << 24);
|
||||
t = ((u32) ff_mult(14, p)) |
|
||||
((u32) ff_mult(9, p) << 8) |
|
||||
((u32) ff_mult(13, p) << 16) |
|
||||
((u32) ff_mult(11, p) << 24);
|
||||
|
||||
it_tab[0][i] = t;
|
||||
it_tab[1][i] = rol32(t, 8);
|
||||
it_tab[2][i] = rol32(t, 16);
|
||||
it_tab[3][i] = rol32(t, 24);
|
||||
crypto_it_tab[0][i] = t;
|
||||
crypto_it_tab[1][i] = rol32(t, 8);
|
||||
crypto_it_tab[2][i] = rol32(t, 16);
|
||||
crypto_it_tab[3][i] = rol32(t, 24);
|
||||
}
|
||||
}
|
||||
|
||||
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
|
||||
|
||||
#define imix_col(y,x) \
|
||||
u = star_x(x); \
|
||||
v = star_x(u); \
|
||||
w = star_x(v); \
|
||||
t = w ^ (x); \
|
||||
(y) = u ^ v ^ w; \
|
||||
(y) ^= ror32(u ^ t, 8) ^ \
|
||||
ror32(v ^ t, 16) ^ \
|
||||
ror32(t,24)
|
||||
|
||||
/* initialise the key schedule from the user supplied key */
|
||||
|
||||
#define loop4(i) \
|
||||
{ t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= E_KEY[4 * i]; E_KEY[4 * i + 4] = t; \
|
||||
t ^= E_KEY[4 * i + 1]; E_KEY[4 * i + 5] = t; \
|
||||
t ^= E_KEY[4 * i + 2]; E_KEY[4 * i + 6] = t; \
|
||||
t ^= E_KEY[4 * i + 3]; E_KEY[4 * i + 7] = t; \
|
||||
}
|
||||
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
|
||||
|
||||
#define loop6(i) \
|
||||
{ t = ror32(t, 8); t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= E_KEY[6 * i]; E_KEY[6 * i + 6] = t; \
|
||||
t ^= E_KEY[6 * i + 1]; E_KEY[6 * i + 7] = t; \
|
||||
t ^= E_KEY[6 * i + 2]; E_KEY[6 * i + 8] = t; \
|
||||
t ^= E_KEY[6 * i + 3]; E_KEY[6 * i + 9] = t; \
|
||||
t ^= E_KEY[6 * i + 4]; E_KEY[6 * i + 10] = t; \
|
||||
t ^= E_KEY[6 * i + 5]; E_KEY[6 * i + 11] = t; \
|
||||
}
|
||||
#define imix_col(y,x) do { \
|
||||
u = star_x(x); \
|
||||
v = star_x(u); \
|
||||
w = star_x(v); \
|
||||
t = w ^ (x); \
|
||||
(y) = u ^ v ^ w; \
|
||||
(y) ^= ror32(u ^ t, 8) ^ \
|
||||
ror32(v ^ t, 16) ^ \
|
||||
ror32(t, 24); \
|
||||
} while (0)
|
||||
|
||||
#define loop8(i) \
|
||||
{ t = ror32(t, 8); ; t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= E_KEY[8 * i]; E_KEY[8 * i + 8] = t; \
|
||||
t ^= E_KEY[8 * i + 1]; E_KEY[8 * i + 9] = t; \
|
||||
t ^= E_KEY[8 * i + 2]; E_KEY[8 * i + 10] = t; \
|
||||
t ^= E_KEY[8 * i + 3]; E_KEY[8 * i + 11] = t; \
|
||||
t = E_KEY[8 * i + 4] ^ ls_box(t); \
|
||||
E_KEY[8 * i + 12] = t; \
|
||||
t ^= E_KEY[8 * i + 5]; E_KEY[8 * i + 13] = t; \
|
||||
t ^= E_KEY[8 * i + 6]; E_KEY[8 * i + 14] = t; \
|
||||
t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \
|
||||
}
|
||||
#define ls_box(x) \
|
||||
crypto_fl_tab[0][byte(x, 0)] ^ \
|
||||
crypto_fl_tab[1][byte(x, 1)] ^ \
|
||||
crypto_fl_tab[2][byte(x, 2)] ^ \
|
||||
crypto_fl_tab[3][byte(x, 3)]
|
||||
|
||||
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
#define loop4(i) do { \
|
||||
t = ror32(t, 8); \
|
||||
t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= ctx->key_enc[4 * i]; \
|
||||
ctx->key_enc[4 * i + 4] = t; \
|
||||
t ^= ctx->key_enc[4 * i + 1]; \
|
||||
ctx->key_enc[4 * i + 5] = t; \
|
||||
t ^= ctx->key_enc[4 * i + 2]; \
|
||||
ctx->key_enc[4 * i + 6] = t; \
|
||||
t ^= ctx->key_enc[4 * i + 3]; \
|
||||
ctx->key_enc[4 * i + 7] = t; \
|
||||
} while (0)
|
||||
|
||||
#define loop6(i) do { \
|
||||
t = ror32(t, 8); \
|
||||
t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= ctx->key_enc[6 * i]; \
|
||||
ctx->key_enc[6 * i + 6] = t; \
|
||||
t ^= ctx->key_enc[6 * i + 1]; \
|
||||
ctx->key_enc[6 * i + 7] = t; \
|
||||
t ^= ctx->key_enc[6 * i + 2]; \
|
||||
ctx->key_enc[6 * i + 8] = t; \
|
||||
t ^= ctx->key_enc[6 * i + 3]; \
|
||||
ctx->key_enc[6 * i + 9] = t; \
|
||||
t ^= ctx->key_enc[6 * i + 4]; \
|
||||
ctx->key_enc[6 * i + 10] = t; \
|
||||
t ^= ctx->key_enc[6 * i + 5]; \
|
||||
ctx->key_enc[6 * i + 11] = t; \
|
||||
} while (0)
|
||||
|
||||
#define loop8(i) do { \
|
||||
t = ror32(t, 8); \
|
||||
t = ls_box(t) ^ rco_tab[i]; \
|
||||
t ^= ctx->key_enc[8 * i]; \
|
||||
ctx->key_enc[8 * i + 8] = t; \
|
||||
t ^= ctx->key_enc[8 * i + 1]; \
|
||||
ctx->key_enc[8 * i + 9] = t; \
|
||||
t ^= ctx->key_enc[8 * i + 2]; \
|
||||
ctx->key_enc[8 * i + 10] = t; \
|
||||
t ^= ctx->key_enc[8 * i + 3]; \
|
||||
ctx->key_enc[8 * i + 11] = t; \
|
||||
t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
|
||||
ctx->key_enc[8 * i + 12] = t; \
|
||||
t ^= ctx->key_enc[8 * i + 5]; \
|
||||
ctx->key_enc[8 * i + 13] = t; \
|
||||
t ^= ctx->key_enc[8 * i + 6]; \
|
||||
ctx->key_enc[8 * i + 14] = t; \
|
||||
t ^= ctx->key_enc[8 * i + 7]; \
|
||||
ctx->key_enc[8 * i + 15] = t; \
|
||||
} while (0)
|
||||
|
||||
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
const __le32 *key = (const __le32 *)in_key;
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
u32 i, t, u, v, w;
|
||||
u32 i, t, u, v, w, j;
|
||||
|
||||
if (key_len % 8) {
|
||||
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
@ -263,95 +244,113 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
|
||||
ctx->key_length = key_len;
|
||||
|
||||
E_KEY[0] = le32_to_cpu(key[0]);
|
||||
E_KEY[1] = le32_to_cpu(key[1]);
|
||||
E_KEY[2] = le32_to_cpu(key[2]);
|
||||
E_KEY[3] = le32_to_cpu(key[3]);
|
||||
ctx->key_dec[key_len + 24] = ctx->key_enc[0] = le32_to_cpu(key[0]);
|
||||
ctx->key_dec[key_len + 25] = ctx->key_enc[1] = le32_to_cpu(key[1]);
|
||||
ctx->key_dec[key_len + 26] = ctx->key_enc[2] = le32_to_cpu(key[2]);
|
||||
ctx->key_dec[key_len + 27] = ctx->key_enc[3] = le32_to_cpu(key[3]);
|
||||
|
||||
switch (key_len) {
|
||||
case 16:
|
||||
t = E_KEY[3];
|
||||
t = ctx->key_enc[3];
|
||||
for (i = 0; i < 10; ++i)
|
||||
loop4 (i);
|
||||
loop4(i);
|
||||
break;
|
||||
|
||||
case 24:
|
||||
E_KEY[4] = le32_to_cpu(key[4]);
|
||||
t = E_KEY[5] = le32_to_cpu(key[5]);
|
||||
ctx->key_enc[4] = le32_to_cpu(key[4]);
|
||||
t = ctx->key_enc[5] = le32_to_cpu(key[5]);
|
||||
for (i = 0; i < 8; ++i)
|
||||
loop6 (i);
|
||||
loop6(i);
|
||||
break;
|
||||
|
||||
case 32:
|
||||
E_KEY[4] = le32_to_cpu(key[4]);
|
||||
E_KEY[5] = le32_to_cpu(key[5]);
|
||||
E_KEY[6] = le32_to_cpu(key[6]);
|
||||
t = E_KEY[7] = le32_to_cpu(key[7]);
|
||||
ctx->key_enc[4] = le32_to_cpu(key[4]);
|
||||
ctx->key_enc[5] = le32_to_cpu(key[5]);
|
||||
ctx->key_enc[6] = le32_to_cpu(key[6]);
|
||||
t = ctx->key_enc[7] = le32_to_cpu(key[7]);
|
||||
for (i = 0; i < 7; ++i)
|
||||
loop8 (i);
|
||||
loop8(i);
|
||||
break;
|
||||
}
|
||||
|
||||
D_KEY[0] = E_KEY[0];
|
||||
D_KEY[1] = E_KEY[1];
|
||||
D_KEY[2] = E_KEY[2];
|
||||
D_KEY[3] = E_KEY[3];
|
||||
ctx->key_dec[0] = ctx->key_enc[key_len + 24];
|
||||
ctx->key_dec[1] = ctx->key_enc[key_len + 25];
|
||||
ctx->key_dec[2] = ctx->key_enc[key_len + 26];
|
||||
ctx->key_dec[3] = ctx->key_enc[key_len + 27];
|
||||
|
||||
for (i = 4; i < key_len + 24; ++i) {
|
||||
imix_col (D_KEY[i], E_KEY[i]);
|
||||
j = key_len + 24 - (i & ~3) + (i & 3);
|
||||
imix_col(ctx->key_dec[j], ctx->key_enc[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_aes_set_key);
|
||||
|
||||
/* encrypt a block of text */
|
||||
|
||||
#define f_nround(bo, bi, k) \
|
||||
f_rn(bo, bi, 0, k); \
|
||||
f_rn(bo, bi, 1, k); \
|
||||
f_rn(bo, bi, 2, k); \
|
||||
f_rn(bo, bi, 3, k); \
|
||||
k += 4
|
||||
#define f_rn(bo, bi, n, k) do { \
|
||||
bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^ \
|
||||
crypto_ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \
|
||||
crypto_ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
|
||||
crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
|
||||
} while (0)
|
||||
|
||||
#define f_lround(bo, bi, k) \
|
||||
f_rl(bo, bi, 0, k); \
|
||||
f_rl(bo, bi, 1, k); \
|
||||
f_rl(bo, bi, 2, k); \
|
||||
f_rl(bo, bi, 3, k)
|
||||
#define f_nround(bo, bi, k) do {\
|
||||
f_rn(bo, bi, 0, k); \
|
||||
f_rn(bo, bi, 1, k); \
|
||||
f_rn(bo, bi, 2, k); \
|
||||
f_rn(bo, bi, 3, k); \
|
||||
k += 4; \
|
||||
} while (0)
|
||||
|
||||
#define f_rl(bo, bi, n, k) do { \
|
||||
bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^ \
|
||||
crypto_fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \
|
||||
crypto_fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
|
||||
crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
|
||||
} while (0)
|
||||
|
||||
#define f_lround(bo, bi, k) do {\
|
||||
f_rl(bo, bi, 0, k); \
|
||||
f_rl(bo, bi, 1, k); \
|
||||
f_rl(bo, bi, 2, k); \
|
||||
f_rl(bo, bi, 3, k); \
|
||||
} while (0)
|
||||
|
||||
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
const __le32 *src = (const __le32 *)in;
|
||||
__le32 *dst = (__le32 *)out;
|
||||
u32 b0[4], b1[4];
|
||||
const u32 *kp = E_KEY + 4;
|
||||
const u32 *kp = ctx->key_enc + 4;
|
||||
const int key_len = ctx->key_length;
|
||||
|
||||
b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0];
|
||||
b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1];
|
||||
b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2];
|
||||
b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3];
|
||||
b0[0] = le32_to_cpu(src[0]) ^ ctx->key_enc[0];
|
||||
b0[1] = le32_to_cpu(src[1]) ^ ctx->key_enc[1];
|
||||
b0[2] = le32_to_cpu(src[2]) ^ ctx->key_enc[2];
|
||||
b0[3] = le32_to_cpu(src[3]) ^ ctx->key_enc[3];
|
||||
|
||||
if (ctx->key_length > 24) {
|
||||
f_nround (b1, b0, kp);
|
||||
f_nround (b0, b1, kp);
|
||||
if (key_len > 24) {
|
||||
f_nround(b1, b0, kp);
|
||||
f_nround(b0, b1, kp);
|
||||
}
|
||||
|
||||
if (ctx->key_length > 16) {
|
||||
f_nround (b1, b0, kp);
|
||||
f_nround (b0, b1, kp);
|
||||
if (key_len > 16) {
|
||||
f_nround(b1, b0, kp);
|
||||
f_nround(b0, b1, kp);
|
||||
}
|
||||
|
||||
f_nround (b1, b0, kp);
|
||||
f_nround (b0, b1, kp);
|
||||
f_nround (b1, b0, kp);
|
||||
f_nround (b0, b1, kp);
|
||||
f_nround (b1, b0, kp);
|
||||
f_nround (b0, b1, kp);
|
||||
f_nround (b1, b0, kp);
|
||||
f_nround (b0, b1, kp);
|
||||
f_nround (b1, b0, kp);
|
||||
f_lround (b0, b1, kp);
|
||||
f_nround(b1, b0, kp);
|
||||
f_nround(b0, b1, kp);
|
||||
f_nround(b1, b0, kp);
|
||||
f_nround(b0, b1, kp);
|
||||
f_nround(b1, b0, kp);
|
||||
f_nround(b0, b1, kp);
|
||||
f_nround(b1, b0, kp);
|
||||
f_nround(b0, b1, kp);
|
||||
f_nround(b1, b0, kp);
|
||||
f_lround(b0, b1, kp);
|
||||
|
||||
dst[0] = cpu_to_le32(b0[0]);
|
||||
dst[1] = cpu_to_le32(b0[1]);
|
||||
@ -361,53 +360,69 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
|
||||
/* decrypt a block of text */
|
||||
|
||||
#define i_nround(bo, bi, k) \
|
||||
i_rn(bo, bi, 0, k); \
|
||||
i_rn(bo, bi, 1, k); \
|
||||
i_rn(bo, bi, 2, k); \
|
||||
i_rn(bo, bi, 3, k); \
|
||||
k -= 4
|
||||
#define i_rn(bo, bi, n, k) do { \
|
||||
bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^ \
|
||||
crypto_it_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \
|
||||
crypto_it_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
|
||||
crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
|
||||
} while (0)
|
||||
|
||||
#define i_lround(bo, bi, k) \
|
||||
i_rl(bo, bi, 0, k); \
|
||||
i_rl(bo, bi, 1, k); \
|
||||
i_rl(bo, bi, 2, k); \
|
||||
i_rl(bo, bi, 3, k)
|
||||
#define i_nround(bo, bi, k) do {\
|
||||
i_rn(bo, bi, 0, k); \
|
||||
i_rn(bo, bi, 1, k); \
|
||||
i_rn(bo, bi, 2, k); \
|
||||
i_rn(bo, bi, 3, k); \
|
||||
k += 4; \
|
||||
} while (0)
|
||||
|
||||
#define i_rl(bo, bi, n, k) do { \
|
||||
bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^ \
|
||||
crypto_il_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \
|
||||
crypto_il_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
|
||||
crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
|
||||
} while (0)
|
||||
|
||||
#define i_lround(bo, bi, k) do {\
|
||||
i_rl(bo, bi, 0, k); \
|
||||
i_rl(bo, bi, 1, k); \
|
||||
i_rl(bo, bi, 2, k); \
|
||||
i_rl(bo, bi, 3, k); \
|
||||
} while (0)
|
||||
|
||||
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
const __le32 *src = (const __le32 *)in;
|
||||
__le32 *dst = (__le32 *)out;
|
||||
u32 b0[4], b1[4];
|
||||
const int key_len = ctx->key_length;
|
||||
const u32 *kp = D_KEY + key_len + 20;
|
||||
const u32 *kp = ctx->key_dec + 4;
|
||||
|
||||
b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24];
|
||||
b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25];
|
||||
b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26];
|
||||
b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27];
|
||||
b0[0] = le32_to_cpu(src[0]) ^ ctx->key_dec[0];
|
||||
b0[1] = le32_to_cpu(src[1]) ^ ctx->key_dec[1];
|
||||
b0[2] = le32_to_cpu(src[2]) ^ ctx->key_dec[2];
|
||||
b0[3] = le32_to_cpu(src[3]) ^ ctx->key_dec[3];
|
||||
|
||||
if (key_len > 24) {
|
||||
i_nround (b1, b0, kp);
|
||||
i_nround (b0, b1, kp);
|
||||
i_nround(b1, b0, kp);
|
||||
i_nround(b0, b1, kp);
|
||||
}
|
||||
|
||||
if (key_len > 16) {
|
||||
i_nround (b1, b0, kp);
|
||||
i_nround (b0, b1, kp);
|
||||
i_nround(b1, b0, kp);
|
||||
i_nround(b0, b1, kp);
|
||||
}
|
||||
|
||||
i_nround (b1, b0, kp);
|
||||
i_nround (b0, b1, kp);
|
||||
i_nround (b1, b0, kp);
|
||||
i_nround (b0, b1, kp);
|
||||
i_nround (b1, b0, kp);
|
||||
i_nround (b0, b1, kp);
|
||||
i_nround (b1, b0, kp);
|
||||
i_nround (b0, b1, kp);
|
||||
i_nround (b1, b0, kp);
|
||||
i_lround (b0, b1, kp);
|
||||
i_nround(b1, b0, kp);
|
||||
i_nround(b0, b1, kp);
|
||||
i_nround(b1, b0, kp);
|
||||
i_nround(b0, b1, kp);
|
||||
i_nround(b1, b0, kp);
|
||||
i_nround(b0, b1, kp);
|
||||
i_nround(b1, b0, kp);
|
||||
i_nround(b0, b1, kp);
|
||||
i_nround(b1, b0, kp);
|
||||
i_lround(b0, b1, kp);
|
||||
|
||||
dst[0] = cpu_to_le32(b0[0]);
|
||||
dst[1] = cpu_to_le32(b0[1]);
|
||||
@ -415,14 +430,13 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
dst[3] = cpu_to_le32(b0[3]);
|
||||
}
|
||||
|
||||
|
||||
static struct crypto_alg aes_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "aes-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = AES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct aes_ctx),
|
||||
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
|
||||
.cra_alignmask = 3,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
|
||||
@ -430,9 +444,9 @@ static struct crypto_alg aes_alg = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = AES_MAX_KEY_SIZE,
|
||||
.cia_setkey = aes_set_key,
|
||||
.cia_encrypt = aes_encrypt,
|
||||
.cia_decrypt = aes_decrypt
|
||||
.cia_setkey = crypto_aes_set_key,
|
||||
.cia_encrypt = aes_encrypt,
|
||||
.cia_decrypt = aes_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
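In the crypto_aes_set_key() hunk above, the decryption schedule is filled through the index mapping j = key_len + 24 - (i & ~3) + (i & 3), which reverses the order of the round keys while leaving the four words inside each round key in place. The stand-alone sketch below is ordinary user-space C, not kernel code; it only prints that mapping for a 16-byte key, assuming the AES-128 schedule of key_len + 28 = 44 words.

	#include <stdio.h>

	/*
	 * Illustration only: reproduce the index arithmetic used by the new
	 * crypto_aes_set_key() for a 16-byte (AES-128) key.  key_dec[j]
	 * receives the inverse-mixed word key_enc[i]; round keys come out in
	 * reverse order, words within a round key keep their order.
	 */
	int main(void)
	{
		unsigned int key_len = 16;
		unsigned int i, j;

		for (i = 4; i < key_len + 24; ++i) {
			j = key_len + 24 - (i & ~3) + (i & 3);
			printf("key_dec[%2u] <- imix_col(key_enc[%2u])\n", j, i);
		}
		return 0;
	}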
@@ -472,7 +472,7 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type)
}
EXPORT_SYMBOL_GPL(crypto_check_attr_type);

struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
const char *crypto_attr_alg_name(struct rtattr *rta)
{
	struct crypto_attr_alg *alga;

@@ -486,7 +486,21 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
	alga = RTA_DATA(rta);
	alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0;

	return crypto_alg_mod_lookup(alga->name, type, mask);
	return alga->name;
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);

struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	const char *name;
	int err;

	name = crypto_attr_alg_name(rta);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	return crypto_alg_mod_lookup(name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_attr_alg);

@@ -605,6 +619,53 @@ int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
}
EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);

static inline void crypto_inc_byte(u8 *a, unsigned int size)
{
	u8 *b = (a + size);
	u8 c;

	for (; size; size--) {
		c = *--b + 1;
		*b = c;
		if (c)
			break;
	}
}

void crypto_inc(u8 *a, unsigned int size)
{
	__be32 *b = (__be32 *)(a + size);
	u32 c;

	for (; size >= 4; size -= 4) {
		c = be32_to_cpu(*--b) + 1;
		*b = cpu_to_be32(c);
		if (c)
			return;
	}

	crypto_inc_byte(a, size);
}
EXPORT_SYMBOL_GPL(crypto_inc);

static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size)
{
	for (; size; size--)
		*a++ ^= *b++;
}

void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	u32 *a = (u32 *)dst;
	u32 *b = (u32 *)src;

	for (; size >= 4; size -= 4)
		*a++ ^= *b++;

	crypto_xor_byte((u8 *)a, (u8 *)b, size);
}
EXPORT_SYMBOL_GPL(crypto_xor);

static int __init crypto_algapi_init(void)
{
	crypto_init_proc();
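The new crypto_inc() above treats its buffer as a big-endian counter (useful for counter-style modes), and crypto_xor() does a word-wise XOR with a byte-wise tail. Below is a stand-alone illustration of the increment semantics only, written as ordinary user-space C rather than the kernel helper itself.

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * User-space restatement of the crypto_inc() semantics: the buffer is a
	 * big-endian counter incremented from its last (least significant)
	 * byte, with the carry rippling towards the front.
	 */
	static void inc_be(uint8_t *a, unsigned int size)
	{
		while (size--)
			if (++a[size])
				break;	/* stop once a byte does not wrap to 0 */
	}

	int main(void)
	{
		uint8_t ctr[8] = { 0, 0, 0, 0, 0, 0, 0xff, 0xff };
		unsigned int i;

		inc_be(ctr, sizeof(ctr));	/* ...00 ff ff  becomes  ...01 00 00 */

		for (i = 0; i < sizeof(ctr); i++)
			printf("%02x", ctr[i]);
		printf("\n");			/* prints 0000000000010000 */
		return 0;
	}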
19	crypto/api.c
@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type,
	return alg;
}

static void crypto_larval_kill(struct crypto_alg *alg)
void crypto_larval_kill(struct crypto_alg *alg)
{
	struct crypto_larval *larval = (void *)alg;

@@ -147,6 +147,7 @@ static void crypto_larval_kill(struct crypto_alg *alg)
	complete_all(&larval->completion);
	crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);

static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
@@ -176,11 +177,9 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
	return alg;
}

struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	if (!name)
		return ERR_PTR(-ENOENT);
@@ -193,7 +192,17 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
	if (alg)
		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;

	larval = crypto_larval_alloc(name, type, mask);
	return crypto_larval_alloc(name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_larval_lookup);

struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	struct crypto_alg *larval;
	int ok;

	larval = crypto_larval_lookup(name, type, mask);
	if (IS_ERR(larval) || !crypto_is_larval(larval))
		return larval;

338	crypto/authenc.c
@@ -10,22 +10,21 @@
 *
 */

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "scatterwalk.h"

struct authenc_instance_ctx {
	struct crypto_spawn auth;
	struct crypto_spawn enc;

	unsigned int authsize;
	unsigned int enckeylen;
	struct crypto_skcipher_spawn enc;
};

struct crypto_authenc_ctx {
@@ -37,19 +36,31 @@ struct crypto_authenc_ctx {
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
				 unsigned int keylen)
{
	struct authenc_instance_ctx *ictx =
		crypto_instance_ctx(crypto_aead_alg_instance(authenc));
	unsigned int enckeylen = ictx->enckeylen;
	unsigned int authkeylen;
	unsigned int enckeylen;
	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_hash *auth = ctx->auth;
	struct crypto_ablkcipher *enc = ctx->enc;
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	int err = -EINVAL;

	if (keylen < enckeylen) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
@@ -71,21 +82,38 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,

out:
	return err;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}
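With this change the encryption key length is no longer a template parameter; setkey() now parses a self-describing blob: an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian encryption key length, followed by the remaining key material, of which the last enckeylen bytes belong to the cipher. The helper below is hypothetical; the authentication-key-before-cipher-key layout is assumed from the authkeylen = keylen - enckeylen computation above, so treat it as an illustrative sketch rather than an interface from this patch.

	/* Illustrative only: pack a key blob in the format crypto_authenc_setkey() expects. */
	static int example_pack_authenc_key(u8 *blob, unsigned int bloblen,
					    const u8 *authkey, unsigned int authkeylen,
					    const u8 *enckey, unsigned int enckeylen)
	{
		struct rtattr *rta = (void *)blob;
		struct crypto_authenc_key_param *param;

		if (bloblen < RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen)
			return -EINVAL;

		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(enckeylen);

		blob += RTA_SPACE(sizeof(*param));
		memcpy(blob, authkey, authkeylen);		/* auth key first ... */
		memcpy(blob + authkeylen, enckey, enckeylen);	/* ... then cipher key */
		return 0;
	}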
|
||||
static int crypto_authenc_hash(struct aead_request *req)
|
||||
static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
|
||||
int chain)
|
||||
{
|
||||
if (chain) {
|
||||
head->length += sg->length;
|
||||
sg = scatterwalk_sg_next(sg);
|
||||
}
|
||||
|
||||
if (sg)
|
||||
scatterwalk_sg_chain(head, 2, sg);
|
||||
else
|
||||
sg_mark_end(head);
|
||||
}
|
||||
|
||||
static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags,
|
||||
struct scatterlist *cipher,
|
||||
unsigned int cryptlen)
|
||||
{
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
struct authenc_instance_ctx *ictx =
|
||||
crypto_instance_ctx(crypto_aead_alg_instance(authenc));
|
||||
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
struct crypto_hash *auth = ctx->auth;
|
||||
struct hash_desc desc = {
|
||||
.tfm = auth,
|
||||
.flags = aead_request_flags(req) & flags,
|
||||
};
|
||||
u8 *hash = aead_request_ctx(req);
|
||||
struct scatterlist *dst = req->dst;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
int err;
|
||||
|
||||
hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth),
|
||||
@ -100,7 +128,7 @@ static int crypto_authenc_hash(struct aead_request *req)
|
||||
if (err)
|
||||
goto auth_unlock;
|
||||
|
||||
err = crypto_hash_update(&desc, dst, cryptlen);
|
||||
err = crypto_hash_update(&desc, cipher, cryptlen);
|
||||
if (err)
|
||||
goto auth_unlock;
|
||||
|
||||
@ -109,17 +137,53 @@ auth_unlock:
|
||||
spin_unlock_bh(&ctx->auth_lock);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
return ERR_PTR(err);
|
||||
|
||||
scatterwalk_map_and_copy(hash, dst, cryptlen, ictx->authsize, 1);
|
||||
return hash;
|
||||
}
|
||||
|
||||
static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
struct scatterlist *dst = req->dst;
|
||||
struct scatterlist cipher[2];
|
||||
struct page *dstp;
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc);
|
||||
unsigned int cryptlen;
|
||||
u8 *vdst;
|
||||
u8 *hash;
|
||||
|
||||
dstp = sg_page(dst);
|
||||
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
|
||||
|
||||
sg_init_table(cipher, 2);
|
||||
sg_set_buf(cipher, iv, ivsize);
|
||||
authenc_chain(cipher, dst, vdst == iv + ivsize);
|
||||
|
||||
cryptlen = req->cryptlen + ivsize;
|
||||
hash = crypto_authenc_hash(req, flags, cipher, cryptlen);
|
||||
if (IS_ERR(hash))
|
||||
return PTR_ERR(hash);
|
||||
|
||||
scatterwalk_map_and_copy(hash, cipher, cryptlen,
|
||||
crypto_aead_authsize(authenc), 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
|
||||
int err)
|
||||
{
|
||||
if (!err)
|
||||
err = crypto_authenc_hash(req->data);
|
||||
if (!err) {
|
||||
struct aead_request *areq = req->data;
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
|
||||
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
struct ablkcipher_request *abreq = aead_request_ctx(areq);
|
||||
u8 *iv = (u8 *)(abreq + 1) +
|
||||
crypto_ablkcipher_reqsize(ctx->enc);
|
||||
|
||||
err = crypto_authenc_genicv(areq, iv, 0);
|
||||
}
|
||||
|
||||
aead_request_complete(req->data, err);
|
||||
}
|
||||
@ -129,72 +193,99 @@ static int crypto_authenc_encrypt(struct aead_request *req)
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
struct ablkcipher_request *abreq = aead_request_ctx(req);
|
||||
struct crypto_ablkcipher *enc = ctx->enc;
|
||||
struct scatterlist *dst = req->dst;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
u8 *iv = (u8 *)(abreq + 1) + crypto_ablkcipher_reqsize(enc);
|
||||
int err;
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->enc);
|
||||
ablkcipher_request_set_tfm(abreq, enc);
|
||||
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
|
||||
crypto_authenc_encrypt_done, req);
|
||||
ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen,
|
||||
req->iv);
|
||||
ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
|
||||
|
||||
memcpy(iv, req->iv, crypto_aead_ivsize(authenc));
|
||||
|
||||
err = crypto_ablkcipher_encrypt(abreq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_authenc_hash(req);
|
||||
return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
|
||||
}
|
||||
|
||||
static int crypto_authenc_verify(struct aead_request *req)
|
||||
static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
|
||||
int err)
|
||||
{
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
struct authenc_instance_ctx *ictx =
|
||||
crypto_instance_ctx(crypto_aead_alg_instance(authenc));
|
||||
if (!err) {
|
||||
struct aead_givcrypt_request *greq = req->data;
|
||||
|
||||
err = crypto_authenc_genicv(&greq->areq, greq->giv, 0);
|
||||
}
|
||||
|
||||
aead_request_complete(req->data, err);
|
||||
}
|
||||
|
||||
static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_aead *authenc = aead_givcrypt_reqtfm(req);
|
||||
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
struct crypto_hash *auth = ctx->auth;
|
||||
struct hash_desc desc = {
|
||||
.tfm = auth,
|
||||
.flags = aead_request_flags(req),
|
||||
};
|
||||
u8 *ohash = aead_request_ctx(req);
|
||||
u8 *ihash;
|
||||
struct scatterlist *src = req->src;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
unsigned int authsize;
|
||||
struct aead_request *areq = &req->areq;
|
||||
struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
|
||||
u8 *iv = req->giv;
|
||||
int err;
|
||||
|
||||
ohash = (u8 *)ALIGN((unsigned long)ohash + crypto_hash_alignmask(auth),
|
||||
crypto_hash_alignmask(auth) + 1);
|
||||
ihash = ohash + crypto_hash_digestsize(auth);
|
||||
|
||||
spin_lock_bh(&ctx->auth_lock);
|
||||
err = crypto_hash_init(&desc);
|
||||
if (err)
|
||||
goto auth_unlock;
|
||||
|
||||
err = crypto_hash_update(&desc, req->assoc, req->assoclen);
|
||||
if (err)
|
||||
goto auth_unlock;
|
||||
|
||||
err = crypto_hash_update(&desc, src, cryptlen);
|
||||
if (err)
|
||||
goto auth_unlock;
|
||||
|
||||
err = crypto_hash_final(&desc, ohash);
|
||||
auth_unlock:
|
||||
spin_unlock_bh(&ctx->auth_lock);
|
||||
skcipher_givcrypt_set_tfm(greq, ctx->enc);
|
||||
skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
|
||||
crypto_authenc_givencrypt_done, areq);
|
||||
skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
|
||||
areq->iv);
|
||||
skcipher_givcrypt_set_giv(greq, iv, req->seq);
|
||||
|
||||
err = crypto_skcipher_givencrypt(greq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
authsize = ictx->authsize;
|
||||
scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0);
|
||||
return memcmp(ihash, ohash, authsize) ? -EINVAL : 0;
|
||||
return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
|
||||
}
|
||||
|
||||
static void crypto_authenc_decrypt_done(struct crypto_async_request *req,
|
||||
int err)
|
||||
static int crypto_authenc_verify(struct aead_request *req,
|
||||
struct scatterlist *cipher,
|
||||
unsigned int cryptlen)
|
||||
{
|
||||
aead_request_complete(req->data, err);
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
u8 *ohash;
|
||||
u8 *ihash;
|
||||
unsigned int authsize;
|
||||
|
||||
ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher,
|
||||
cryptlen);
|
||||
if (IS_ERR(ohash))
|
||||
return PTR_ERR(ohash);
|
||||
|
||||
authsize = crypto_aead_authsize(authenc);
|
||||
ihash = ohash + authsize;
|
||||
scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0);
|
||||
return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
|
||||
}
|
||||
|
||||
static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
|
||||
unsigned int cryptlen)
|
||||
{
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
struct scatterlist *src = req->src;
|
||||
struct scatterlist cipher[2];
|
||||
struct page *srcp;
|
||||
unsigned int ivsize = crypto_aead_ivsize(authenc);
|
||||
u8 *vsrc;
|
||||
|
||||
srcp = sg_page(src);
|
||||
vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
|
||||
|
||||
sg_init_table(cipher, 2);
|
||||
sg_set_buf(cipher, iv, ivsize);
|
||||
authenc_chain(cipher, src, vsrc == iv + ivsize);
|
||||
|
||||
return crypto_authenc_verify(req, cipher, cryptlen + ivsize);
|
||||
}
|
||||
|
||||
static int crypto_authenc_decrypt(struct aead_request *req)
|
||||
@ -202,17 +293,23 @@ static int crypto_authenc_decrypt(struct aead_request *req)
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
struct ablkcipher_request *abreq = aead_request_ctx(req);
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
unsigned int authsize = crypto_aead_authsize(authenc);
|
||||
u8 *iv = req->iv;
|
||||
int err;
|
||||
|
||||
err = crypto_authenc_verify(req);
|
||||
if (cryptlen < authsize)
|
||||
return -EINVAL;
|
||||
cryptlen -= authsize;
|
||||
|
||||
err = crypto_authenc_iverify(req, iv, cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->enc);
|
||||
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
|
||||
crypto_authenc_decrypt_done, req);
|
||||
ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen,
|
||||
req->iv);
|
||||
req->base.complete, req->base.data);
|
||||
ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
|
||||
|
||||
return crypto_ablkcipher_decrypt(abreq);
|
||||
}
|
||||
@ -224,19 +321,13 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
|
||||
struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_hash *auth;
|
||||
struct crypto_ablkcipher *enc;
|
||||
unsigned int digestsize;
|
||||
int err;
|
||||
|
||||
auth = crypto_spawn_hash(&ictx->auth);
|
||||
if (IS_ERR(auth))
|
||||
return PTR_ERR(auth);
|
||||
|
||||
err = -EINVAL;
|
||||
digestsize = crypto_hash_digestsize(auth);
|
||||
if (ictx->authsize > digestsize)
|
||||
goto err_free_hash;
|
||||
|
||||
enc = crypto_spawn_ablkcipher(&ictx->enc);
|
||||
enc = crypto_spawn_skcipher(&ictx->enc);
|
||||
err = PTR_ERR(enc);
|
||||
if (IS_ERR(enc))
|
||||
goto err_free_hash;
|
||||
@ -246,9 +337,10 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
|
||||
tfm->crt_aead.reqsize = max_t(unsigned int,
|
||||
(crypto_hash_alignmask(auth) &
|
||||
~(crypto_tfm_ctx_alignment() - 1)) +
|
||||
digestsize * 2,
|
||||
sizeof(struct ablkcipher_request) +
|
||||
crypto_ablkcipher_reqsize(enc));
|
||||
crypto_hash_digestsize(auth) * 2,
|
||||
sizeof(struct skcipher_givcrypt_request) +
|
||||
crypto_ablkcipher_reqsize(enc) +
|
||||
crypto_ablkcipher_ivsize(enc));
|
||||
|
||||
spin_lock_init(&ctx->auth_lock);
|
||||
|
||||
@ -269,75 +361,74 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
|
||||
|
||||
static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *auth;
|
||||
struct crypto_alg *enc;
|
||||
struct authenc_instance_ctx *ctx;
|
||||
unsigned int authsize;
|
||||
unsigned int enckeylen;
|
||||
const char *enc_name;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD);
|
||||
if (err)
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
|
||||
CRYPTO_ALG_TYPE_HASH_MASK);
|
||||
if (IS_ERR(auth))
|
||||
return ERR_PTR(PTR_ERR(auth));
|
||||
|
||||
err = crypto_attr_u32(tb[2], &authsize);
|
||||
inst = ERR_PTR(err);
|
||||
if (err)
|
||||
enc_name = crypto_attr_alg_name(tb[2]);
|
||||
err = PTR_ERR(enc_name);
|
||||
if (IS_ERR(enc_name))
|
||||
goto out_put_auth;
|
||||
|
||||
enc = crypto_attr_alg(tb[3], CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
inst = ERR_PTR(PTR_ERR(enc));
|
||||
if (IS_ERR(enc))
|
||||
goto out_put_auth;
|
||||
|
||||
err = crypto_attr_u32(tb[4], &enckeylen);
|
||||
if (err)
|
||||
goto out_put_enc;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
err = -ENOMEM;
|
||||
if (!inst)
|
||||
goto out_put_enc;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"authenc(%s,%u,%s,%u)", auth->cra_name, authsize,
|
||||
enc->cra_name, enckeylen) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"authenc(%s,%u,%s,%u)", auth->cra_driver_name,
|
||||
authsize, enc->cra_driver_name, enckeylen) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_free_inst;
|
||||
goto out_put_auth;
|
||||
|
||||
ctx = crypto_instance_ctx(inst);
|
||||
ctx->authsize = authsize;
|
||||
ctx->enckeylen = enckeylen;
|
||||
|
||||
err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
err = crypto_init_spawn(&ctx->enc, enc, inst, CRYPTO_ALG_TYPE_MASK);
|
||||
crypto_set_skcipher_spawn(&ctx->enc, inst);
|
||||
err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
|
||||
crypto_requires_sync(algt->type,
|
||||
algt->mask));
|
||||
if (err)
|
||||
goto err_drop_auth;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
|
||||
enc = crypto_skcipher_spawn_alg(&ctx->enc);
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"authenc(%s,%s)", auth->cra_name, enc->cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_enc;
|
||||
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"authenc(%s,%s)", auth->cra_driver_name,
|
||||
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_enc;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority;
|
||||
inst->alg.cra_blocksize = enc->cra_blocksize;
|
||||
inst->alg.cra_alignmask = max(auth->cra_alignmask, enc->cra_alignmask);
|
||||
inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_aead_type;
|
||||
|
||||
inst->alg.cra_aead.ivsize = enc->cra_blkcipher.ivsize;
|
||||
inst->alg.cra_aead.authsize = authsize;
|
||||
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
|
||||
inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ?
|
||||
auth->cra_hash.digestsize :
|
||||
auth->cra_digest.dia_digestsize;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
|
||||
|
||||
@ -347,18 +438,19 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
|
||||
inst->alg.cra_aead.setkey = crypto_authenc_setkey;
|
||||
inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
|
||||
inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
|
||||
|
||||
out:
|
||||
crypto_mod_put(enc);
|
||||
out_put_auth:
|
||||
crypto_mod_put(auth);
|
||||
return inst;
|
||||
|
||||
err_drop_enc:
|
||||
crypto_drop_skcipher(&ctx->enc);
|
||||
err_drop_auth:
|
||||
crypto_drop_spawn(&ctx->auth);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
out_put_enc:
|
||||
out_put_auth:
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
@ -367,7 +459,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_spawn(&ctx->enc);
|
||||
crypto_drop_skcipher(&ctx->enc);
|
||||
crypto_drop_spawn(&ctx->auth);
|
||||
kfree(inst);
|
||||
}
|
||||
|
@ -14,7 +14,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/kernel.h>
|
||||
@ -25,7 +26,6 @@
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "internal.h"
|
||||
#include "scatterwalk.h"
|
||||
|
||||
enum {
|
||||
BLKCIPHER_WALK_PHYS = 1 << 0,
|
||||
@ -433,9 +433,8 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
|
||||
struct blkcipher_alg *cipher = &alg->cra_blkcipher;
|
||||
unsigned int len = alg->cra_ctxsize;
|
||||
|
||||
type ^= CRYPTO_ALG_ASYNC;
|
||||
mask &= CRYPTO_ALG_ASYNC;
|
||||
if ((type & mask) && cipher->ivsize) {
|
||||
if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
|
||||
cipher->ivsize) {
|
||||
len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
|
||||
len += cipher->ivsize;
|
||||
}
|
||||
@ -451,6 +450,11 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
|
||||
crt->setkey = async_setkey;
|
||||
crt->encrypt = async_encrypt;
|
||||
crt->decrypt = async_decrypt;
|
||||
if (!alg->ivsize) {
|
||||
crt->givencrypt = skcipher_null_givencrypt;
|
||||
crt->givdecrypt = skcipher_null_givdecrypt;
|
||||
}
|
||||
crt->base = __crypto_ablkcipher_cast(tfm);
|
||||
crt->ivsize = alg->ivsize;
|
||||
|
||||
return 0;
|
||||
@ -482,9 +486,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
|
||||
if (alg->ivsize > PAGE_SIZE / 8)
|
||||
return -EINVAL;
|
||||
|
||||
type ^= CRYPTO_ALG_ASYNC;
|
||||
mask &= CRYPTO_ALG_ASYNC;
|
||||
if (type & mask)
|
||||
if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
|
||||
return crypto_init_blkcipher_ops_sync(tfm);
|
||||
else
|
||||
return crypto_init_blkcipher_ops_async(tfm);
|
||||
@ -499,6 +501,8 @@ static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
|
||||
seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
|
||||
seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
|
||||
seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
|
||||
seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?:
|
||||
"<default>");
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_blkcipher_type = {
|
||||
@ -510,5 +514,187 @@ const struct crypto_type crypto_blkcipher_type = {
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
|
||||
|
||||
static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
|
||||
const char *name, u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
int err;
|
||||
|
||||
type = crypto_skcipher_type(type);
|
||||
mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;
|
||||
|
||||
alg = crypto_alg_mod_lookup(name, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return PTR_ERR(alg);
|
||||
|
||||
err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
|
||||
crypto_mod_put(alg);
|
||||
return err;
|
||||
}
|
||||
|
||||
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct {
|
||||
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
int (*encrypt)(struct ablkcipher_request *req);
|
||||
int (*decrypt)(struct ablkcipher_request *req);
|
||||
|
||||
unsigned int min_keysize;
|
||||
unsigned int max_keysize;
|
||||
unsigned int ivsize;
|
||||
|
||||
const char *geniv;
|
||||
} balg;
|
||||
const char *name;
|
||||
struct crypto_skcipher_spawn *spawn;
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *alg;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
|
||||
algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(name);
|
||||
if (IS_ERR(name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spawn = crypto_instance_ctx(inst);
|
||||
|
||||
/* Ignore async algorithms if necessary. */
|
||||
mask |= crypto_requires_sync(algt->type, algt->mask);
|
||||
|
||||
crypto_set_skcipher_spawn(spawn, inst);
|
||||
err = crypto_grab_nivcipher(spawn, name, type, mask);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
alg = crypto_skcipher_spawn_alg(spawn);
|
||||
|
||||
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
|
||||
CRYPTO_ALG_TYPE_BLKCIPHER) {
|
||||
balg.ivsize = alg->cra_blkcipher.ivsize;
|
||||
balg.min_keysize = alg->cra_blkcipher.min_keysize;
|
||||
balg.max_keysize = alg->cra_blkcipher.max_keysize;
|
||||
|
||||
balg.setkey = async_setkey;
|
||||
balg.encrypt = async_encrypt;
|
||||
balg.decrypt = async_decrypt;
|
||||
|
||||
balg.geniv = alg->cra_blkcipher.geniv;
|
||||
} else {
|
||||
balg.ivsize = alg->cra_ablkcipher.ivsize;
|
||||
balg.min_keysize = alg->cra_ablkcipher.min_keysize;
|
||||
balg.max_keysize = alg->cra_ablkcipher.max_keysize;
|
||||
|
||||
balg.setkey = alg->cra_ablkcipher.setkey;
|
||||
balg.encrypt = alg->cra_ablkcipher.encrypt;
|
||||
balg.decrypt = alg->cra_ablkcipher.decrypt;
|
||||
|
||||
balg.geniv = alg->cra_ablkcipher.geniv;
|
||||
}
|
||||
|
||||
err = -EINVAL;
|
||||
if (!balg.ivsize)
|
||||
goto err_drop_alg;
|
||||
|
||||
/*
|
||||
* This is only true if we're constructing an algorithm with its
|
||||
* default IV generator. For the default generator we elide the
|
||||
* template name and double-check the IV generator.
|
||||
*/
|
||||
if (algt->mask & CRYPTO_ALG_GENIV) {
|
||||
if (!balg.geniv)
|
||||
balg.geniv = crypto_default_geniv(alg);
|
||||
err = -EAGAIN;
|
||||
if (strcmp(tmpl->name, balg.geniv))
|
||||
goto err_drop_alg;
|
||||
|
||||
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
|
||||
memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
|
||||
CRYPTO_MAX_ALG_NAME);
|
||||
} else {
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->cra_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"%s(%s)", tmpl->name, alg->cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_alg;
|
||||
}
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
|
||||
inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = alg->cra_blocksize;
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_givcipher_type;
|
||||
|
||||
inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
|
||||
inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
|
||||
inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
|
||||
inst->alg.cra_ablkcipher.geniv = balg.geniv;
|
||||
|
||||
inst->alg.cra_ablkcipher.setkey = balg.setkey;
|
||||
inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
|
||||
inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
err_drop_alg:
|
||||
crypto_drop_skcipher(spawn);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
|
||||
|
||||
void skcipher_geniv_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_skcipher(crypto_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(skcipher_geniv_free);
|
||||
|
||||
int skcipher_geniv_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_ablkcipher *cipher;
|
||||
|
||||
cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
tfm->crt_ablkcipher.base = cipher;
|
||||
tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(skcipher_geniv_init);
|
||||
|
||||
void skcipher_geniv_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Generic block chaining cipher type");
|
||||
|
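The skcipher_geniv_* helpers above are what the IV generator templates added elsewhere in this series build on. Below is a minimal, purely hypothetical sketch of a template wrapping them; every my* name is invented, and a real template would also install its own givencrypt/givdecrypt handlers and context size before registering.

	static struct crypto_template myiv_tmpl;

	static struct crypto_instance *myiv_alloc(struct rtattr **tb)
	{
		struct crypto_instance *inst;

		inst = skcipher_geniv_alloc(&myiv_tmpl, tb, 0, 0);
		if (IS_ERR(inst))
			return inst;

		/* A real generator would set cra_ablkcipher.givencrypt here. */
		inst->alg.cra_init = skcipher_geniv_init;
		inst->alg.cra_exit = skcipher_geniv_exit;

		return inst;
	}

	static struct crypto_template myiv_tmpl = {
		.name = "myiv",
		.alloc = myiv_alloc,
		.free = skcipher_geniv_free,
		.module = THIS_MODULE,
	};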
1777	crypto/camellia.c
File diff suppressed because it is too large
crypto/cast6.c
@@ -369,7 +369,7 @@ static const u8 Tr[4][8] = {
};

/* forward octave */
static inline void W(u32 *key, unsigned int i) {
static void W(u32 *key, unsigned int i) {
	u32 I;
	key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
	key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
@@ -428,7 +428,7 @@ static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
}

/*forward quad round*/
static inline void Q (u32 * block, u8 * Kr, u32 * Km) {
static void Q (u32 * block, u8 * Kr, u32 * Km) {
	u32 I;
	block[2] ^= F1(block[3], Kr[0], Km[0]);
	block[1] ^= F2(block[2], Kr[1], Km[1]);
@@ -437,7 +437,7 @@ static inline void Q (u32 * block, u8 * Kr, u32 * Km) {
}

/*reverse quad round*/
static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) {
static void QBAR (u32 * block, u8 * Kr, u32 * Km) {
	u32 I;
	block[3] ^= F1(block[0], Kr[3], Km[3]);
	block[0] ^= F3(block[1], Kr[2], Km[2]);
109
crypto/cbc.c
109
crypto/cbc.c
@ -14,13 +14,13 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct crypto_cbc_ctx {
|
||||
struct crypto_cipher *child;
|
||||
void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
|
||||
};
|
||||
|
||||
static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
|
||||
@ -41,9 +41,7 @@ static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
|
||||
|
||||
static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_encrypt;
|
||||
@ -54,7 +52,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
|
||||
u8 *iv = walk->iv;
|
||||
|
||||
do {
|
||||
xor(iv, src, bsize);
|
||||
crypto_xor(iv, src, bsize);
|
||||
fn(crypto_cipher_tfm(tfm), dst, iv);
|
||||
memcpy(iv, dst, bsize);
|
||||
|
||||
@ -67,9 +65,7 @@ static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
|
||||
|
||||
static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_encrypt;
|
||||
@ -79,7 +75,7 @@ static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
|
||||
u8 *iv = walk->iv;
|
||||
|
||||
do {
|
||||
xor(src, iv, bsize);
|
||||
crypto_xor(src, iv, bsize);
|
||||
fn(crypto_cipher_tfm(tfm), src, src);
|
||||
iv = src;
|
||||
|
||||
@ -99,7 +95,6 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
@ -107,11 +102,9 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
if (walk.src.virt.addr == walk.dst.virt.addr)
|
||||
nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
|
||||
else
|
||||
nbytes = crypto_cbc_encrypt_segment(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
@ -120,9 +113,7 @@ static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
|
||||
static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_decrypt;
|
||||
@ -134,7 +125,7 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
|
||||
|
||||
do {
|
||||
fn(crypto_cipher_tfm(tfm), dst, src);
|
||||
xor(dst, iv, bsize);
|
||||
crypto_xor(dst, iv, bsize);
|
||||
iv = src;
|
||||
|
||||
src += bsize;
|
||||
@ -148,34 +139,29 @@ static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
|
||||
|
||||
static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_decrypt;
|
||||
int bsize = crypto_cipher_blocksize(tfm);
|
||||
unsigned long alignmask = crypto_cipher_alignmask(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 stack[bsize + alignmask];
|
||||
u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
|
||||
|
||||
memcpy(first_iv, walk->iv, bsize);
|
||||
u8 last_iv[bsize];
|
||||
|
||||
/* Start of the last block. */
|
||||
src += nbytes - nbytes % bsize - bsize;
|
||||
memcpy(walk->iv, src, bsize);
|
||||
src += nbytes - (nbytes & (bsize - 1)) - bsize;
|
||||
memcpy(last_iv, src, bsize);
|
||||
|
||||
for (;;) {
|
||||
fn(crypto_cipher_tfm(tfm), src, src);
|
||||
if ((nbytes -= bsize) < bsize)
|
||||
break;
|
||||
xor(src, src - bsize, bsize);
|
||||
crypto_xor(src, src - bsize, bsize);
|
||||
src -= bsize;
|
||||
}
|
||||
|
||||
xor(src, first_iv, bsize);
|
||||
crypto_xor(src, walk->iv, bsize);
|
||||
memcpy(walk->iv, last_iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
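For reference, the chaining that both paths above implement is the textbook CBC recurrence C_i = E_K(P_i XOR C_{i-1}) with C_0 = IV; the in-place decrypt walks backwards so every ciphertext block is still available to serve as the IV of the block after it. A compact userspace sketch of CBC encryption over a contiguous buffer, with the block cipher abstracted behind a function pointer (the names here are illustrative, not the kernel API):

#include <string.h>
#include <stddef.h>

/* encrypt_block is a stand-in for the underlying cipher (e.g. AES). */
typedef void (*encrypt_block_fn)(void *key, unsigned char *dst,
				 const unsigned char *src);

/* CBC-encrypt len bytes in place; len must be a multiple of bsize. */
static void cbc_encrypt(encrypt_block_fn enc, void *key, unsigned char *buf,
			size_t len, unsigned char *iv, size_t bsize)
{
	size_t i, j;

	for (i = 0; i < len; i += bsize) {
		for (j = 0; j < bsize; j++)	/* P_i ^= C_{i-1} (or IV) */
			buf[i + j] ^= iv[j];
		enc(key, buf + i, buf + i);	/* C_i = E_K(...) */
		memcpy(iv, buf + i, bsize);	/* chain for the next block */
	}
}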
@ -188,7 +174,6 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
@ -196,48 +181,15 @@ static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
if (walk.src.virt.addr == walk.dst.virt.addr)
|
||||
nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
|
||||
else
|
||||
nbytes = crypto_cbc_decrypt_segment(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
|
||||
{
|
||||
do {
|
||||
*a++ ^= *b++;
|
||||
} while (--bs);
|
||||
}
|
||||
|
||||
static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
|
||||
{
|
||||
u32 *a = (u32 *)dst;
|
||||
u32 *b = (u32 *)src;
|
||||
|
||||
do {
|
||||
*a++ ^= *b++;
|
||||
} while ((bs -= 4));
|
||||
}
|
||||
|
||||
static void xor_64(u8 *a, const u8 *b, unsigned int bs)
|
||||
{
|
||||
((u32 *)a)[0] ^= ((u32 *)b)[0];
|
||||
((u32 *)a)[1] ^= ((u32 *)b)[1];
|
||||
}
|
||||
|
||||
static void xor_128(u8 *a, const u8 *b, unsigned int bs)
|
||||
{
|
||||
((u32 *)a)[0] ^= ((u32 *)b)[0];
|
||||
((u32 *)a)[1] ^= ((u32 *)b)[1];
|
||||
((u32 *)a)[2] ^= ((u32 *)b)[2];
|
||||
((u32 *)a)[3] ^= ((u32 *)b)[3];
|
||||
}
|
||||
|
||||
static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
@ -245,22 +197,6 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
|
||||
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_cipher *cipher;
|
||||
|
||||
switch (crypto_tfm_alg_blocksize(tfm)) {
|
||||
case 8:
|
||||
ctx->xor = xor_64;
|
||||
break;
|
||||
|
||||
case 16:
|
||||
ctx->xor = xor_128;
|
||||
break;
|
||||
|
||||
default:
|
||||
if (crypto_tfm_alg_blocksize(tfm) % 4)
|
||||
ctx->xor = xor_byte;
|
||||
else
|
||||
ctx->xor = xor_quad;
|
||||
}
|
||||
|
||||
cipher = crypto_spawn_cipher(spawn);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
@ -290,6 +226,10 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
|
||||
if (IS_ERR(alg))
|
||||
return ERR_PTR(PTR_ERR(alg));
|
||||
|
||||
inst = ERR_PTR(-EINVAL);
|
||||
if (!is_power_of_2(alg->cra_blocksize))
|
||||
goto out_put_alg;
|
||||
|
||||
inst = crypto_alloc_instance("cbc", alg);
|
||||
if (IS_ERR(inst))
|
||||
goto out_put_alg;
|
||||
@ -300,8 +240,9 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_blkcipher_type;
|
||||
|
||||
if (!(alg->cra_blocksize % 4))
|
||||
inst->alg.cra_alignmask |= 3;
|
||||
/* We access the data as u32s when xoring. */
|
||||
inst->alg.cra_alignmask |= __alignof__(u32) - 1;
|
||||
|
||||
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
|
||||
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
|
||||
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
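The unconditional cra_alignmask |= __alignof__(u32) - 1 above replaces the old per-blocksize xor selection: since crypto_xor() accesses the data as u32s, the template simply tells the API that walked buffers must be 4-byte aligned. An alignmask is applied with the usual round-up arithmetic; a small plain-C illustration (not kernel code):

#include <stdint.h>

/* Round ptr up to the next (mask + 1)-byte boundary; mask is 2^n - 1,
 * e.g. __alignof__(u32) - 1 == 3 gives 4-byte alignment. */
static inline void *align_up(void *ptr, uintptr_t mask)
{
	return (void *)(((uintptr_t)ptr + mask) & ~mask);
}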
crypto/ccm.c: new file, 889 lines
@ -0,0 +1,889 @@
|
||||
/*
|
||||
* CCM: Counter with CBC-MAC
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
struct ccm_instance_ctx {
|
||||
struct crypto_skcipher_spawn ctr;
|
||||
struct crypto_spawn cipher;
|
||||
};
|
||||
|
||||
struct crypto_ccm_ctx {
|
||||
struct crypto_cipher *cipher;
|
||||
struct crypto_ablkcipher *ctr;
|
||||
};
|
||||
|
||||
struct crypto_rfc4309_ctx {
|
||||
struct crypto_aead *child;
|
||||
u8 nonce[3];
|
||||
};
|
||||
|
||||
struct crypto_ccm_req_priv_ctx {
|
||||
u8 odata[16];
|
||||
u8 idata[16];
|
||||
u8 auth_tag[16];
|
||||
u32 ilen;
|
||||
u32 flags;
|
||||
struct scatterlist src[2];
|
||||
struct scatterlist dst[2];
|
||||
struct ablkcipher_request abreq;
|
||||
};
|
||||
|
||||
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
|
||||
struct aead_request *req)
|
||||
{
|
||||
unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
|
||||
|
||||
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
|
||||
}
|
||||
|
||||
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
|
||||
{
|
||||
__be32 data;
|
||||
|
||||
memset(block, 0, csize);
|
||||
block += csize;
|
||||
|
||||
if (csize >= 4)
|
||||
csize = 4;
|
||||
else if (msglen > (1 << (8 * csize)))
|
||||
return -EOVERFLOW;
|
||||
|
||||
data = cpu_to_be32(msglen);
|
||||
memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_ablkcipher *ctr = ctx->ctr;
|
||||
struct crypto_cipher *tfm = ctx->cipher;
|
||||
int err = 0;
|
||||
|
||||
crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_ablkcipher_setkey(ctr, key, keylen);
|
||||
crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_cipher_setkey(tfm, key, keylen);
|
||||
crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
|
||||
unsigned int authsize)
|
||||
{
|
||||
switch (authsize) {
|
||||
case 4:
|
||||
case 6:
|
||||
case 8:
|
||||
case 10:
|
||||
case 12:
|
||||
case 14:
|
||||
case 16:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int format_input(u8 *info, struct aead_request *req,
|
||||
unsigned int cryptlen)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
unsigned int lp = req->iv[0];
|
||||
unsigned int l = lp + 1;
|
||||
unsigned int m;
|
||||
|
||||
m = crypto_aead_authsize(aead);
|
||||
|
||||
memcpy(info, req->iv, 16);
|
||||
|
||||
/* format control info per RFC 3610 and
|
||||
* NIST Special Publication 800-38C
|
||||
*/
|
||||
*info |= (8 * ((m - 2) / 2));
|
||||
if (req->assoclen)
|
||||
*info |= 64;
|
||||
|
||||
return set_msg_len(info + 16 - l, cryptlen, l);
|
||||
}
|
||||
|
||||
static int format_adata(u8 *adata, unsigned int a)
|
||||
{
|
||||
int len = 0;
|
||||
|
||||
/* add control info for associated data
|
||||
* RFC 3610 and NIST Special Publication 800-38C
|
||||
*/
|
||||
if (a < 65280) {
|
||||
*(__be16 *)adata = cpu_to_be16(a);
|
||||
len = 2;
|
||||
} else {
|
||||
*(__be16 *)adata = cpu_to_be16(0xfffe);
|
||||
*(__be32 *)&adata[2] = cpu_to_be32(a);
|
||||
len = 6;
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
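format_input() and format_adata() together build the header that CCM feeds into its CBC-MAC: the flags octet of the first block B_0 records whether associated data is present, the tag length M and the length-field width L, and the associated-data length itself gets the 2- or 6-byte prefix defined in RFC 3610 / NIST SP 800-38C. A condensed restatement of the flags computation used above (illustrative only):

#include <stdint.h>

/*
 * B_0 flags octet, per RFC 3610 section 2.2:
 *   bit 6     : Adata (1 if associated data is present)
 *   bits 5..3 : (M - 2) / 2, where M is the tag length in bytes
 *   bits 2..0 : L - 1, where L is the width of the length field
 * The IV already carries L - 1 in iv[0], which is why the code above
 * only ORs in the Adata bit and the tag-length field.
 */
static uint8_t ccm_b0_flags(int have_adata, unsigned int taglen, unsigned int L)
{
	return (have_adata ? 0x40 : 0) | (((taglen - 2) / 2) << 3) | (L - 1);
}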
|
||||
static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
|
||||
struct crypto_ccm_req_priv_ctx *pctx)
|
||||
{
|
||||
unsigned int bs = 16;
|
||||
u8 *odata = pctx->odata;
|
||||
u8 *idata = pctx->idata;
|
||||
int datalen, getlen;
|
||||
|
||||
datalen = n;
|
||||
|
||||
/* first time in here, block may be partially filled. */
|
||||
getlen = bs - pctx->ilen;
|
||||
if (datalen >= getlen) {
|
||||
memcpy(idata + pctx->ilen, data, getlen);
|
||||
crypto_xor(odata, idata, bs);
|
||||
crypto_cipher_encrypt_one(tfm, odata, odata);
|
||||
datalen -= getlen;
|
||||
data += getlen;
|
||||
pctx->ilen = 0;
|
||||
}
|
||||
|
||||
/* now encrypt rest of data */
|
||||
while (datalen >= bs) {
|
||||
crypto_xor(odata, data, bs);
|
||||
crypto_cipher_encrypt_one(tfm, odata, odata);
|
||||
|
||||
datalen -= bs;
|
||||
data += bs;
|
||||
}
|
||||
|
||||
/* check and see if there's leftover data that wasn't
|
||||
* enough to fill a block.
|
||||
*/
|
||||
if (datalen) {
|
||||
memcpy(idata + pctx->ilen, data, datalen);
|
||||
pctx->ilen += datalen;
|
||||
}
|
||||
}
|
||||
|
||||
static void get_data_to_compute(struct crypto_cipher *tfm,
|
||||
struct crypto_ccm_req_priv_ctx *pctx,
|
||||
struct scatterlist *sg, unsigned int len)
|
||||
{
|
||||
struct scatter_walk walk;
|
||||
u8 *data_src;
|
||||
int n;
|
||||
|
||||
scatterwalk_start(&walk, sg);
|
||||
|
||||
while (len) {
|
||||
n = scatterwalk_clamp(&walk, len);
|
||||
if (!n) {
|
||||
scatterwalk_start(&walk, sg_next(walk.sg));
|
||||
n = scatterwalk_clamp(&walk, len);
|
||||
}
|
||||
data_src = scatterwalk_map(&walk, 0);
|
||||
|
||||
compute_mac(tfm, data_src, n, pctx);
|
||||
len -= n;
|
||||
|
||||
scatterwalk_unmap(data_src, 0);
|
||||
scatterwalk_advance(&walk, n);
|
||||
scatterwalk_done(&walk, 0, len);
|
||||
if (len)
|
||||
crypto_yield(pctx->flags);
|
||||
}
|
||||
|
||||
/* any leftover needs padding and then encrypted */
|
||||
if (pctx->ilen) {
|
||||
int padlen;
|
||||
u8 *odata = pctx->odata;
|
||||
u8 *idata = pctx->idata;
|
||||
|
||||
padlen = 16 - pctx->ilen;
|
||||
memset(idata + pctx->ilen, 0, padlen);
|
||||
crypto_xor(odata, idata, 16);
|
||||
crypto_cipher_encrypt_one(tfm, odata, odata);
|
||||
pctx->ilen = 0;
|
||||
}
|
||||
}
|
||||
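compute_mac() and get_data_to_compute() implement CBC-MAC over data that arrives in scatterlist chunks, buffering partial blocks in pctx->idata and zero-padding the tail. Stripped of the scatterwalk handling, the MAC is just CBC encryption where only the final block is kept; in crypto_ccm_auth() below, odata is first seeded with the encrypted B_0 block before the associated data and payload are folded in. A userspace sketch over a contiguous, zero-padded buffer (block_encrypt is a stand-in, not a kernel symbol):

#include <stddef.h>

typedef void (*block_encrypt_fn)(void *key, unsigned char out[16],
				 const unsigned char in[16]);

/* CBC-MAC: mac = E_K(mac ^ block) over every 16-byte block; the caller is
 * expected to have seeded mac and padded len to a multiple of 16. */
static void cbc_mac(block_encrypt_fn enc, void *key,
		    const unsigned char *data, size_t len,
		    unsigned char mac[16])
{
	size_t i, j;

	for (i = 0; i < len; i += 16) {
		for (j = 0; j < 16; j++)
			mac[j] ^= data[i + j];
		enc(key, mac, mac);
	}
}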
|
||||
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
|
||||
unsigned int cryptlen)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
|
||||
struct crypto_cipher *cipher = ctx->cipher;
|
||||
unsigned int assoclen = req->assoclen;
|
||||
u8 *odata = pctx->odata;
|
||||
u8 *idata = pctx->idata;
|
||||
int err;
|
||||
|
||||
/* format control data for input */
|
||||
err = format_input(odata, req, cryptlen);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* encrypt first block to use as start in computing mac */
|
||||
crypto_cipher_encrypt_one(cipher, odata, odata);
|
||||
|
||||
/* format associated data and compute into mac */
|
||||
if (assoclen) {
|
||||
pctx->ilen = format_adata(idata, assoclen);
|
||||
get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
|
||||
}
|
||||
|
||||
/* compute plaintext into mac */
|
||||
get_data_to_compute(cipher, pctx, plain, cryptlen);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
|
||||
{
|
||||
struct aead_request *req = areq->data;
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
|
||||
u8 *odata = pctx->odata;
|
||||
|
||||
if (!err)
|
||||
scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
|
||||
crypto_aead_authsize(aead), 1);
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static inline int crypto_ccm_check_iv(const u8 *iv)
|
||||
{
|
||||
/* 2 <= L <= 8, so 1 <= L' <= 7. */
|
||||
if (1 > iv[0] || iv[0] > 7)
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int crypto_ccm_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
|
||||
struct ablkcipher_request *abreq = &pctx->abreq;
|
||||
struct scatterlist *dst;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
u8 *odata = pctx->odata;
|
||||
u8 *iv = req->iv;
|
||||
int err;
|
||||
|
||||
err = crypto_ccm_check_iv(iv);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pctx->flags = aead_request_flags(req);
|
||||
|
||||
err = crypto_ccm_auth(req, req->src, cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Note: rfc 3610 and NIST 800-38C require counter of
|
||||
* zero to encrypt auth tag.
|
||||
*/
|
||||
memset(iv + 15 - iv[0], 0, iv[0] + 1);
|
||||
|
||||
sg_init_table(pctx->src, 2);
|
||||
sg_set_buf(pctx->src, odata, 16);
|
||||
scatterwalk_sg_chain(pctx->src, 2, req->src);
|
||||
|
||||
dst = pctx->src;
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(pctx->dst, 2);
|
||||
sg_set_buf(pctx->dst, odata, 16);
|
||||
scatterwalk_sg_chain(pctx->dst, 2, req->dst);
|
||||
dst = pctx->dst;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->ctr);
|
||||
ablkcipher_request_set_callback(abreq, pctx->flags,
|
||||
crypto_ccm_encrypt_done, req);
|
||||
ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
|
||||
err = crypto_ablkcipher_encrypt(abreq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* copy authtag to end of dst */
|
||||
scatterwalk_map_and_copy(odata, req->dst, cryptlen,
|
||||
crypto_aead_authsize(aead), 1);
|
||||
return err;
|
||||
}
|
||||
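The encrypt path re-uses req->iv as the initial CTR counter block: the memset above zeroes the trailing L = iv[0] + 1 counter bytes so counting starts at A_0, and the CBC-MAC value in odata is chained in front of the payload scatterlist. A single CTR pass therefore encrypts the tag with counter 0 and the plaintext with counters 1, 2, ..., exactly as RFC 3610 requires. A small sketch of the counter-block preparation, operating on a plain 16-byte array:

#include <string.h>
#include <stdint.h>

/*
 * Turn a CCM nonce block into the initial counter block A_0.
 * ctr[0] already holds L - 1 (the width of the counter field);
 * the last L bytes are the counter and are reset to zero here,
 * mirroring the memset() in crypto_ccm_encrypt()/decrypt() above.
 */
static void ccm_init_counter(uint8_t ctr[16])
{
	unsigned int L = ctr[0] + 1;

	memset(ctr + 16 - L, 0, L);
}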
|
||||
static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
|
||||
int err)
|
||||
{
|
||||
struct aead_request *req = areq->data;
|
||||
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
unsigned int cryptlen = req->cryptlen - authsize;
|
||||
|
||||
if (!err) {
|
||||
err = crypto_ccm_auth(req, req->dst, cryptlen);
|
||||
if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
|
||||
err = -EBADMSG;
|
||||
}
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int crypto_ccm_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
|
||||
struct ablkcipher_request *abreq = &pctx->abreq;
|
||||
struct scatterlist *dst;
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
u8 *authtag = pctx->auth_tag;
|
||||
u8 *odata = pctx->odata;
|
||||
u8 *iv = req->iv;
|
||||
int err;
|
||||
|
||||
if (cryptlen < authsize)
|
||||
return -EINVAL;
|
||||
cryptlen -= authsize;
|
||||
|
||||
err = crypto_ccm_check_iv(iv);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
pctx->flags = aead_request_flags(req);
|
||||
|
||||
scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
|
||||
|
||||
memset(iv + 15 - iv[0], 0, iv[0] + 1);
|
||||
|
||||
sg_init_table(pctx->src, 2);
|
||||
sg_set_buf(pctx->src, authtag, 16);
|
||||
scatterwalk_sg_chain(pctx->src, 2, req->src);
|
||||
|
||||
dst = pctx->src;
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(pctx->dst, 2);
|
||||
sg_set_buf(pctx->dst, authtag, 16);
|
||||
scatterwalk_sg_chain(pctx->dst, 2, req->dst);
|
||||
dst = pctx->dst;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_tfm(abreq, ctx->ctr);
|
||||
ablkcipher_request_set_callback(abreq, pctx->flags,
|
||||
crypto_ccm_decrypt_done, req);
|
||||
ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
|
||||
err = crypto_ablkcipher_decrypt(abreq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = crypto_ccm_auth(req, req->dst, cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* verify */
|
||||
if (memcmp(authtag, odata, authsize))
|
||||
return -EBADMSG;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
|
||||
struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_cipher *cipher;
|
||||
struct crypto_ablkcipher *ctr;
|
||||
unsigned long align;
|
||||
int err;
|
||||
|
||||
cipher = crypto_spawn_cipher(&ictx->cipher);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
ctr = crypto_spawn_skcipher(&ictx->ctr);
|
||||
err = PTR_ERR(ctr);
|
||||
if (IS_ERR(ctr))
|
||||
goto err_free_cipher;
|
||||
|
||||
ctx->cipher = cipher;
|
||||
ctx->ctr = ctr;
|
||||
|
||||
align = crypto_tfm_alg_alignmask(tfm);
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
tfm->crt_aead.reqsize = align +
|
||||
sizeof(struct crypto_ccm_req_priv_ctx) +
|
||||
crypto_ablkcipher_reqsize(ctr);
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_cipher:
|
||||
crypto_free_cipher(cipher);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_cipher(ctx->cipher);
|
||||
crypto_free_ablkcipher(ctx->ctr);
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
|
||||
const char *full_name,
|
||||
const char *ctr_name,
|
||||
const char *cipher_name)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *ctr;
|
||||
struct crypto_alg *cipher;
|
||||
struct ccm_instance_ctx *ictx;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
err = PTR_ERR(cipher);
|
||||
if (IS_ERR(cipher))
|
||||
return ERR_PTR(err);
|
||||
|
||||
err = -EINVAL;
|
||||
if (cipher->cra_blocksize != 16)
|
||||
goto out_put_cipher;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
|
||||
err = -ENOMEM;
|
||||
if (!inst)
|
||||
goto out_put_cipher;
|
||||
|
||||
ictx = crypto_instance_ctx(inst);
|
||||
|
||||
err = crypto_init_spawn(&ictx->cipher, cipher, inst,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
crypto_set_skcipher_spawn(&ictx->ctr, inst);
|
||||
err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
|
||||
crypto_requires_sync(algt->type,
|
||||
algt->mask));
|
||||
if (err)
|
||||
goto err_drop_cipher;
|
||||
|
||||
ctr = crypto_skcipher_spawn_alg(&ictx->ctr);
|
||||
|
||||
/* Not a stream cipher? */
|
||||
err = -EINVAL;
|
||||
if (ctr->cra_blocksize != 1)
|
||||
goto err_drop_ctr;
|
||||
|
||||
/* We want the real thing! */
|
||||
if (ctr->cra_ablkcipher.ivsize != 16)
|
||||
goto err_drop_ctr;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"ccm_base(%s,%s)", ctr->cra_driver_name,
|
||||
cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
goto err_drop_ctr;
|
||||
|
||||
memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
|
||||
(__alignof__(u32) - 1);
|
||||
inst->alg.cra_type = &crypto_aead_type;
|
||||
inst->alg.cra_aead.ivsize = 16;
|
||||
inst->alg.cra_aead.maxauthsize = 16;
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
|
||||
inst->alg.cra_init = crypto_ccm_init_tfm;
|
||||
inst->alg.cra_exit = crypto_ccm_exit_tfm;
|
||||
inst->alg.cra_aead.setkey = crypto_ccm_setkey;
|
||||
inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
|
||||
inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
|
||||
|
||||
out:
|
||||
crypto_mod_put(cipher);
|
||||
return inst;
|
||||
|
||||
err_drop_ctr:
|
||||
crypto_drop_skcipher(&ictx->ctr);
|
||||
err_drop_cipher:
|
||||
crypto_drop_spawn(&ictx->cipher);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
out_put_cipher:
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
|
||||
{
|
||||
int err;
|
||||
const char *cipher_name;
|
||||
char ctr_name[CRYPTO_MAX_ALG_NAME];
|
||||
char full_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(cipher_name);
|
||||
if (IS_ERR(cipher_name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
|
||||
cipher_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
|
||||
return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
|
||||
}
|
||||
|
||||
static void crypto_ccm_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_spawn(&ctx->cipher);
|
||||
crypto_drop_skcipher(&ctx->ctr);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_ccm_tmpl = {
|
||||
.name = "ccm",
|
||||
.alloc = crypto_ccm_alloc,
|
||||
.free = crypto_ccm_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
|
||||
{
|
||||
int err;
|
||||
const char *ctr_name;
|
||||
const char *cipher_name;
|
||||
char full_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
ctr_name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(ctr_name);
|
||||
if (IS_ERR(ctr_name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[2]);
|
||||
err = PTR_ERR(cipher_name);
|
||||
if (IS_ERR(cipher_name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
|
||||
ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
|
||||
return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_ccm_base_tmpl = {
|
||||
.name = "ccm_base",
|
||||
.alloc = crypto_ccm_base_alloc,
|
||||
.free = crypto_ccm_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
|
||||
struct crypto_aead *child = ctx->child;
|
||||
int err;
|
||||
|
||||
if (keylen < 3)
|
||||
return -EINVAL;
|
||||
|
||||
keylen -= 3;
|
||||
memcpy(ctx->nonce, key + keylen, 3);
|
||||
|
||||
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_aead_setkey(child, key, keylen);
|
||||
crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
|
||||
|
||||
switch (authsize) {
|
||||
case 8:
|
||||
case 12:
|
||||
case 16:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return crypto_aead_setauthsize(ctx->child, authsize);
|
||||
}
|
||||
|
||||
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_aead *child = ctx->child;
|
||||
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
|
||||
crypto_aead_alignmask(child) + 1);
|
||||
|
||||
/* L' */
|
||||
iv[0] = 3;
|
||||
|
||||
memcpy(iv + 1, ctx->nonce, 3);
|
||||
memcpy(iv + 4, req->iv, 8);
|
||||
|
||||
aead_request_set_tfm(subreq, child);
|
||||
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
|
||||
req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
|
||||
aead_request_set_assoc(subreq, req->assoc, req->assoclen);
|
||||
|
||||
return subreq;
|
||||
}
|
||||
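For RFC 4309 (CCM in IPsec ESP) the full 16-byte CCM IV is assembled from three pieces: a fixed flags byte of 3 (so L = 4, leaving four counter bytes), the 3-byte salt taken from the end of the key in setkey, and the 8-byte per-packet IV carried with the request. The layout the code above produces is, schematically (illustrative helper, not part of the patch):

#include <string.h>
#include <stdint.h>

/* Build the 16-byte CCM IV used by rfc4309:
 * flags(1) || salt(3) || per-packet IV(8) || counter(4).
 * The last four bytes are the running counter and are initialised
 * later by the CCM layer itself (see ccm_init_counter above). */
static void rfc4309_build_iv(uint8_t iv[16], const uint8_t salt[3],
			     const uint8_t seq_iv[8])
{
	iv[0] = 3;			/* L' = 3, i.e. a 4-byte counter field */
	memcpy(iv + 1, salt, 3);	/* salt from the tail of the key */
	memcpy(iv + 4, seq_iv, 8);	/* per-packet IV from the request */
	memset(iv + 12, 0, 4);		/* counter, filled in by CCM */
}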
|
||||
static int crypto_rfc4309_encrypt(struct aead_request *req)
|
||||
{
|
||||
req = crypto_rfc4309_crypt(req);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
|
||||
static int crypto_rfc4309_decrypt(struct aead_request *req)
|
||||
{
|
||||
req = crypto_rfc4309_crypt(req);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
|
||||
static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_aead *aead;
|
||||
unsigned long align;
|
||||
|
||||
aead = crypto_spawn_aead(spawn);
|
||||
if (IS_ERR(aead))
|
||||
return PTR_ERR(aead);
|
||||
|
||||
ctx->child = aead;
|
||||
|
||||
align = crypto_aead_alignmask(aead);
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
tfm->crt_aead.reqsize = sizeof(struct aead_request) +
|
||||
ALIGN(crypto_aead_reqsize(aead),
|
||||
crypto_tfm_ctx_alignment()) +
|
||||
align + 16;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_aead(ctx->child);
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
const char *ccm_name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
ccm_name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(ccm_name);
|
||||
if (IS_ERR(ccm_name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spawn = crypto_instance_ctx(inst);
|
||||
crypto_set_aead_spawn(spawn, inst);
|
||||
err = crypto_grab_aead(spawn, ccm_name, 0,
|
||||
crypto_requires_sync(algt->type, algt->mask));
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
|
||||
alg = crypto_aead_spawn_alg(spawn);
|
||||
|
||||
err = -EINVAL;
|
||||
|
||||
/* We only support 16-byte blocks. */
|
||||
if (alg->cra_aead.ivsize != 16)
|
||||
goto out_drop_alg;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
if (alg->cra_blocksize != 1)
|
||||
goto out_drop_alg;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
|
||||
snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4309(%s)", alg->cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_nivaead_type;
|
||||
|
||||
inst->alg.cra_aead.ivsize = 8;
|
||||
inst->alg.cra_aead.maxauthsize = 16;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
|
||||
|
||||
inst->alg.cra_init = crypto_rfc4309_init_tfm;
|
||||
inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
|
||||
|
||||
inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
|
||||
inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
|
||||
inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
|
||||
|
||||
inst->alg.cra_aead.geniv = "seqiv";
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
out_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void crypto_rfc4309_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_spawn(crypto_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_rfc4309_tmpl = {
|
||||
.name = "rfc4309",
|
||||
.alloc = crypto_rfc4309_alloc,
|
||||
.free = crypto_rfc4309_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init crypto_ccm_module_init(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = crypto_register_template(&crypto_ccm_base_tmpl);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_register_template(&crypto_ccm_tmpl);
|
||||
if (err)
|
||||
goto out_undo_base;
|
||||
|
||||
err = crypto_register_template(&crypto_rfc4309_tmpl);
|
||||
if (err)
|
||||
goto out_undo_ccm;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
out_undo_ccm:
|
||||
crypto_unregister_template(&crypto_ccm_tmpl);
|
||||
out_undo_base:
|
||||
crypto_unregister_template(&crypto_ccm_base_tmpl);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void __exit crypto_ccm_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&crypto_rfc4309_tmpl);
|
||||
crypto_unregister_template(&crypto_ccm_tmpl);
|
||||
crypto_unregister_template(&crypto_ccm_base_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_ccm_module_init);
|
||||
module_exit(crypto_ccm_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Counter with CBC MAC");
|
||||
MODULE_ALIAS("ccm_base");
|
||||
MODULE_ALIAS("rfc4309");
crypto/chainiv.c: new file, 331 lines
@ -0,0 +1,331 @@
|
||||
/*
|
||||
* chainiv: Chain IV Generator
|
||||
*
|
||||
* Generate IVs simply be using the last block of the previous encryption.
|
||||
* This is mainly useful for CBC with a synchronous algorithm.
|
||||
*
|
||||
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
enum {
|
||||
CHAINIV_STATE_INUSE = 0,
|
||||
};
|
||||
|
||||
struct chainiv_ctx {
|
||||
spinlock_t lock;
|
||||
char iv[];
|
||||
};
|
||||
|
||||
struct async_chainiv_ctx {
|
||||
unsigned long state;
|
||||
|
||||
spinlock_t lock;
|
||||
int err;
|
||||
|
||||
struct crypto_queue queue;
|
||||
struct work_struct postponed;
|
||||
|
||||
char iv[];
|
||||
};
|
||||
|
||||
static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
|
||||
unsigned int ivsize;
|
||||
int err;
|
||||
|
||||
ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags &
|
||||
~CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||
req->creq.base.complete,
|
||||
req->creq.base.data);
|
||||
ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
|
||||
req->creq.nbytes, req->creq.info);
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
|
||||
ivsize = crypto_ablkcipher_ivsize(geniv);
|
||||
|
||||
memcpy(req->giv, ctx->iv, ivsize);
|
||||
memcpy(subreq->info, ctx->iv, ivsize);
|
||||
|
||||
err = crypto_ablkcipher_encrypt(subreq);
|
||||
if (err)
|
||||
goto unlock;
|
||||
|
||||
memcpy(ctx->iv, subreq->info, ivsize);
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
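chainiv_givencrypt() is the whole trick of this generator: under the per-tfm lock it hands out the stored IV, runs the encryption, and then saves subreq->info, which after a CBC encryption holds the last ciphertext block, as the IV for the next request. The pattern in miniature (userspace sketch; run_cipher is a stand-in that must write the "next" IV back into iv[], the way the blkcipher walk updates walk->iv):

#include <string.h>

static unsigned char chain_iv[16];

/* Publish the IV we are about to use in giv[], encrypt, and leave whatever
 * the cipher wrote back in chain_iv[] as the IV for the next request. */
static int chainiv_step(int (*run_cipher)(unsigned char iv[16],
					  unsigned char *buf, unsigned int len),
			unsigned char giv[16], unsigned char *buf,
			unsigned int len)
{
	memcpy(giv, chain_iv, 16);
	return run_cipher(chain_iv, buf, len);
}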
|
||||
static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
if (crypto_ablkcipher_crt(geniv)->givencrypt !=
|
||||
chainiv_givencrypt_first)
|
||||
goto unlock;
|
||||
|
||||
crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
|
||||
get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
return chainiv_givencrypt(req);
|
||||
}
|
||||
|
||||
static int chainiv_init_common(struct crypto_tfm *tfm)
|
||||
{
|
||||
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
|
||||
|
||||
return skcipher_geniv_init(tfm);
|
||||
}
|
||||
|
||||
static int chainiv_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
return chainiv_init_common(tfm);
|
||||
}
|
||||
|
||||
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
|
||||
{
|
||||
int queued;
|
||||
|
||||
if (!ctx->queue.qlen) {
|
||||
smp_mb__before_clear_bit();
|
||||
clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
|
||||
|
||||
if (!ctx->queue.qlen ||
|
||||
test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
|
||||
goto out;
|
||||
}
|
||||
|
||||
queued = schedule_work(&ctx->postponed);
|
||||
BUG_ON(!queued);
|
||||
|
||||
out:
|
||||
return ctx->err;
|
||||
}
|
||||
|
||||
static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
int err;
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
err = skcipher_enqueue_givcrypt(&ctx->queue, req);
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
|
||||
return err;
|
||||
|
||||
ctx->err = err;
|
||||
return async_chainiv_schedule_work(ctx);
|
||||
}
|
||||
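The async variant avoids holding a spinlock across the whole encryption by claiming the context with test_and_set_bit(CHAINIV_STATE_INUSE): whoever wins the bit processes requests, everyone else enqueues theirs, and the postponed work item drains the queue later (with the clear-then-recheck dance above closing the race against late arrivals). The same claim/release pattern in miniature, using C11 atomics instead of the kernel bitops (purely illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag in_use = ATOMIC_FLAG_INIT;

/* Try to become the single processor of queued work.  Returns true if the
 * caller now owns the context and must drain the queue; false means another
 * thread already owns it and will see our queued request. */
static bool claim_context(void)
{
	return !atomic_flag_test_and_set(&in_use);
}

static void release_context(void)
{
	atomic_flag_clear(&in_use);
}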
|
||||
static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
|
||||
unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
|
||||
|
||||
memcpy(req->giv, ctx->iv, ivsize);
|
||||
memcpy(subreq->info, ctx->iv, ivsize);
|
||||
|
||||
ctx->err = crypto_ablkcipher_encrypt(subreq);
|
||||
if (ctx->err)
|
||||
goto out;
|
||||
|
||||
memcpy(ctx->iv, subreq->info, ivsize);
|
||||
|
||||
out:
|
||||
return async_chainiv_schedule_work(ctx);
|
||||
}
|
||||
|
||||
static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
|
||||
|
||||
ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags,
|
||||
req->creq.base.complete,
|
||||
req->creq.base.data);
|
||||
ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
|
||||
req->creq.nbytes, req->creq.info);
|
||||
|
||||
if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
|
||||
goto postpone;
|
||||
|
||||
if (ctx->queue.qlen) {
|
||||
clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
|
||||
goto postpone;
|
||||
}
|
||||
|
||||
return async_chainiv_givencrypt_tail(req);
|
||||
|
||||
postpone:
|
||||
return async_chainiv_postpone_request(req);
|
||||
}
|
||||
|
||||
static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
|
||||
if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
|
||||
goto out;
|
||||
|
||||
if (crypto_ablkcipher_crt(geniv)->givencrypt !=
|
||||
async_chainiv_givencrypt_first)
|
||||
goto unlock;
|
||||
|
||||
crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
|
||||
get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv));
|
||||
|
||||
unlock:
|
||||
clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
|
||||
|
||||
out:
|
||||
return async_chainiv_givencrypt(req);
|
||||
}
|
||||
|
||||
static void async_chainiv_do_postponed(struct work_struct *work)
|
||||
{
|
||||
struct async_chainiv_ctx *ctx = container_of(work,
|
||||
struct async_chainiv_ctx,
|
||||
postponed);
|
||||
struct skcipher_givcrypt_request *req;
|
||||
struct ablkcipher_request *subreq;
|
||||
|
||||
/* Only handle one request at a time to avoid hogging keventd. */
|
||||
spin_lock_bh(&ctx->lock);
|
||||
req = skcipher_dequeue_givcrypt(&ctx->queue);
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
if (!req) {
|
||||
async_chainiv_schedule_work(ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
subreq = skcipher_givcrypt_reqctx(req);
|
||||
subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
async_chainiv_givencrypt_tail(req);
|
||||
}
|
||||
|
||||
static int async_chainiv_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
crypto_init_queue(&ctx->queue, 100);
|
||||
INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
|
||||
|
||||
return chainiv_init_common(tfm);
|
||||
}
|
||||
|
||||
static void async_chainiv_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
|
||||
|
||||
skcipher_geniv_exit(tfm);
|
||||
}
|
||||
|
||||
static struct crypto_template chainiv_tmpl;
|
||||
|
||||
static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
|
||||
if (IS_ERR(inst))
|
||||
goto out;
|
||||
|
||||
inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;
|
||||
|
||||
inst->alg.cra_init = chainiv_init;
|
||||
inst->alg.cra_exit = skcipher_geniv_exit;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
|
||||
|
||||
if (!crypto_requires_sync(algt->type, algt->mask)) {
|
||||
inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
|
||||
|
||||
inst->alg.cra_ablkcipher.givencrypt =
|
||||
async_chainiv_givencrypt_first;
|
||||
|
||||
inst->alg.cra_init = async_chainiv_init;
|
||||
inst->alg.cra_exit = async_chainiv_exit;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
|
||||
}
|
||||
|
||||
inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
}
|
||||
|
||||
static struct crypto_template chainiv_tmpl = {
|
||||
.name = "chainiv",
|
||||
.alloc = chainiv_alloc,
|
||||
.free = skcipher_geniv_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init chainiv_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&chainiv_tmpl);
|
||||
}
|
||||
|
||||
static void __exit chainiv_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&chainiv_tmpl);
|
||||
}
|
||||
|
||||
module_init(chainiv_module_init);
|
||||
module_exit(chainiv_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Chain IV Generator");
|
@@ -228,7 +228,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
 	struct crypto_alg *alg;
 
 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
-				  CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+				  CRYPTO_ALG_TYPE_MASK);
 	if (IS_ERR(alg))
 		return ERR_PTR(PTR_ERR(alg));
 
@@ -236,13 +236,15 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
 	if (IS_ERR(inst))
 		goto out_put_alg;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 	inst->alg.cra_type = &crypto_ablkcipher_type;
 
 	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
 	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
 	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
 
+	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
+
 	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
 
 	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
@ -16,15 +16,17 @@
|
||||
* (at your option) any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#define NULL_KEY_SIZE 0
|
||||
#define NULL_BLOCK_SIZE 1
|
||||
#define NULL_DIGEST_SIZE 0
|
||||
#define NULL_IV_SIZE 0
|
||||
|
||||
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
|
||||
unsigned int slen, u8 *dst, unsigned int *dlen)
|
||||
@ -55,6 +57,26 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
memcpy(dst, src, NULL_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static int skcipher_null_crypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while (walk.nbytes) {
|
||||
if (walk.src.virt.addr != walk.dst.virt.addr)
|
||||
memcpy(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
walk.nbytes);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
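The new "ecb(cipher_null)" algorithm gives IPsec and the test code a real blkcipher that simply copies (or leaves in place) its input. As a rough sketch of how a kernel-side caller of this era might exercise it through the synchronous blkcipher interface (error handling trimmed; the tcrypt module remains the authoritative example):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int null_copy_demo(u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, len);
	/* "Encryption" with the null cipher leaves buf unchanged. */
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

	crypto_free_blkcipher(tfm);
	return err;
}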
|
||||
static struct crypto_alg compress_null = {
|
||||
.cra_name = "compress_null",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
|
||||
@ -76,6 +98,7 @@ static struct crypto_alg digest_null = {
|
||||
.cra_list = LIST_HEAD_INIT(digest_null.cra_list),
|
||||
.cra_u = { .digest = {
|
||||
.dia_digestsize = NULL_DIGEST_SIZE,
|
||||
.dia_setkey = null_setkey,
|
||||
.dia_init = null_init,
|
||||
.dia_update = null_update,
|
||||
.dia_final = null_final } }
|
||||
@ -96,6 +119,25 @@ static struct crypto_alg cipher_null = {
|
||||
.cia_decrypt = null_crypt } }
|
||||
};
|
||||
|
||||
static struct crypto_alg skcipher_null = {
|
||||
.cra_name = "ecb(cipher_null)",
|
||||
.cra_driver_name = "ecb-cipher_null",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = NULL_BLOCK_SIZE,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_ctxsize = 0,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(skcipher_null.cra_list),
|
||||
.cra_u = { .blkcipher = {
|
||||
.min_keysize = NULL_KEY_SIZE,
|
||||
.max_keysize = NULL_KEY_SIZE,
|
||||
.ivsize = NULL_IV_SIZE,
|
||||
.setkey = null_setkey,
|
||||
.encrypt = skcipher_null_crypt,
|
||||
.decrypt = skcipher_null_crypt } }
|
||||
};
|
||||
|
||||
MODULE_ALIAS("compress_null");
|
||||
MODULE_ALIAS("digest_null");
|
||||
MODULE_ALIAS("cipher_null");
|
||||
@ -108,27 +150,35 @@ static int __init init(void)
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
ret = crypto_register_alg(&skcipher_null);
|
||||
if (ret < 0)
|
||||
goto out_unregister_cipher;
|
||||
|
||||
ret = crypto_register_alg(&digest_null);
|
||||
if (ret < 0) {
|
||||
crypto_unregister_alg(&cipher_null);
|
||||
goto out;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto out_unregister_skcipher;
|
||||
|
||||
ret = crypto_register_alg(&compress_null);
|
||||
if (ret < 0) {
|
||||
crypto_unregister_alg(&digest_null);
|
||||
crypto_unregister_alg(&cipher_null);
|
||||
goto out;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto out_unregister_digest;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
|
||||
out_unregister_digest:
|
||||
crypto_unregister_alg(&digest_null);
|
||||
out_unregister_skcipher:
|
||||
crypto_unregister_alg(&skcipher_null);
|
||||
out_unregister_cipher:
|
||||
crypto_unregister_alg(&cipher_null);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&compress_null);
|
||||
crypto_unregister_alg(&digest_null);
|
||||
crypto_unregister_alg(&skcipher_null);
|
||||
crypto_unregister_alg(&cipher_null);
|
||||
}
crypto/ctr.c: new file, 422 lines
@ -0,0 +1,422 @@
|
||||
/*
|
||||
* CTR: Counter mode
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct crypto_ctr_ctx {
|
||||
struct crypto_cipher *child;
|
||||
};
|
||||
|
||||
struct crypto_rfc3686_ctx {
|
||||
struct crypto_blkcipher *child;
|
||||
u8 nonce[CTR_RFC3686_NONCE_SIZE];
|
||||
};
|
||||
|
||||
static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
int err;
|
||||
|
||||
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_cipher_setkey(child, key, keylen);
|
||||
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
unsigned int bsize = crypto_cipher_blocksize(tfm);
|
||||
unsigned long alignmask = crypto_cipher_alignmask(tfm);
|
||||
u8 *ctrblk = walk->iv;
|
||||
u8 tmp[bsize + alignmask];
|
||||
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
|
||||
crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
|
||||
crypto_xor(keystream, src, nbytes);
|
||||
memcpy(dst, keystream, nbytes);
|
||||
|
||||
crypto_inc(ctrblk, bsize);
|
||||
}
|
||||
|
||||
static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_encrypt;
|
||||
unsigned int bsize = crypto_cipher_blocksize(tfm);
|
||||
u8 *ctrblk = walk->iv;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
|
||||
do {
|
||||
/* create keystream */
|
||||
fn(crypto_cipher_tfm(tfm), dst, ctrblk);
|
||||
crypto_xor(dst, src, bsize);
|
||||
|
||||
/* increment counter in counterblock */
|
||||
crypto_inc(ctrblk, bsize);
|
||||
|
||||
src += bsize;
|
||||
dst += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
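The segment and in-place paths differ only in where the keystream lands; the mode itself is the usual CTR construction: encrypt the counter block, XOR the result into the data, then increment the counter as a big-endian integer, which is what crypto_inc() does. A userspace restatement of one full block (block_encrypt is a stand-in for the cipher, not a kernel symbol):

#include <stddef.h>

typedef void (*block_encrypt_fn)(void *key, unsigned char out[16],
				 const unsigned char in[16]);

/* Big-endian increment of an n-byte counter, the operation crypto_inc()
 * performs (the kernel helper works a word at a time, same result). */
static void ctr_inc(unsigned char *ctr, size_t n)
{
	while (n--)
		if (++ctr[n] != 0)
			break;
}

/* One full CTR block: buf ^= E_K(ctr); ctr++. */
static void ctr_step(block_encrypt_fn enc, void *key,
		     unsigned char ctr[16], unsigned char buf[16])
{
	unsigned char keystream[16];
	size_t i;

	enc(key, keystream, ctr);
	for (i = 0; i < 16; i++)
		buf[i] ^= keystream[i];
	ctr_inc(ctr, 16);
}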
|
||||
static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_encrypt;
|
||||
unsigned int bsize = crypto_cipher_blocksize(tfm);
|
||||
unsigned long alignmask = crypto_cipher_alignmask(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *ctrblk = walk->iv;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 tmp[bsize + alignmask];
|
||||
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
|
||||
|
||||
do {
|
||||
/* create keystream */
|
||||
fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
|
||||
crypto_xor(src, keystream, bsize);
|
||||
|
||||
/* increment counter in counterblock */
|
||||
crypto_inc(ctrblk, bsize);
|
||||
|
||||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int crypto_ctr_crypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
unsigned int bsize = crypto_cipher_blocksize(child);
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, bsize);
|
||||
|
||||
while (walk.nbytes >= bsize) {
|
||||
if (walk.src.virt.addr == walk.dst.virt.addr)
|
||||
nbytes = crypto_ctr_crypt_inplace(&walk, child);
|
||||
else
|
||||
nbytes = crypto_ctr_crypt_segment(&walk, child);
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
crypto_ctr_crypt_final(&walk, child);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
|
||||
struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_cipher *cipher;
|
||||
|
||||
cipher = crypto_spawn_cipher(spawn);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
ctx->child = cipher;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_cipher(ctx->child);
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *alg;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
|
||||
alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
if (IS_ERR(alg))
|
||||
return ERR_PTR(PTR_ERR(alg));
|
||||
|
||||
/* Block size must be >= 4 bytes. */
|
||||
err = -EINVAL;
|
||||
if (alg->cra_blocksize < 4)
|
||||
goto out_put_alg;
|
||||
|
||||
/* If this is false we'd fail the alignment of crypto_inc. */
|
||||
if (alg->cra_blocksize % 4)
|
||||
goto out_put_alg;
|
||||
|
||||
inst = crypto_alloc_instance("ctr", alg);
|
||||
if (IS_ERR(inst))
|
||||
goto out;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
|
||||
inst->alg.cra_type = &crypto_blkcipher_type;
|
||||
|
||||
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
|
||||
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
|
||||
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);
|
||||
|
||||
inst->alg.cra_init = crypto_ctr_init_tfm;
|
||||
inst->alg.cra_exit = crypto_ctr_exit_tfm;
|
||||
|
||||
inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
|
||||
inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
|
||||
inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
|
||||
|
||||
out:
|
||||
crypto_mod_put(alg);
|
||||
return inst;
|
||||
|
||||
out_put_alg:
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void crypto_ctr_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_spawn(crypto_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_ctr_tmpl = {
|
||||
.name = "ctr",
|
||||
.alloc = crypto_ctr_alloc,
|
||||
.free = crypto_ctr_free,
|
||||
.module = THIS_MODULE,
|
||||
};

static int crypto_rfc3686_setkey(struct crypto_tfm *parent, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	/* the nonce is stored in bytes at end of key */
	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	return err;
}

static int crypto_rfc3686_crypt(struct blkcipher_desc *desc,
				struct scatterlist *dst,
				struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_blkcipher *child = ctx->child;
	unsigned long alignmask = crypto_blkcipher_alignmask(tfm);
	u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask];
	u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1);
	u8 *info = desc->info;
	int err;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	desc->tfm = child;
	desc->info = iv;
	err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
	desc->tfm = tfm;
	desc->info = info;

	return err;
}
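
The comments in crypto_rfc3686_crypt spell out the RFC 3686 counter-block layout: a 4-byte nonce (taken from the tail of the key at setkey time), the 8-byte per-request IV, and a 4-byte big-endian block counter that starts at 1. Below is a minimal userspace sketch of just that layout; the helper name and the hard-coded demo values are assumptions for illustration, not kernel code.

/* Sketch: RFC 3686 counter block = nonce(4) || iv(8) || counter(4, big-endian, starts at 1). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NONCE_SIZE 4
#define IV_SIZE    8
#define BLOCK_SIZE 16

static void rfc3686_counter_block(uint8_t out[BLOCK_SIZE],
				  const uint8_t nonce[NONCE_SIZE],
				  const uint8_t iv[IV_SIZE])
{
	memcpy(out, nonce, NONCE_SIZE);
	memcpy(out + NONCE_SIZE, iv, IV_SIZE);

	/* counter portion: big-endian 1 in the last four bytes */
	out[12] = 0;
	out[13] = 0;
	out[14] = 0;
	out[15] = 1;
}

int main(void)
{
	uint8_t nonce[NONCE_SIZE] = { 0xde, 0xad, 0xbe, 0xef };	/* demo values only */
	uint8_t iv[IV_SIZE] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t block[BLOCK_SIZE];

	rfc3686_counter_block(block, nonce, iv);
	for (int i = 0; i < BLOCK_SIZE; i++)
		printf("%02x ", block[i]);
	printf("\n");
	return 0;
}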

static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	return 0;
}

static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER,
			      CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(alg);
	if (IS_ERR(alg))
		return ERR_PTR(err);

	/* We only support 16-byte blocks. */
	err = -EINVAL;
	if (alg->cra_blkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE)
		goto out_put_alg;

	/* Not a stream cipher? */
	if (alg->cra_blocksize != 1)
		goto out_put_alg;

	inst = crypto_alloc_instance("rfc3686", alg);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = CTR_RFC3686_IV_SIZE;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize
					      + CTR_RFC3686_NONCE_SIZE;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize
					      + CTR_RFC3686_NONCE_SIZE;

	inst->alg.cra_blkcipher.geniv = "seqiv";

	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);

	inst->alg.cra_init = crypto_rfc3686_init_tfm;
	inst->alg.cra_exit = crypto_rfc3686_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_rfc3686_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_rfc3686_crypt;
	inst->alg.cra_blkcipher.decrypt = crypto_rfc3686_crypt;

out:
	crypto_mod_put(alg);
	return inst;

out_put_alg:
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_template crypto_rfc3686_tmpl = {
	.name = "rfc3686",
	.alloc = crypto_rfc3686_alloc,
	.free = crypto_ctr_free,
	.module = THIS_MODULE,
};

static int __init crypto_ctr_module_init(void)
{
	int err;

	err = crypto_register_template(&crypto_ctr_tmpl);
	if (err)
		goto out;

	err = crypto_register_template(&crypto_rfc3686_tmpl);
	if (err)
		goto out_drop_ctr;

out:
	return err;

out_drop_ctr:
	crypto_unregister_template(&crypto_ctr_tmpl);
	goto out;
}

static void __exit crypto_ctr_module_exit(void)
{
	crypto_unregister_template(&crypto_rfc3686_tmpl);
	crypto_unregister_template(&crypto_ctr_tmpl);
}

module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR Counter block mode");
MODULE_ALIAS("rfc3686");
@@ -20,13 +20,7 @@
#include <linux/crypto.h>
#include <linux/types.h>

#define DES_KEY_SIZE 8
#define DES_EXPKEY_WORDS 32
#define DES_BLOCK_SIZE 8

#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE)
#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
#include <crypto/des.h>

#define ROL(x, r) ((x) = rol32((x), (r)))
#define ROR(x, r) ((x) = ror32((x), (r)))
@@ -634,7 +628,7 @@ static const u32 S8[64] = {
 * Choice 1 has operated on the key.
 *
 */
static unsigned long ekey(u32 *pe, const u8 *k)
unsigned long des_ekey(u32 *pe, const u8 *k)
{
	/* K&R: long is at least 32 bits */
	unsigned long a, b, c, d, w;
@@ -709,6 +703,7 @@ static unsigned long ekey(u32 *pe, const u8 *k)
	/* Zero if weak key */
	return w;
}
EXPORT_SYMBOL_GPL(des_ekey);

/*
 * Decryption key expansion
@@ -792,7 +787,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
	int ret;

	/* Expand to tmp */
	ret = ekey(tmp, key);
	ret = des_ekey(tmp, key);

	if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
@@ -879,9 +874,9 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
		return -EINVAL;
	}

	ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
	des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
	dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
	ekey(expkey, key);
	des_ekey(expkey, key);

	return 0;
}
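
The des3_ede_setkey hunk above expands a 24-byte key as three consecutive DES keys, building an encrypt, then decrypt, then encrypt schedule (the E-D-E in the name). A trivial userspace sketch of just the key-splitting step follows; the schedule expansion itself (the kernel's des_ekey/dkey) is deliberately left out, and the demo key material is made up.

/* Sketch: split a 24-byte 3DES-EDE key into its three 8-byte DES keys. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DES_KEY_SIZE      8
#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE)

int main(void)
{
	uint8_t key[DES3_EDE_KEY_SIZE];
	uint8_t k1[DES_KEY_SIZE], k2[DES_KEY_SIZE], k3[DES_KEY_SIZE];

	for (int i = 0; i < DES3_EDE_KEY_SIZE; i++)
		key[i] = (uint8_t)i;		/* demo key material only */

	/* K1 feeds the first encrypt schedule, K2 the decrypt one, K3 the final encrypt. */
	memcpy(k1, key, DES_KEY_SIZE);
	memcpy(k2, key + DES_KEY_SIZE, DES_KEY_SIZE);
	memcpy(k3, key + 2 * DES_KEY_SIZE, DES_KEY_SIZE);

	printf("K1[0]=%02x K2[0]=%02x K3[0]=%02x\n", k1[0], k2[0], k3[0]);
	return 0;
}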
@@ -12,6 +12,7 @@
 *
 */

#include <crypto/scatterwalk.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
@@ -20,9 +21,6 @@
#include <linux/module.h>
#include <linux/scatterlist.h>

#include "internal.h"
#include "scatterwalk.h"

static int init(struct hash_desc *desc)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
crypto/eseqiv.c (new file, 264 lines)
@@ -0,0 +1,264 @@
/*
 * eseqiv: Encrypted Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct eseqiv_request_ctx {
	struct scatterlist src[2];
	struct scatterlist dst[2];
	char tail[];
};

struct eseqiv_ctx {
	spinlock_t lock;
	unsigned int reqoff;
	char salt[];
};

static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
|
||||
|
||||
memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
|
||||
crypto_ablkcipher_alignmask(geniv) + 1),
|
||||
crypto_ablkcipher_ivsize(geniv));
|
||||
}
|
||||
|
||||
static void eseqiv_complete(struct crypto_async_request *base, int err)
|
||||
{
|
||||
struct skcipher_givcrypt_request *req = base->data;
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
eseqiv_complete2(req);
|
||||
|
||||
out:
|
||||
skcipher_givcrypt_complete(req, err);
|
||||
}
|
||||
|
||||
static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
|
||||
int chain)
|
||||
{
|
||||
if (chain) {
|
||||
head->length += sg->length;
|
||||
sg = scatterwalk_sg_next(sg);
|
||||
}
|
||||
|
||||
if (sg)
|
||||
scatterwalk_sg_chain(head, 2, sg);
|
||||
else
|
||||
sg_mark_end(head);
|
||||
}
|
||||
|
||||
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
|
||||
struct ablkcipher_request *subreq;
|
||||
crypto_completion_t complete;
|
||||
void *data;
|
||||
struct scatterlist *osrc, *odst;
|
||||
struct scatterlist *dst;
|
||||
struct page *srcp;
|
||||
struct page *dstp;
|
||||
u8 *giv;
|
||||
u8 *vsrc;
|
||||
u8 *vdst;
|
||||
__be64 seq;
|
||||
unsigned int ivsize;
|
||||
unsigned int len;
|
||||
int err;
|
||||
|
||||
subreq = (void *)(reqctx->tail + ctx->reqoff);
|
||||
ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
|
||||
|
||||
giv = req->giv;
|
||||
complete = req->creq.base.complete;
|
||||
data = req->creq.base.data;
|
||||
|
||||
osrc = req->creq.src;
|
||||
odst = req->creq.dst;
|
||||
srcp = sg_page(osrc);
|
||||
dstp = sg_page(odst);
|
||||
vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
|
||||
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
|
||||
|
||||
ivsize = crypto_ablkcipher_ivsize(geniv);
|
||||
|
||||
if (vsrc != giv + ivsize && vdst != giv + ivsize) {
|
||||
giv = PTR_ALIGN((u8 *)reqctx->tail,
|
||||
crypto_ablkcipher_alignmask(geniv) + 1);
|
||||
complete = eseqiv_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
|
||||
data);
|
||||
|
||||
sg_init_table(reqctx->src, 2);
|
||||
sg_set_buf(reqctx->src, giv, ivsize);
|
||||
eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
|
||||
|
||||
dst = reqctx->src;
|
||||
if (osrc != odst) {
|
||||
sg_init_table(reqctx->dst, 2);
|
||||
sg_set_buf(reqctx->dst, giv, ivsize);
|
||||
eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
|
||||
|
||||
dst = reqctx->dst;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
|
||||
req->creq.nbytes, req->creq.info);
|
||||
|
||||
memcpy(req->creq.info, ctx->salt, ivsize);
|
||||
|
||||
len = ivsize;
|
||||
if (ivsize > sizeof(u64)) {
|
||||
memset(req->giv, 0, ivsize - sizeof(u64));
|
||||
len = sizeof(u64);
|
||||
}
|
||||
seq = cpu_to_be64(req->seq);
|
||||
memcpy(req->giv + ivsize - len, &seq, len);
|
||||
|
||||
err = crypto_ablkcipher_encrypt(subreq);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
eseqiv_complete2(req);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first)
|
||||
goto unlock;
|
||||
|
||||
crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
|
||||
get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv));
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
return eseqiv_givencrypt(req);
|
||||
}
|
||||
|
||||
static int eseqiv_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
|
||||
struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
unsigned long alignmask;
|
||||
unsigned int reqsize;
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
alignmask = crypto_tfm_ctx_alignment() - 1;
|
||||
reqsize = sizeof(struct eseqiv_request_ctx);
|
||||
|
||||
if (alignmask & reqsize) {
|
||||
alignmask &= reqsize;
|
||||
alignmask--;
|
||||
}
|
||||
|
||||
alignmask = ~alignmask;
|
||||
alignmask &= crypto_ablkcipher_alignmask(geniv);
|
||||
|
||||
reqsize += alignmask;
|
||||
reqsize += crypto_ablkcipher_ivsize(geniv);
|
||||
reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
|
||||
|
||||
ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);
|
||||
|
||||
tfm->crt_ablkcipher.reqsize = reqsize +
|
||||
sizeof(struct ablkcipher_request);
|
||||
|
||||
return skcipher_geniv_init(tfm);
|
||||
}
|
||||
|
||||
static struct crypto_template eseqiv_tmpl;
|
||||
|
||||
static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_instance *inst;
|
||||
int err;
|
||||
|
||||
inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
|
||||
if (IS_ERR(inst))
|
||||
goto out;
|
||||
|
||||
err = -EINVAL;
|
||||
if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
|
||||
goto free_inst;
|
||||
|
||||
inst->alg.cra_ablkcipher.givencrypt = eseqiv_givencrypt_first;
|
||||
|
||||
inst->alg.cra_init = eseqiv_init;
|
||||
inst->alg.cra_exit = skcipher_geniv_exit;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
|
||||
inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
free_inst:
|
||||
skcipher_geniv_free(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static struct crypto_template eseqiv_tmpl = {
|
||||
.name = "eseqiv",
|
||||
.alloc = eseqiv_alloc,
|
||||
.free = skcipher_geniv_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init eseqiv_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&eseqiv_tmpl);
|
||||
}
|
||||
|
||||
static void __exit eseqiv_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&eseqiv_tmpl);
|
||||
}
|
||||
|
||||
module_init(eseqiv_module_init);
|
||||
module_exit(eseqiv_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
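
The header comment at the top of crypto/eseqiv.c describes the scheme: the IV is a salt XORed with the sequence number, then encrypted with the data key, and eseqiv_givencrypt writes the 64-bit sequence number big-endian into the low-order bytes of the IV-sized buffer before the encryption pass. Below is a userspace sketch of just the salt/sequence-number block; the encryption of the result is left to the underlying cipher and is not shown, and the 16-byte IV size is an assumption (AES-CBC sized).

/* Sketch: build the block eseqiv encrypts — salt XOR (zero-padded big-endian seq). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IV_SIZE 16	/* assumed block/IV size for the sketch */

static void eseqiv_block(uint8_t out[IV_SIZE], const uint8_t salt[IV_SIZE],
			 uint64_t seq)
{
	memset(out, 0, IV_SIZE);

	/* big-endian sequence number in the last 8 bytes */
	for (int i = 0; i < 8; i++)
		out[IV_SIZE - 1 - i] = (uint8_t)(seq >> (8 * i));

	/* XOR in the per-tfm salt; encrypting this block yields the IV */
	for (int i = 0; i < IV_SIZE; i++)
		out[i] ^= salt[i];
}

int main(void)
{
	uint8_t salt[IV_SIZE] = { 0 };	/* the kernel fills this from get_random_bytes() */
	uint8_t blk[IV_SIZE];

	eseqiv_block(blk, salt, 0x0102030405060708ULL);
	for (int i = 0; i < IV_SIZE; i++)
		printf("%02x", blk[i]);
	printf("\n");
	return 0;
}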
crypto/gcm.c (new file, 823 lines)
@@ -0,0 +1,823 @@
/*
 * GCM: Galois/Counter Mode.
 *
 * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

struct gcm_instance_ctx {
	struct crypto_skcipher_spawn ctr;
};

struct crypto_gcm_ctx {
	struct crypto_ablkcipher *ctr;
	struct gf128mul_4k *gf128;
};

struct crypto_rfc4106_ctx {
	struct crypto_aead *child;
	u8 nonce[4];
};

struct crypto_gcm_ghash_ctx {
	u32 bytes;
	u32 flags;
	struct gf128mul_4k *gf128;
	u8 buffer[16];
};

struct crypto_gcm_req_priv_ctx {
	u8 auth_tag[16];
	u8 iauth_tag[16];
	struct scatterlist src[2];
	struct scatterlist dst[2];
	struct crypto_gcm_ghash_ctx ghash;
	struct ablkcipher_request abreq;
};

struct crypto_gcm_setkey_result {
	int err;
	struct completion completion;
};
|
||||
|
||||
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
|
||||
struct aead_request *req)
|
||||
{
|
||||
unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
|
||||
|
||||
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
|
||||
}
|
||||
|
||||
static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
|
||||
struct gf128mul_4k *gf128)
|
||||
{
|
||||
ctx->bytes = 0;
|
||||
ctx->flags = flags;
|
||||
ctx->gf128 = gf128;
|
||||
memset(ctx->buffer, 0, 16);
|
||||
}
|
||||
|
||||
static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
u8 *dst = ctx->buffer;
|
||||
|
||||
if (ctx->bytes) {
|
||||
int n = min(srclen, ctx->bytes);
|
||||
u8 *pos = dst + (16 - ctx->bytes);
|
||||
|
||||
ctx->bytes -= n;
|
||||
srclen -= n;
|
||||
|
||||
while (n--)
|
||||
*pos++ ^= *src++;
|
||||
|
||||
if (!ctx->bytes)
|
||||
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
|
||||
}
|
||||
|
||||
while (srclen >= 16) {
|
||||
crypto_xor(dst, src, 16);
|
||||
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
|
||||
src += 16;
|
||||
srclen -= 16;
|
||||
}
|
||||
|
||||
if (srclen) {
|
||||
ctx->bytes = 16 - srclen;
|
||||
while (srclen--)
|
||||
*dst++ ^= *src++;
|
||||
}
|
||||
}
|
||||
|
||||
static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
|
||||
struct scatterlist *sg, int len)
|
||||
{
|
||||
struct scatter_walk walk;
|
||||
u8 *src;
|
||||
int n;
|
||||
|
||||
if (!len)
|
||||
return;
|
||||
|
||||
scatterwalk_start(&walk, sg);
|
||||
|
||||
while (len) {
|
||||
n = scatterwalk_clamp(&walk, len);
|
||||
|
||||
if (!n) {
|
||||
scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
|
||||
n = scatterwalk_clamp(&walk, len);
|
||||
}
|
||||
|
||||
src = scatterwalk_map(&walk, 0);
|
||||
|
||||
crypto_gcm_ghash_update(ctx, src, n);
|
||||
len -= n;
|
||||
|
||||
scatterwalk_unmap(src, 0);
|
||||
scatterwalk_advance(&walk, n);
|
||||
scatterwalk_done(&walk, 0, len);
|
||||
if (len)
|
||||
crypto_yield(ctx->flags);
|
||||
}
|
||||
}
|
||||
|
||||
static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
|
||||
{
|
||||
u8 *dst = ctx->buffer;
|
||||
|
||||
if (ctx->bytes) {
|
||||
u8 *tmp = dst + (16 - ctx->bytes);
|
||||
|
||||
while (ctx->bytes--)
|
||||
*tmp++ ^= 0;
|
||||
|
||||
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
|
||||
}
|
||||
|
||||
ctx->bytes = 0;
|
||||
}
|
||||
|
||||
static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
|
||||
unsigned int authlen,
|
||||
unsigned int cryptlen, u8 *dst)
|
||||
{
|
||||
u8 *buf = ctx->buffer;
|
||||
u128 lengths;
|
||||
|
||||
lengths.a = cpu_to_be64(authlen * 8);
|
||||
lengths.b = cpu_to_be64(cryptlen * 8);
|
||||
|
||||
crypto_gcm_ghash_flush(ctx);
|
||||
crypto_xor(buf, (u8 *)&lengths, 16);
|
||||
gf128mul_4k_lle((be128 *)buf, ctx->gf128);
|
||||
crypto_xor(dst, buf, 16);
|
||||
}
|
||||
|
||||
static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct crypto_gcm_setkey_result *result = req->data;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
result->err = err;
|
||||
complete(&result->completion);
|
||||
}
|
||||
|
||||
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_ablkcipher *ctr = ctx->ctr;
|
||||
struct {
|
||||
be128 hash;
|
||||
u8 iv[8];
|
||||
|
||||
struct crypto_gcm_setkey_result result;
|
||||
|
||||
struct scatterlist sg[1];
|
||||
struct ablkcipher_request req;
|
||||
} *data;
|
||||
int err;
|
||||
|
||||
crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
err = crypto_ablkcipher_setkey(ctr, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
|
||||
GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
init_completion(&data->result.completion);
|
||||
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
|
||||
ablkcipher_request_set_tfm(&data->req, ctr);
|
||||
ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
|
||||
CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
crypto_gcm_setkey_done,
|
||||
&data->result);
|
||||
ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
|
||||
sizeof(data->hash), data->iv);
|
||||
|
||||
err = crypto_ablkcipher_encrypt(&data->req);
|
||||
if (err == -EINPROGRESS || err == -EBUSY) {
|
||||
err = wait_for_completion_interruptible(
|
||||
&data->result.completion);
|
||||
if (!err)
|
||||
err = data->result.err;
|
||||
}
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (ctx->gf128 != NULL)
|
||||
gf128mul_free_4k(ctx->gf128);
|
||||
|
||||
ctx->gf128 = gf128mul_init_4k_lle(&data->hash);
|
||||
|
||||
if (ctx->gf128 == NULL)
|
||||
err = -ENOMEM;
|
||||
|
||||
out:
|
||||
kfree(data);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_gcm_setauthsize(struct crypto_aead *tfm,
|
||||
unsigned int authsize)
|
||||
{
|
||||
switch (authsize) {
|
||||
case 4:
|
||||
case 8:
|
||||
case 12:
|
||||
case 13:
|
||||
case 14:
|
||||
case 15:
|
||||
case 16:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
|
||||
struct aead_request *req,
|
||||
unsigned int cryptlen)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
|
||||
u32 flags = req->base.tfm->crt_flags;
|
||||
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
|
||||
struct scatterlist *dst;
|
||||
__be32 counter = cpu_to_be32(1);
|
||||
|
||||
memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
|
||||
memcpy(req->iv + 12, &counter, 4);
|
||||
|
||||
sg_init_table(pctx->src, 2);
|
||||
sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
|
||||
scatterwalk_sg_chain(pctx->src, 2, req->src);
|
||||
|
||||
dst = pctx->src;
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(pctx->dst, 2);
|
||||
sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
|
||||
scatterwalk_sg_chain(pctx->dst, 2, req->dst);
|
||||
dst = pctx->dst;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
|
||||
ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
|
||||
cryptlen + sizeof(pctx->auth_tag),
|
||||
req->iv);
|
||||
|
||||
crypto_gcm_ghash_init(ghash, flags, ctx->gf128);
|
||||
|
||||
crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen);
|
||||
crypto_gcm_ghash_flush(ghash);
|
||||
}
|
||||
|
||||
static int crypto_gcm_hash(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
|
||||
u8 *auth_tag = pctx->auth_tag;
|
||||
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
|
||||
|
||||
crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen);
|
||||
crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen,
|
||||
auth_tag);
|
||||
|
||||
scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
|
||||
crypto_aead_authsize(aead), 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err)
|
||||
{
|
||||
struct aead_request *req = areq->data;
|
||||
|
||||
if (!err)
|
||||
err = crypto_gcm_hash(req);
|
||||
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int crypto_gcm_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
|
||||
struct ablkcipher_request *abreq = &pctx->abreq;
|
||||
int err;
|
||||
|
||||
crypto_gcm_init_crypt(abreq, req, req->cryptlen);
|
||||
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
|
||||
crypto_gcm_encrypt_done, req);
|
||||
|
||||
err = crypto_ablkcipher_encrypt(abreq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_gcm_hash(req);
|
||||
}
|
||||
|
||||
static int crypto_gcm_verify(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
|
||||
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
|
||||
u8 *auth_tag = pctx->auth_tag;
|
||||
u8 *iauth_tag = pctx->iauth_tag;
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
unsigned int cryptlen = req->cryptlen - authsize;
|
||||
|
||||
crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag);
|
||||
|
||||
authsize = crypto_aead_authsize(aead);
|
||||
scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
|
||||
return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
|
||||
}
|
||||
|
||||
static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err)
|
||||
{
|
||||
struct aead_request *req = areq->data;
|
||||
|
||||
if (!err)
|
||||
err = crypto_gcm_verify(req);
|
||||
|
||||
aead_request_complete(req, err);
|
||||
}
|
||||
|
||||
static int crypto_gcm_decrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
|
||||
struct ablkcipher_request *abreq = &pctx->abreq;
|
||||
struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
|
||||
unsigned int cryptlen = req->cryptlen;
|
||||
unsigned int authsize = crypto_aead_authsize(aead);
|
||||
int err;
|
||||
|
||||
if (cryptlen < authsize)
|
||||
return -EINVAL;
|
||||
cryptlen -= authsize;
|
||||
|
||||
crypto_gcm_init_crypt(abreq, req, cryptlen);
|
||||
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
|
||||
crypto_gcm_decrypt_done, req);
|
||||
|
||||
crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen);
|
||||
|
||||
err = crypto_ablkcipher_decrypt(abreq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_gcm_verify(req);
|
||||
}
|
||||
|
||||
static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
|
||||
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_ablkcipher *ctr;
|
||||
unsigned long align;
|
||||
int err;
|
||||
|
||||
ctr = crypto_spawn_skcipher(&ictx->ctr);
|
||||
err = PTR_ERR(ctr);
|
||||
if (IS_ERR(ctr))
|
||||
return err;
|
||||
|
||||
ctx->ctr = ctr;
|
||||
ctx->gf128 = NULL;
|
||||
|
||||
align = crypto_tfm_alg_alignmask(tfm);
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
tfm->crt_aead.reqsize = align +
|
||||
sizeof(struct crypto_gcm_req_priv_ctx) +
|
||||
crypto_ablkcipher_reqsize(ctr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (ctx->gf128 != NULL)
|
||||
gf128mul_free_4k(ctx->gf128);
|
||||
|
||||
crypto_free_ablkcipher(ctx->ctr);
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
|
||||
const char *full_name,
|
||||
const char *ctr_name)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_alg *ctr;
|
||||
struct gcm_instance_ctx *ctx;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ctx = crypto_instance_ctx(inst);
|
||||
crypto_set_skcipher_spawn(&ctx->ctr, inst);
|
||||
err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
|
||||
crypto_requires_sync(algt->type,
|
||||
algt->mask));
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
|
||||
|
||||
/* We only support 16-byte blocks. */
|
||||
if (ctr->cra_ablkcipher.ivsize != 16)
|
||||
goto out_put_ctr;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
err = -EINVAL;
|
||||
if (ctr->cra_blocksize != 1)
|
||||
goto out_put_ctr;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"gcm_base(%s)", ctr->cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_put_ctr;
|
||||
|
||||
memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = ctr->cra_priority;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1);
|
||||
inst->alg.cra_type = &crypto_aead_type;
|
||||
inst->alg.cra_aead.ivsize = 16;
|
||||
inst->alg.cra_aead.maxauthsize = 16;
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
|
||||
inst->alg.cra_init = crypto_gcm_init_tfm;
|
||||
inst->alg.cra_exit = crypto_gcm_exit_tfm;
|
||||
inst->alg.cra_aead.setkey = crypto_gcm_setkey;
|
||||
inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize;
|
||||
inst->alg.cra_aead.encrypt = crypto_gcm_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
out_put_ctr:
|
||||
crypto_drop_skcipher(&ctx->ctr);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
|
||||
{
|
||||
int err;
|
||||
const char *cipher_name;
|
||||
char ctr_name[CRYPTO_MAX_ALG_NAME];
|
||||
char full_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
cipher_name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(cipher_name);
|
||||
if (IS_ERR(cipher_name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
|
||||
return crypto_gcm_alloc_common(tb, full_name, ctr_name);
|
||||
}
|
||||
|
||||
static void crypto_gcm_free(struct crypto_instance *inst)
|
||||
{
|
||||
struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
|
||||
|
||||
crypto_drop_skcipher(&ctx->ctr);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_gcm_tmpl = {
|
||||
.name = "gcm",
|
||||
.alloc = crypto_gcm_alloc,
|
||||
.free = crypto_gcm_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
|
||||
{
|
||||
int err;
|
||||
const char *ctr_name;
|
||||
char full_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
ctr_name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(ctr_name);
|
||||
if (IS_ERR(ctr_name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)",
|
||||
ctr_name) >= CRYPTO_MAX_ALG_NAME)
|
||||
return ERR_PTR(-ENAMETOOLONG);
|
||||
|
||||
return crypto_gcm_alloc_common(tb, full_name, ctr_name);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_gcm_base_tmpl = {
|
||||
.name = "gcm_base",
|
||||
.alloc = crypto_gcm_base_alloc,
|
||||
.free = crypto_gcm_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
|
||||
struct crypto_aead *child = ctx->child;
|
||||
int err;
|
||||
|
||||
if (keylen < 4)
|
||||
return -EINVAL;
|
||||
|
||||
keylen -= 4;
|
||||
memcpy(ctx->nonce, key + keylen, 4);
|
||||
|
||||
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_aead_setkey(child, key, keylen);
|
||||
crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
|
||||
|
||||
switch (authsize) {
|
||||
case 8:
|
||||
case 12:
|
||||
case 16:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return crypto_aead_setauthsize(ctx->child, authsize);
|
||||
}
|
||||
|
||||
static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
|
||||
{
|
||||
struct aead_request *subreq = aead_request_ctx(req);
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct crypto_aead *child = ctx->child;
|
||||
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
|
||||
crypto_aead_alignmask(child) + 1);
|
||||
|
||||
memcpy(iv, ctx->nonce, 4);
|
||||
memcpy(iv + 4, req->iv, 8);
|
||||
|
||||
aead_request_set_tfm(subreq, child);
|
||||
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
|
||||
req->base.data);
|
||||
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
|
||||
aead_request_set_assoc(subreq, req->assoc, req->assoclen);
|
||||
|
||||
return subreq;
|
||||
}
|
||||
|
||||
static int crypto_rfc4106_encrypt(struct aead_request *req)
|
||||
{
|
||||
req = crypto_rfc4106_crypt(req);
|
||||
|
||||
return crypto_aead_encrypt(req);
|
||||
}
|
||||
|
||||
static int crypto_rfc4106_decrypt(struct aead_request *req)
|
||||
{
|
||||
req = crypto_rfc4106_crypt(req);
|
||||
|
||||
return crypto_aead_decrypt(req);
|
||||
}
|
||||
|
||||
static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
|
||||
struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_aead *aead;
|
||||
unsigned long align;
|
||||
|
||||
aead = crypto_spawn_aead(spawn);
|
||||
if (IS_ERR(aead))
|
||||
return PTR_ERR(aead);
|
||||
|
||||
ctx->child = aead;
|
||||
|
||||
align = crypto_aead_alignmask(aead);
|
||||
align &= ~(crypto_tfm_ctx_alignment() - 1);
|
||||
tfm->crt_aead.reqsize = sizeof(struct aead_request) +
|
||||
ALIGN(crypto_aead_reqsize(aead),
|
||||
crypto_tfm_ctx_alignment()) +
|
||||
align + 16;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_aead(ctx->child);
|
||||
}
|
||||
|
||||
static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
struct crypto_aead_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
const char *ccm_name;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
ccm_name = crypto_attr_alg_name(tb[1]);
|
||||
err = PTR_ERR(ccm_name);
|
||||
if (IS_ERR(ccm_name))
|
||||
return ERR_PTR(err);
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
spawn = crypto_instance_ctx(inst);
|
||||
crypto_set_aead_spawn(spawn, inst);
|
||||
err = crypto_grab_aead(spawn, ccm_name, 0,
|
||||
crypto_requires_sync(algt->type, algt->mask));
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
|
||||
alg = crypto_aead_spawn_alg(spawn);
|
||||
|
||||
err = -EINVAL;
|
||||
|
||||
/* We only support 16-byte blocks. */
|
||||
if (alg->cra_aead.ivsize != 16)
|
||||
goto out_drop_alg;
|
||||
|
||||
/* Not a stream cipher? */
|
||||
if (alg->cra_blocksize != 1)
|
||||
goto out_drop_alg;
|
||||
|
||||
err = -ENAMETOOLONG;
|
||||
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4106(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
|
||||
snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
|
||||
"rfc4106(%s)", alg->cra_driver_name) >=
|
||||
CRYPTO_MAX_ALG_NAME)
|
||||
goto out_drop_alg;
|
||||
|
||||
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
|
||||
inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
|
||||
inst->alg.cra_priority = alg->cra_priority;
|
||||
inst->alg.cra_blocksize = 1;
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_nivaead_type;
|
||||
|
||||
inst->alg.cra_aead.ivsize = 8;
|
||||
inst->alg.cra_aead.maxauthsize = 16;
|
||||
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
|
||||
|
||||
inst->alg.cra_init = crypto_rfc4106_init_tfm;
|
||||
inst->alg.cra_exit = crypto_rfc4106_exit_tfm;
|
||||
|
||||
inst->alg.cra_aead.setkey = crypto_rfc4106_setkey;
|
||||
inst->alg.cra_aead.setauthsize = crypto_rfc4106_setauthsize;
|
||||
inst->alg.cra_aead.encrypt = crypto_rfc4106_encrypt;
|
||||
inst->alg.cra_aead.decrypt = crypto_rfc4106_decrypt;
|
||||
|
||||
inst->alg.cra_aead.geniv = "seqiv";
|
||||
|
||||
out:
|
||||
return inst;
|
||||
|
||||
out_drop_alg:
|
||||
crypto_drop_aead(spawn);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
inst = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void crypto_rfc4106_free(struct crypto_instance *inst)
|
||||
{
|
||||
crypto_drop_spawn(crypto_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_rfc4106_tmpl = {
|
||||
.name = "rfc4106",
|
||||
.alloc = crypto_rfc4106_alloc,
|
||||
.free = crypto_rfc4106_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init crypto_gcm_module_init(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = crypto_register_template(&crypto_gcm_base_tmpl);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = crypto_register_template(&crypto_gcm_tmpl);
|
||||
if (err)
|
||||
goto out_undo_base;
|
||||
|
||||
err = crypto_register_template(&crypto_rfc4106_tmpl);
|
||||
if (err)
|
||||
goto out_undo_gcm;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
out_undo_gcm:
|
||||
crypto_unregister_template(&crypto_gcm_tmpl);
|
||||
out_undo_base:
|
||||
crypto_unregister_template(&crypto_gcm_base_tmpl);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void __exit crypto_gcm_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&crypto_rfc4106_tmpl);
|
||||
crypto_unregister_template(&crypto_gcm_tmpl);
|
||||
crypto_unregister_template(&crypto_gcm_base_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_gcm_module_init);
|
||||
module_exit(crypto_gcm_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Galois/Counter Mode");
|
||||
MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
|
||||
MODULE_ALIAS("gcm_base");
|
||||
MODULE_ALIAS("rfc4106");
|
@@ -17,6 +17,7 @@
 */

#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -160,7 +161,7 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,

	sg_init_table(sg1, 2);
	sg_set_buf(sg1, ipad, bs);
	sg_set_page(&sg1[1], (void *) sg, 0, 0);
	scatterwalk_sg_chain(sg1, 2, sg);

	sg_init_table(sg2, 1);
	sg_set_buf(sg2, opad, bs + ds);
@@ -25,7 +25,6 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>

/* Crypto notification events. */
enum {
@@ -50,34 +49,6 @@ extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;

static inline enum km_type crypto_kmap_type(int out)
{
	enum km_type type;

	if (in_softirq())
		type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
	else
		type = out * (KM_USER1 - KM_USER0) + KM_USER0;

	return type;
}

static inline void *crypto_kmap(struct page *page, int out)
{
	return kmap_atomic(page, crypto_kmap_type(out));
}

static inline void crypto_kunmap(void *vaddr, int out)
{
	kunmap_atomic(vaddr, crypto_kmap_type(out));
}

static inline void crypto_yield(u32 flags)
{
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
}

#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
void __exit crypto_exit_proc(void);
@@ -122,6 +93,8 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);

void crypto_larval_kill(struct crypto_alg *alg);
struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_larval_error(const char *name, u32 type, u32 mask);

void crypto_shoot_alg(struct crypto_alg *alg);
crypto/lzo.c (new file, 106 lines)
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Cryptographic API.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc., 51
|
||||
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/lzo.h>
|
||||
|
||||
struct lzo_ctx {
|
||||
void *lzo_comp_mem;
|
||||
};
|
||||
|
||||
static int lzo_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
|
||||
if (!ctx->lzo_comp_mem)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void lzo_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
vfree(ctx->lzo_comp_mem);
|
||||
}
|
||||
|
||||
static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
|
||||
unsigned int slen, u8 *dst, unsigned int *dlen)
|
||||
{
|
||||
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
|
||||
int err;
|
||||
|
||||
err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem);
|
||||
|
||||
if (err != LZO_E_OK)
|
||||
return -EINVAL;
|
||||
|
||||
*dlen = tmp_len;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
|
||||
unsigned int slen, u8 *dst, unsigned int *dlen)
|
||||
{
|
||||
int err;
|
||||
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
|
||||
|
||||
err = lzo1x_decompress_safe(src, slen, dst, &tmp_len);
|
||||
|
||||
if (err != LZO_E_OK)
|
||||
return -EINVAL;
|
||||
|
||||
*dlen = tmp_len;
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "lzo",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
|
||||
.cra_ctxsize = sizeof(struct lzo_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(alg.cra_list),
|
||||
.cra_init = lzo_init,
|
||||
.cra_exit = lzo_exit,
|
||||
.cra_u = { .compress = {
|
||||
.coa_compress = lzo_compress,
|
||||
.coa_decompress = lzo_decompress } }
|
||||
};
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
module_exit(fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("LZO Compression Algorithm");
|
crypto/pcbc.c (105 lines changed)
@@ -24,7 +24,6 @@

struct crypto_pcbc_ctx {
	struct crypto_cipher *child;
	void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
};

static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
@ -45,9 +44,7 @@ static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
|
||||
|
||||
static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_encrypt;
|
||||
@ -58,10 +55,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
|
||||
u8 *iv = walk->iv;
|
||||
|
||||
do {
|
||||
xor(iv, src, bsize);
|
||||
crypto_xor(iv, src, bsize);
|
||||
fn(crypto_cipher_tfm(tfm), dst, iv);
|
||||
memcpy(iv, dst, bsize);
|
||||
xor(iv, src, bsize);
|
||||
crypto_xor(iv, src, bsize);
|
||||
|
||||
src += bsize;
|
||||
dst += bsize;
|
||||
@ -72,9 +69,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
|
||||
|
||||
static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_encrypt;
|
||||
@ -86,10 +81,10 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
|
||||
|
||||
do {
|
||||
memcpy(tmpbuf, src, bsize);
|
||||
xor(iv, tmpbuf, bsize);
|
||||
crypto_xor(iv, src, bsize);
|
||||
fn(crypto_cipher_tfm(tfm), src, iv);
|
||||
memcpy(iv, src, bsize);
|
||||
xor(iv, tmpbuf, bsize);
|
||||
memcpy(iv, tmpbuf, bsize);
|
||||
crypto_xor(iv, src, bsize);
|
||||
|
||||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
@ -107,7 +102,6 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
@ -115,11 +109,11 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
if (walk.src.virt.addr == walk.dst.virt.addr)
|
||||
nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_pcbc_encrypt_inplace(desc, &walk,
|
||||
child);
|
||||
else
|
||||
nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_pcbc_encrypt_segment(desc, &walk,
|
||||
child);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
@ -128,9 +122,7 @@ static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
|
||||
|
||||
static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_decrypt;
|
||||
@ -142,9 +134,9 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
|
||||
|
||||
do {
|
||||
fn(crypto_cipher_tfm(tfm), dst, src);
|
||||
xor(dst, iv, bsize);
|
||||
crypto_xor(dst, iv, bsize);
|
||||
memcpy(iv, src, bsize);
|
||||
xor(iv, dst, bsize);
|
||||
crypto_xor(iv, dst, bsize);
|
||||
|
||||
src += bsize;
|
||||
dst += bsize;
|
||||
@ -157,9 +149,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
|
||||
|
||||
static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk,
|
||||
struct crypto_cipher *tfm,
|
||||
void (*xor)(u8 *, const u8 *,
|
||||
unsigned int))
|
||||
struct crypto_cipher *tfm)
|
||||
{
|
||||
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
|
||||
crypto_cipher_alg(tfm)->cia_decrypt;
|
||||
@ -172,9 +162,9 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
|
||||
do {
|
||||
memcpy(tmpbuf, src, bsize);
|
||||
fn(crypto_cipher_tfm(tfm), src, src);
|
||||
xor(src, iv, bsize);
|
||||
crypto_xor(src, iv, bsize);
|
||||
memcpy(iv, tmpbuf, bsize);
|
||||
xor(iv, src, bsize);
|
||||
crypto_xor(iv, src, bsize);
|
||||
|
||||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
@ -192,7 +182,6 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
@ -200,48 +189,17 @@ static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
if (walk.src.virt.addr == walk.dst.virt.addr)
|
||||
nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_pcbc_decrypt_inplace(desc, &walk,
|
||||
child);
|
||||
else
|
||||
nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child,
|
||||
xor);
|
||||
nbytes = crypto_pcbc_decrypt_segment(desc, &walk,
|
||||
child);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
|
||||
{
|
||||
do {
|
||||
*a++ ^= *b++;
|
||||
} while (--bs);
|
||||
}
|
||||
|
||||
static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
|
||||
{
|
||||
u32 *a = (u32 *)dst;
|
||||
u32 *b = (u32 *)src;
|
||||
|
||||
do {
|
||||
*a++ ^= *b++;
|
||||
} while ((bs -= 4));
|
||||
}
|
||||
|
||||
static void xor_64(u8 *a, const u8 *b, unsigned int bs)
|
||||
{
|
||||
((u32 *)a)[0] ^= ((u32 *)b)[0];
|
||||
((u32 *)a)[1] ^= ((u32 *)b)[1];
|
||||
}
|
||||
|
||||
static void xor_128(u8 *a, const u8 *b, unsigned int bs)
|
||||
{
|
||||
((u32 *)a)[0] ^= ((u32 *)b)[0];
|
||||
((u32 *)a)[1] ^= ((u32 *)b)[1];
|
||||
((u32 *)a)[2] ^= ((u32 *)b)[2];
|
||||
((u32 *)a)[3] ^= ((u32 *)b)[3];
|
||||
}
|
||||
|
||||
static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_instance *inst = (void *)tfm->__crt_alg;
|
||||
@ -249,22 +207,6 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
|
||||
struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct crypto_cipher *cipher;
|
||||
|
||||
switch (crypto_tfm_alg_blocksize(tfm)) {
|
||||
case 8:
|
||||
ctx->xor = xor_64;
|
||||
break;
|
||||
|
||||
case 16:
|
||||
ctx->xor = xor_128;
|
||||
break;
|
||||
|
||||
default:
|
||||
if (crypto_tfm_alg_blocksize(tfm) % 4)
|
||||
ctx->xor = xor_byte;
|
||||
else
|
||||
ctx->xor = xor_quad;
|
||||
}
|
||||
|
||||
cipher = crypto_spawn_cipher(spawn);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
@ -304,8 +246,9 @@ static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_blkcipher_type;
|
||||
|
||||
if (!(alg->cra_blocksize % 4))
|
||||
inst->alg.cra_alignmask |= 3;
|
||||
/* We access the data as u32s when xoring. */
|
||||
inst->alg.cra_alignmask |= __alignof__(u32) - 1;
|
||||
|
||||
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
|
||||
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
|
||||
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
|
||||
|
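
The pcbc.c hunks above drop the per-block-size xor helpers in favour of crypto_xor, but the chaining itself is unchanged: each plaintext block is XORed with the previous plaintext-XOR-ciphertext value before encryption, and that value is refreshed after every block. The following is a toy userspace sketch of that chaining; toy_encrypt is a stand-in permutation used purely as a placeholder, not a real block cipher.

/* Sketch of PCBC chaining, mirroring the loop in crypto_pcbc_encrypt_segment. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 8

static void toy_encrypt(uint8_t *dst, const uint8_t *src)
{
	for (int i = 0; i < BSIZE; i++)
		dst[i] = (uint8_t)(src[i] + 1);	/* placeholder, not a real cipher */
}

static void xor_block(uint8_t *a, const uint8_t *b, size_t n)
{
	for (size_t i = 0; i < n; i++)
		a[i] ^= b[i];
}

/* iv ^= P; C = E(iv); iv = C; iv ^= P — exactly the order used in the kernel loop. */
static void pcbc_encrypt(uint8_t *dst, const uint8_t *src, size_t nbytes,
			 uint8_t iv[BSIZE])
{
	for (size_t off = 0; off + BSIZE <= nbytes; off += BSIZE) {
		xor_block(iv, src + off, BSIZE);
		toy_encrypt(dst + off, iv);
		memcpy(iv, dst + off, BSIZE);
		xor_block(iv, src + off, BSIZE);
	}
}

int main(void)
{
	uint8_t iv[BSIZE] = { 0 };
	uint8_t pt[2 * BSIZE] = "plaintextblocks";	/* two 8-byte blocks */
	uint8_t ct[2 * BSIZE];

	pcbc_encrypt(ct, pt, sizeof(pt), iv);
	for (size_t i = 0; i < sizeof(ct); i++)
		printf("%02x", ct[i]);
	printf("\n");
	return 0;
}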
crypto/salsa20_generic.c (new file, 255 lines)
@@ -0,0 +1,255 @@
/*
 * Salsa20: Salsa20 stream cipher algorithm
 *
 * Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com>
 *
 * Derived from:
 * - salsa20.c: Public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
 *
 * Salsa20 is a stream cipher candidate in eSTREAM, the ECRYPT Stream
 * Cipher Project. It is designed by Daniel J. Bernstein <djb@cr.yp.to>.
 * More information about eSTREAM and Salsa20 can be found here:
 *   http://www.ecrypt.eu.org/stream/
 *   http://cr.yp.to/snuffle.html
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <asm/byteorder.h>

#define SALSA20_IV_SIZE 8U
#define SALSA20_MIN_KEY_SIZE 16U
#define SALSA20_MAX_KEY_SIZE 32U

/*
|
||||
* Start of code taken from D. J. Bernstein's reference implementation.
|
||||
* With some modifications and optimizations made to suit our needs.
|
||||
*/
|
||||
|
||||
/*
|
||||
salsa20-ref.c version 20051118
|
||||
D. J. Bernstein
|
||||
Public domain.
|
||||
*/
|
||||
|
||||
#define ROTATE(v,n) (((v) << (n)) | ((v) >> (32 - (n))))
|
||||
#define XOR(v,w) ((v) ^ (w))
|
||||
#define PLUS(v,w) (((v) + (w)))
|
||||
#define PLUSONE(v) (PLUS((v),1))
|
||||
#define U32TO8_LITTLE(p, v) \
|
||||
{ (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \
|
||||
(p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
|
||||
#define U8TO32_LITTLE(p) \
|
||||
(((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \
|
||||
((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) )
|
||||
|
||||
struct salsa20_ctx
|
||||
{
|
||||
u32 input[16];
|
||||
};
|
||||
|
||||
static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
|
||||
{
|
||||
u32 x[16];
|
||||
int i;
|
||||
|
||||
memcpy(x, input, sizeof(x));
|
||||
for (i = 20; i > 0; i -= 2) {
|
||||
x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 0],x[12]), 7));
|
||||
x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[ 4],x[ 0]), 9));
|
||||
x[12] = XOR(x[12],ROTATE(PLUS(x[ 8],x[ 4]),13));
|
||||
x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[12],x[ 8]),18));
|
||||
x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 5],x[ 1]), 7));
|
||||
x[13] = XOR(x[13],ROTATE(PLUS(x[ 9],x[ 5]), 9));
|
||||
x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[13],x[ 9]),13));
|
||||
x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 1],x[13]),18));
|
||||
x[14] = XOR(x[14],ROTATE(PLUS(x[10],x[ 6]), 7));
|
||||
x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[14],x[10]), 9));
|
||||
x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 2],x[14]),13));
|
||||
x[10] = XOR(x[10],ROTATE(PLUS(x[ 6],x[ 2]),18));
|
||||
x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[15],x[11]), 7));
|
||||
x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 3],x[15]), 9));
|
||||
x[11] = XOR(x[11],ROTATE(PLUS(x[ 7],x[ 3]),13));
|
||||
x[15] = XOR(x[15],ROTATE(PLUS(x[11],x[ 7]),18));
|
||||
x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[ 0],x[ 3]), 7));
|
||||
x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[ 1],x[ 0]), 9));
|
||||
x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[ 2],x[ 1]),13));
|
||||
x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[ 3],x[ 2]),18));
|
||||
x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 5],x[ 4]), 7));
|
||||
x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 6],x[ 5]), 9));
|
||||
x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 7],x[ 6]),13));
|
||||
x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 4],x[ 7]),18));
|
||||
x[11] = XOR(x[11],ROTATE(PLUS(x[10],x[ 9]), 7));
|
||||
x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[11],x[10]), 9));
|
||||
x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 8],x[11]),13));
|
||||
x[10] = XOR(x[10],ROTATE(PLUS(x[ 9],x[ 8]),18));
|
||||
x[12] = XOR(x[12],ROTATE(PLUS(x[15],x[14]), 7));
|
||||
x[13] = XOR(x[13],ROTATE(PLUS(x[12],x[15]), 9));
|
||||
x[14] = XOR(x[14],ROTATE(PLUS(x[13],x[12]),13));
|
||||
x[15] = XOR(x[15],ROTATE(PLUS(x[14],x[13]),18));
|
||||
}
|
||||
for (i = 0; i < 16; ++i)
|
||||
x[i] = PLUS(x[i],input[i]);
|
||||
for (i = 0; i < 16; ++i)
|
||||
U32TO8_LITTLE(output + 4 * i,x[i]);
|
||||
}
|
||||
|
||||
static const char sigma[16] = "expand 32-byte k";
|
||||
static const char tau[16] = "expand 16-byte k";
|
||||
|
||||
static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
|
||||
{
|
||||
const char *constants;
|
||||
|
||||
ctx->input[1] = U8TO32_LITTLE(k + 0);
|
||||
ctx->input[2] = U8TO32_LITTLE(k + 4);
|
||||
ctx->input[3] = U8TO32_LITTLE(k + 8);
|
||||
ctx->input[4] = U8TO32_LITTLE(k + 12);
|
||||
if (kbytes == 32) { /* recommended */
|
||||
k += 16;
|
||||
constants = sigma;
|
||||
} else { /* kbytes == 16 */
|
||||
constants = tau;
|
||||
}
|
||||
ctx->input[11] = U8TO32_LITTLE(k + 0);
|
||||
ctx->input[12] = U8TO32_LITTLE(k + 4);
|
||||
ctx->input[13] = U8TO32_LITTLE(k + 8);
|
||||
ctx->input[14] = U8TO32_LITTLE(k + 12);
|
||||
ctx->input[0] = U8TO32_LITTLE(constants + 0);
|
||||
ctx->input[5] = U8TO32_LITTLE(constants + 4);
|
||||
ctx->input[10] = U8TO32_LITTLE(constants + 8);
|
||||
ctx->input[15] = U8TO32_LITTLE(constants + 12);
|
||||
}
|
||||
|
||||
static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
|
||||
{
|
||||
ctx->input[6] = U8TO32_LITTLE(iv + 0);
|
||||
ctx->input[7] = U8TO32_LITTLE(iv + 4);
|
||||
ctx->input[8] = 0;
|
||||
ctx->input[9] = 0;
|
||||
}
|
||||
|
||||
static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
|
||||
const u8 *src, unsigned int bytes)
|
||||
{
|
||||
u8 buf[64];
|
||||
|
||||
if (dst != src)
|
||||
memcpy(dst, src, bytes);
|
||||
|
||||
while (bytes) {
|
||||
salsa20_wordtobyte(buf, ctx->input);
|
||||
|
||||
ctx->input[8] = PLUSONE(ctx->input[8]);
|
||||
if (!ctx->input[8])
|
||||
ctx->input[9] = PLUSONE(ctx->input[9]);
|
||||
|
||||
if (bytes <= 64) {
|
||||
crypto_xor(dst, buf, bytes);
|
||||
return;
|
||||
}
|
||||
|
||||
crypto_xor(dst, buf, 64);
|
||||
bytes -= 64;
|
||||
dst += 64;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* End of code taken from D. J. Bernstein's reference implementation.
|
||||
*/
|
||||
|
||||
static int setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keysize)
|
||||
{
|
||||
struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
salsa20_keysetup(ctx, key, keysize);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, 64);
|
||||
|
||||
salsa20_ivsetup(ctx, walk.iv);
|
||||
|
||||
if (likely(walk.nbytes == nbytes))
|
||||
{
|
||||
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, nbytes);
|
||||
return blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
while (walk.nbytes >= 64) {
|
||||
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
|
||||
walk.src.virt.addr,
|
||||
walk.nbytes - (walk.nbytes % 64));
|
||||
err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
|
||||
walk.src.virt.addr, walk.nbytes);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "salsa20",
|
||||
.cra_driver_name = "salsa20-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct salsa20_ctx),
|
||||
.cra_alignmask = 3,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.setkey = setkey,
|
||||
.encrypt = encrypt,
|
||||
.decrypt = encrypt,
|
||||
.min_keysize = SALSA20_MIN_KEY_SIZE,
|
||||
.max_keysize = SALSA20_MAX_KEY_SIZE,
|
||||
.ivsize = SALSA20_IV_SIZE,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
module_exit(fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
|
||||
MODULE_ALIAS("salsa20");
|
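For context, a minimal usage sketch of the new "salsa20" algorithm through the synchronous blkcipher interface of this kernel generation; the key and nonce values are dummies, the function name is made up, and error handling is abbreviated:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int salsa20_demo(u8 *buf, unsigned int len)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		u8 key[16] = { 0 };	/* dummy 128-bit key */
		u8 iv[8] = { 0 };	/* dummy 8-byte nonce */
		int err;

		tfm = crypto_alloc_blkcipher("salsa20", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
		if (!err) {
			desc.tfm = tfm;
			desc.flags = 0;
			desc.info = iv;		/* IV consumed by *_encrypt_iv() */
			sg_init_one(&sg, buf, len);
			err = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
		}
		crypto_free_blkcipher(tfm);
		return err;
	}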
@ -13,6 +13,8 @@
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
@ -20,9 +22,6 @@
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/scatterlist.h>
|
||||
|
||||
#include "internal.h"
|
||||
#include "scatterwalk.h"
|
||||
|
||||
static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
|
||||
{
|
||||
void *src = out ? buf : sgdata;
|
||||
@ -106,6 +105,9 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
|
||||
struct scatter_walk walk;
|
||||
unsigned int offset = 0;
|
||||
|
||||
if (!nbytes)
|
||||
return;
|
||||
|
||||
for (;;) {
|
||||
scatterwalk_start(&walk, sg);
|
||||
|
||||
@ -113,7 +115,7 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
|
||||
break;
|
||||
|
||||
offset += sg->length;
|
||||
sg = sg_next(sg);
|
||||
sg = scatterwalk_sg_next(sg);
|
||||
}
|
||||
|
||||
scatterwalk_advance(&walk, start - offset);
|
||||
|
345	crypto/seqiv.c	Normal file
@@ -0,0 +1,345 @@
|
||||
/*
|
||||
* seqiv: Sequence Number IV Generator
|
||||
*
|
||||
* This generator generates an IV based on a sequence number by xoring it
|
||||
* with a salt. This algorithm is mainly useful for CTR and similar modes.
|
||||
*
|
||||
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
struct seqiv_ctx {
|
||||
spinlock_t lock;
|
||||
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
|
||||
};
|
||||
|
||||
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
|
||||
{
|
||||
struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
|
||||
struct crypto_ablkcipher *geniv;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
geniv = skcipher_givcrypt_reqtfm(req);
|
||||
memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
|
||||
|
||||
out:
|
||||
kfree(subreq->info);
|
||||
}
|
||||
|
||||
static void seqiv_complete(struct crypto_async_request *base, int err)
|
||||
{
|
||||
struct skcipher_givcrypt_request *req = base->data;
|
||||
|
||||
seqiv_complete2(req, err);
|
||||
skcipher_givcrypt_complete(req, err);
|
||||
}
|
||||
|
||||
static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
|
||||
{
|
||||
struct aead_request *subreq = aead_givcrypt_reqctx(req);
|
||||
struct crypto_aead *geniv;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
geniv = aead_givcrypt_reqtfm(req);
|
||||
memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));
|
||||
|
||||
out:
|
||||
kfree(subreq->iv);
|
||||
}
|
||||
|
||||
static void seqiv_aead_complete(struct crypto_async_request *base, int err)
|
||||
{
|
||||
struct aead_givcrypt_request *req = base->data;
|
||||
|
||||
seqiv_aead_complete2(req, err);
|
||||
aead_givcrypt_complete(req, err);
|
||||
}
|
||||
|
||||
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
|
||||
unsigned int ivsize)
|
||||
{
|
||||
unsigned int len = ivsize;
|
||||
|
||||
if (ivsize > sizeof(u64)) {
|
||||
memset(info, 0, ivsize - sizeof(u64));
|
||||
len = sizeof(u64);
|
||||
}
|
||||
seq = cpu_to_be64(seq);
|
||||
memcpy(info + ivsize - len, &seq, len);
|
||||
crypto_xor(info, ctx->salt, ivsize);
|
||||
}
|
||||
|
||||
static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
|
||||
crypto_completion_t complete;
|
||||
void *data;
|
||||
u8 *info;
|
||||
unsigned int ivsize;
|
||||
int err;
|
||||
|
||||
ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
|
||||
|
||||
complete = req->creq.base.complete;
|
||||
data = req->creq.base.data;
|
||||
info = req->creq.info;
|
||||
|
||||
ivsize = crypto_ablkcipher_ivsize(geniv);
|
||||
|
||||
if (unlikely(!IS_ALIGNED((unsigned long)info,
|
||||
crypto_ablkcipher_alignmask(geniv) + 1))) {
|
||||
info = kmalloc(ivsize, req->creq.base.flags &
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
|
||||
GFP_ATOMIC);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
complete = seqiv_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
|
||||
data);
|
||||
ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
|
||||
req->creq.nbytes, info);
|
||||
|
||||
seqiv_geniv(ctx, info, req->seq, ivsize);
|
||||
memcpy(req->giv, info, ivsize);
|
||||
|
||||
err = crypto_ablkcipher_encrypt(subreq);
|
||||
if (unlikely(info != req->creq.info))
|
||||
seqiv_complete2(req, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
|
||||
struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_request *areq = &req->areq;
|
||||
struct aead_request *subreq = aead_givcrypt_reqctx(req);
|
||||
crypto_completion_t complete;
|
||||
void *data;
|
||||
u8 *info;
|
||||
unsigned int ivsize;
|
||||
int err;
|
||||
|
||||
aead_request_set_tfm(subreq, aead_geniv_base(geniv));
|
||||
|
||||
complete = areq->base.complete;
|
||||
data = areq->base.data;
|
||||
info = areq->iv;
|
||||
|
||||
ivsize = crypto_aead_ivsize(geniv);
|
||||
|
||||
if (unlikely(!IS_ALIGNED((unsigned long)info,
|
||||
crypto_aead_alignmask(geniv) + 1))) {
|
||||
info = kmalloc(ivsize, areq->base.flags &
|
||||
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
|
||||
GFP_ATOMIC);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
complete = seqiv_aead_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
aead_request_set_callback(subreq, areq->base.flags, complete, data);
|
||||
aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
|
||||
info);
|
||||
aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
|
||||
|
||||
seqiv_geniv(ctx, info, req->seq, ivsize);
|
||||
memcpy(req->giv, info, ivsize);
|
||||
|
||||
err = crypto_aead_encrypt(subreq);
|
||||
if (unlikely(info != areq->iv))
|
||||
seqiv_aead_complete2(req, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
|
||||
goto unlock;
|
||||
|
||||
crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
|
||||
get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv));
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
return seqiv_givencrypt(req);
|
||||
}
|
||||
|
||||
static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
|
||||
struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
|
||||
spin_lock_bh(&ctx->lock);
|
||||
if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
|
||||
goto unlock;
|
||||
|
||||
crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
|
||||
get_random_bytes(ctx->salt, crypto_aead_ivsize(geniv));
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(&ctx->lock);
|
||||
|
||||
return seqiv_aead_givencrypt(req);
|
||||
}
|
||||
|
||||
static int seqiv_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
|
||||
struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
|
||||
|
||||
return skcipher_geniv_init(tfm);
|
||||
}
|
||||
|
||||
static int seqiv_aead_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_aead *geniv = __crypto_aead_cast(tfm);
|
||||
struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
|
||||
spin_lock_init(&ctx->lock);
|
||||
|
||||
tfm->crt_aead.reqsize = sizeof(struct aead_request);
|
||||
|
||||
return aead_geniv_init(tfm);
|
||||
}
|
||||
|
||||
static struct crypto_template seqiv_tmpl;
|
||||
|
||||
static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_instance *inst;
|
||||
|
||||
inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
|
||||
|
||||
if (IS_ERR(inst))
|
||||
goto out;
|
||||
|
||||
inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
|
||||
|
||||
inst->alg.cra_init = seqiv_init;
|
||||
inst->alg.cra_exit = skcipher_geniv_exit;
|
||||
|
||||
inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
}
|
||||
|
||||
static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_instance *inst;
|
||||
|
||||
inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
|
||||
|
||||
if (IS_ERR(inst))
|
||||
goto out;
|
||||
|
||||
inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
|
||||
|
||||
inst->alg.cra_init = seqiv_aead_init;
|
||||
inst->alg.cra_exit = aead_geniv_exit;
|
||||
|
||||
inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
|
||||
|
||||
out:
|
||||
return inst;
|
||||
}
|
||||
|
||||
static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
|
||||
{
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_instance *inst;
|
||||
int err;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
return ERR_PTR(err);
|
||||
|
||||
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
|
||||
inst = seqiv_ablkcipher_alloc(tb);
|
||||
else
|
||||
inst = seqiv_aead_alloc(tb);
|
||||
|
||||
if (IS_ERR(inst))
|
||||
goto out;
|
||||
|
||||
inst->alg.cra_alignmask |= __alignof__(u32) - 1;
|
||||
inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
|
||||
|
||||
out:
|
||||
return inst;
|
||||
}
|
||||
|
||||
static void seqiv_free(struct crypto_instance *inst)
|
||||
{
|
||||
if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
|
||||
skcipher_geniv_free(inst);
|
||||
else
|
||||
aead_geniv_free(inst);
|
||||
}
|
||||
|
||||
static struct crypto_template seqiv_tmpl = {
|
||||
.name = "seqiv",
|
||||
.alloc = seqiv_alloc,
|
||||
.free = seqiv_free,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init seqiv_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&seqiv_tmpl);
|
||||
}
|
||||
|
||||
static void __exit seqiv_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&seqiv_tmpl);
|
||||
}
|
||||
|
||||
module_init(seqiv_module_init);
|
||||
module_exit(seqiv_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Sequence Number IV Generator");
|
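The seqiv header comment above says the IV is formed by XORing the request's sequence number with a random per-tfm salt. As a standalone illustration of what seqiv_geniv() computes (not a drop-in replacement; the helper name is made up):

	/* Derive an ivsize-byte IV from a 64-bit sequence number and a salt. */
	static void make_seq_iv(u8 *iv, const u8 *salt, u64 seq, unsigned int ivsize)
	{
		unsigned int len = ivsize;
		unsigned int i;
		__be64 beseq;

		if (ivsize > sizeof(u64)) {
			memset(iv, 0, ivsize - sizeof(u64));
			len = sizeof(u64);
		}
		beseq = cpu_to_be64(seq);
		memcpy(iv + ivsize - len, &beseq, len);
		for (i = 0; i < ivsize; i++)	/* open-coded crypto_xor() */
			iv[i] ^= salt[i];
	}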
@ -9,6 +9,7 @@
|
||||
* Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
|
||||
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
|
||||
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
|
||||
* SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
@ -218,6 +219,22 @@ static void sha256_transform(u32 *state, const u8 *input)
|
||||
memset(W, 0, 64 * sizeof(u32));
|
||||
}
|
||||
|
||||
|
||||
static void sha224_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
sctx->state[0] = SHA224_H0;
|
||||
sctx->state[1] = SHA224_H1;
|
||||
sctx->state[2] = SHA224_H2;
|
||||
sctx->state[3] = SHA224_H3;
|
||||
sctx->state[4] = SHA224_H4;
|
||||
sctx->state[5] = SHA224_H5;
|
||||
sctx->state[6] = SHA224_H6;
|
||||
sctx->state[7] = SHA224_H7;
|
||||
sctx->count[0] = 0;
|
||||
sctx->count[1] = 0;
|
||||
}
|
||||
|
||||
static void sha256_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct sha256_ctx *sctx = crypto_tfm_ctx(tfm);
|
||||
@ -294,8 +311,17 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
|
||||
memset(sctx, 0, sizeof(*sctx));
|
||||
}
|
||||
|
||||
static void sha224_final(struct crypto_tfm *tfm, u8 *hash)
|
||||
{
|
||||
u8 D[SHA256_DIGEST_SIZE];
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
sha256_final(tfm, D);
|
||||
|
||||
memcpy(hash, D, SHA224_DIGEST_SIZE);
|
||||
memset(D, 0, SHA256_DIGEST_SIZE);
|
||||
}
|
||||
|
||||
static struct crypto_alg sha256 = {
|
||||
.cra_name = "sha256",
|
||||
.cra_driver_name= "sha256-generic",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
|
||||
@ -303,28 +329,58 @@ static struct crypto_alg alg = {
|
||||
.cra_ctxsize = sizeof(struct sha256_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_alignmask = 3,
|
||||
.cra_list = LIST_HEAD_INIT(alg.cra_list),
|
||||
.cra_list = LIST_HEAD_INIT(sha256.cra_list),
|
||||
.cra_u = { .digest = {
|
||||
.dia_digestsize = SHA256_DIGEST_SIZE,
|
||||
.dia_init = sha256_init,
|
||||
.dia_update = sha256_update,
|
||||
.dia_final = sha256_final } }
|
||||
.dia_init = sha256_init,
|
||||
.dia_update = sha256_update,
|
||||
.dia_final = sha256_final } }
|
||||
};
|
||||
|
||||
static struct crypto_alg sha224 = {
|
||||
.cra_name = "sha224",
|
||||
.cra_driver_name = "sha224-generic",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
|
||||
.cra_blocksize = SHA224_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct sha256_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_alignmask = 3,
|
||||
.cra_list = LIST_HEAD_INIT(sha224.cra_list),
|
||||
.cra_u = { .digest = {
|
||||
.dia_digestsize = SHA224_DIGEST_SIZE,
|
||||
.dia_init = sha224_init,
|
||||
.dia_update = sha256_update,
|
||||
.dia_final = sha224_final } }
|
||||
};
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
int ret = 0;
|
||||
|
||||
ret = crypto_register_alg(&sha224);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = crypto_register_alg(&sha256);
|
||||
|
||||
if (ret < 0)
|
||||
crypto_unregister_alg(&sha224);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
crypto_unregister_alg(&sha224);
|
||||
crypto_unregister_alg(&sha256);
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
module_exit(fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
|
||||
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
|
||||
|
||||
MODULE_ALIAS("sha224");
|
||||
MODULE_ALIAS("sha256");
|
||||
|
449	crypto/tcrypt.c
@@ -6,12 +6,16 @@
|
||||
*
|
||||
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
|
||||
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
|
||||
* Copyright (c) 2007 Nokia Siemens Networks
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* 2007-11-13 Added GCM tests
|
||||
* 2007-11-13 Added AEAD support
|
||||
* 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
|
||||
* 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
|
||||
* 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
|
||||
* 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
|
||||
@ -71,22 +75,23 @@ static unsigned int sec;
|
||||
|
||||
static int mode;
|
||||
static char *xbuf;
|
||||
static char *axbuf;
|
||||
static char *tvmem;
|
||||
|
||||
static char *check[] = {
|
||||
"des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
|
||||
"twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
|
||||
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
|
||||
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
|
||||
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
|
||||
"arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
|
||||
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
|
||||
"camellia", "seed", NULL
|
||||
"camellia", "seed", "salsa20", "lzo", NULL
|
||||
};
|
||||
|
||||
static void hexdump(unsigned char *buf, unsigned int len)
|
||||
{
|
||||
while (len--)
|
||||
printk("%02x", *buf++);
|
||||
|
||||
printk("\n");
|
||||
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
|
||||
16, 1,
|
||||
buf, len, false);
|
||||
}
|
||||
|
||||
static void tcrypt_complete(struct crypto_async_request *req, int err)
|
||||
@ -215,6 +220,238 @@ out:
|
||||
crypto_free_hash(tfm);
|
||||
}
|
||||
|
||||
static void test_aead(char *algo, int enc, struct aead_testvec *template,
|
||||
unsigned int tcount)
|
||||
{
|
||||
unsigned int ret, i, j, k, temp;
|
||||
unsigned int tsize;
|
||||
char *q;
|
||||
struct crypto_aead *tfm;
|
||||
char *key;
|
||||
struct aead_testvec *aead_tv;
|
||||
struct aead_request *req;
|
||||
struct scatterlist sg[8];
|
||||
struct scatterlist asg[8];
|
||||
const char *e;
|
||||
struct tcrypt_result result;
|
||||
unsigned int authsize;
|
||||
|
||||
if (enc == ENCRYPT)
|
||||
e = "encryption";
|
||||
else
|
||||
e = "decryption";
|
||||
|
||||
printk(KERN_INFO "\ntesting %s %s\n", algo, e);
|
||||
|
||||
tsize = sizeof(struct aead_testvec);
|
||||
tsize *= tcount;
|
||||
|
||||
if (tsize > TVMEMSIZE) {
|
||||
printk(KERN_INFO "template (%u) too big for tvmem (%u)\n",
|
||||
tsize, TVMEMSIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
memcpy(tvmem, template, tsize);
|
||||
aead_tv = (void *)tvmem;
|
||||
|
||||
init_completion(&result.completion);
|
||||
|
||||
tfm = crypto_alloc_aead(algo, 0, 0);
|
||||
|
||||
if (IS_ERR(tfm)) {
|
||||
printk(KERN_INFO "failed to load transform for %s: %ld\n",
|
||||
algo, PTR_ERR(tfm));
|
||||
return;
|
||||
}
|
||||
|
||||
req = aead_request_alloc(tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
printk(KERN_INFO "failed to allocate request for %s\n", algo);
|
||||
goto out;
|
||||
}
|
||||
|
||||
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
tcrypt_complete, &result);
|
||||
|
||||
for (i = 0, j = 0; i < tcount; i++) {
|
||||
if (!aead_tv[i].np) {
|
||||
printk(KERN_INFO "test %u (%d bit key):\n",
|
||||
++j, aead_tv[i].klen * 8);
|
||||
|
||||
crypto_aead_clear_flags(tfm, ~0);
|
||||
if (aead_tv[i].wk)
|
||||
crypto_aead_set_flags(
|
||||
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
key = aead_tv[i].key;
|
||||
|
||||
ret = crypto_aead_setkey(tfm, key,
|
||||
aead_tv[i].klen);
|
||||
if (ret) {
|
||||
printk(KERN_INFO "setkey() failed flags=%x\n",
|
||||
crypto_aead_get_flags(tfm));
|
||||
|
||||
if (!aead_tv[i].fail)
|
||||
goto out;
|
||||
}
|
||||
|
||||
authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen);
|
||||
ret = crypto_aead_setauthsize(tfm, authsize);
|
||||
if (ret) {
|
||||
printk(KERN_INFO
|
||||
"failed to set authsize = %u\n",
|
||||
authsize);
|
||||
goto out;
|
||||
}
|
||||
|
||||
sg_init_one(&sg[0], aead_tv[i].input,
|
||||
aead_tv[i].ilen + (enc ? authsize : 0));
|
||||
|
||||
sg_init_one(&asg[0], aead_tv[i].assoc,
|
||||
aead_tv[i].alen);
|
||||
|
||||
aead_request_set_crypt(req, sg, sg,
|
||||
aead_tv[i].ilen,
|
||||
aead_tv[i].iv);
|
||||
|
||||
aead_request_set_assoc(req, asg, aead_tv[i].alen);
|
||||
|
||||
ret = enc ?
|
||||
crypto_aead_encrypt(req) :
|
||||
crypto_aead_decrypt(req);
|
||||
|
||||
switch (ret) {
|
||||
case 0:
|
||||
break;
|
||||
case -EINPROGRESS:
|
||||
case -EBUSY:
|
||||
ret = wait_for_completion_interruptible(
|
||||
&result.completion);
|
||||
if (!ret && !(ret = result.err)) {
|
||||
INIT_COMPLETION(result.completion);
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
default:
|
||||
printk(KERN_INFO "%s () failed err=%d\n",
|
||||
e, -ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
q = kmap(sg_page(&sg[0])) + sg[0].offset;
|
||||
hexdump(q, aead_tv[i].rlen);
|
||||
|
||||
printk(KERN_INFO "enc/dec: %s\n",
|
||||
memcmp(q, aead_tv[i].result,
|
||||
aead_tv[i].rlen) ? "fail" : "pass");
|
||||
}
|
||||
}
|
||||
|
||||
printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
|
||||
memset(xbuf, 0, XBUFSIZE);
|
||||
memset(axbuf, 0, XBUFSIZE);
|
||||
|
||||
for (i = 0, j = 0; i < tcount; i++) {
|
||||
if (aead_tv[i].np) {
|
||||
printk(KERN_INFO "test %u (%d bit key):\n",
|
||||
++j, aead_tv[i].klen * 8);
|
||||
|
||||
crypto_aead_clear_flags(tfm, ~0);
|
||||
if (aead_tv[i].wk)
|
||||
crypto_aead_set_flags(
|
||||
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
key = aead_tv[i].key;
|
||||
|
||||
ret = crypto_aead_setkey(tfm, key, aead_tv[i].klen);
|
||||
if (ret) {
|
||||
printk(KERN_INFO "setkey() failed flags=%x\n",
|
||||
crypto_aead_get_flags(tfm));
|
||||
|
||||
if (!aead_tv[i].fail)
|
||||
goto out;
|
||||
}
|
||||
|
||||
sg_init_table(sg, aead_tv[i].np);
|
||||
for (k = 0, temp = 0; k < aead_tv[i].np; k++) {
|
||||
memcpy(&xbuf[IDX[k]],
|
||||
aead_tv[i].input + temp,
|
||||
aead_tv[i].tap[k]);
|
||||
temp += aead_tv[i].tap[k];
|
||||
sg_set_buf(&sg[k], &xbuf[IDX[k]],
|
||||
aead_tv[i].tap[k]);
|
||||
}
|
||||
|
||||
authsize = abs(aead_tv[i].rlen - aead_tv[i].ilen);
|
||||
ret = crypto_aead_setauthsize(tfm, authsize);
|
||||
if (ret) {
|
||||
printk(KERN_INFO
|
||||
"failed to set authsize = %u\n",
|
||||
authsize);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (enc)
|
||||
sg[k - 1].length += authsize;
|
||||
|
||||
sg_init_table(asg, aead_tv[i].anp);
|
||||
for (k = 0, temp = 0; k < aead_tv[i].anp; k++) {
|
||||
memcpy(&axbuf[IDX[k]],
|
||||
aead_tv[i].assoc + temp,
|
||||
aead_tv[i].atap[k]);
|
||||
temp += aead_tv[i].atap[k];
|
||||
sg_set_buf(&asg[k], &axbuf[IDX[k]],
|
||||
aead_tv[i].atap[k]);
|
||||
}
|
||||
|
||||
aead_request_set_crypt(req, sg, sg,
|
||||
aead_tv[i].ilen,
|
||||
aead_tv[i].iv);
|
||||
|
||||
aead_request_set_assoc(req, asg, aead_tv[i].alen);
|
||||
|
||||
ret = enc ?
|
||||
crypto_aead_encrypt(req) :
|
||||
crypto_aead_decrypt(req);
|
||||
|
||||
switch (ret) {
|
||||
case 0:
|
||||
break;
|
||||
case -EINPROGRESS:
|
||||
case -EBUSY:
|
||||
ret = wait_for_completion_interruptible(
|
||||
&result.completion);
|
||||
if (!ret && !(ret = result.err)) {
|
||||
INIT_COMPLETION(result.completion);
|
||||
break;
|
||||
}
|
||||
/* fall through */
|
||||
default:
|
||||
printk(KERN_INFO "%s () failed err=%d\n",
|
||||
e, -ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (k = 0, temp = 0; k < aead_tv[i].np; k++) {
|
||||
printk(KERN_INFO "page %u\n", k);
|
||||
q = kmap(sg_page(&sg[k])) + sg[k].offset;
|
||||
hexdump(q, aead_tv[i].tap[k]);
|
||||
printk(KERN_INFO "%s\n",
|
||||
memcmp(q, aead_tv[i].result + temp,
|
||||
aead_tv[i].tap[k] -
|
||||
(k < aead_tv[i].np - 1 || enc ?
|
||||
0 : authsize)) ?
|
||||
"fail" : "pass");
|
||||
|
||||
temp += aead_tv[i].tap[k];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
crypto_free_aead(tfm);
|
||||
aead_request_free(req);
|
||||
}
|
||||
|
||||
static void test_cipher(char *algo, int enc,
|
||||
struct cipher_testvec *template, unsigned int tcount)
|
||||
{
|
||||
@ -237,15 +474,11 @@ static void test_cipher(char *algo, int enc,
|
||||
printk("\ntesting %s %s\n", algo, e);
|
||||
|
||||
tsize = sizeof (struct cipher_testvec);
|
||||
tsize *= tcount;
|
||||
|
||||
if (tsize > TVMEMSIZE) {
|
||||
printk("template (%u) too big for tvmem (%u)\n", tsize,
|
||||
TVMEMSIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
memcpy(tvmem, template, tsize);
|
||||
cipher_tv = (void *)tvmem;
|
||||
|
||||
init_completion(&result.completion);
|
||||
@ -269,33 +502,34 @@ static void test_cipher(char *algo, int enc,
|
||||
|
||||
j = 0;
|
||||
for (i = 0; i < tcount; i++) {
|
||||
if (!(cipher_tv[i].np)) {
|
||||
memcpy(cipher_tv, &template[i], tsize);
|
||||
if (!(cipher_tv->np)) {
|
||||
j++;
|
||||
printk("test %u (%d bit key):\n",
|
||||
j, cipher_tv[i].klen * 8);
|
||||
j, cipher_tv->klen * 8);
|
||||
|
||||
crypto_ablkcipher_clear_flags(tfm, ~0);
|
||||
if (cipher_tv[i].wk)
|
||||
if (cipher_tv->wk)
|
||||
crypto_ablkcipher_set_flags(
|
||||
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
key = cipher_tv[i].key;
|
||||
key = cipher_tv->key;
|
||||
|
||||
ret = crypto_ablkcipher_setkey(tfm, key,
|
||||
cipher_tv[i].klen);
|
||||
cipher_tv->klen);
|
||||
if (ret) {
|
||||
printk("setkey() failed flags=%x\n",
|
||||
crypto_ablkcipher_get_flags(tfm));
|
||||
|
||||
if (!cipher_tv[i].fail)
|
||||
if (!cipher_tv->fail)
|
||||
goto out;
|
||||
}
|
||||
|
||||
sg_init_one(&sg[0], cipher_tv[i].input,
|
||||
cipher_tv[i].ilen);
|
||||
sg_init_one(&sg[0], cipher_tv->input,
|
||||
cipher_tv->ilen);
|
||||
|
||||
ablkcipher_request_set_crypt(req, sg, sg,
|
||||
cipher_tv[i].ilen,
|
||||
cipher_tv[i].iv);
|
||||
cipher_tv->ilen,
|
||||
cipher_tv->iv);
|
||||
|
||||
ret = enc ?
|
||||
crypto_ablkcipher_encrypt(req) :
|
||||
@ -319,11 +553,11 @@ static void test_cipher(char *algo, int enc,
|
||||
}
|
||||
|
||||
q = kmap(sg_page(&sg[0])) + sg[0].offset;
|
||||
hexdump(q, cipher_tv[i].rlen);
|
||||
hexdump(q, cipher_tv->rlen);
|
||||
|
||||
printk("%s\n",
|
||||
memcmp(q, cipher_tv[i].result,
|
||||
cipher_tv[i].rlen) ? "fail" : "pass");
|
||||
memcmp(q, cipher_tv->result,
|
||||
cipher_tv->rlen) ? "fail" : "pass");
|
||||
}
|
||||
}
|
||||
|
||||
@ -332,41 +566,42 @@ static void test_cipher(char *algo, int enc,
|
||||
|
||||
j = 0;
|
||||
for (i = 0; i < tcount; i++) {
|
||||
if (cipher_tv[i].np) {
|
||||
memcpy(cipher_tv, &template[i], tsize);
|
||||
if (cipher_tv->np) {
|
||||
j++;
|
||||
printk("test %u (%d bit key):\n",
|
||||
j, cipher_tv[i].klen * 8);
|
||||
j, cipher_tv->klen * 8);
|
||||
|
||||
crypto_ablkcipher_clear_flags(tfm, ~0);
|
||||
if (cipher_tv[i].wk)
|
||||
if (cipher_tv->wk)
|
||||
crypto_ablkcipher_set_flags(
|
||||
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
key = cipher_tv[i].key;
|
||||
key = cipher_tv->key;
|
||||
|
||||
ret = crypto_ablkcipher_setkey(tfm, key,
|
||||
cipher_tv[i].klen);
|
||||
cipher_tv->klen);
|
||||
if (ret) {
|
||||
printk("setkey() failed flags=%x\n",
|
||||
crypto_ablkcipher_get_flags(tfm));
|
||||
|
||||
if (!cipher_tv[i].fail)
|
||||
if (!cipher_tv->fail)
|
||||
goto out;
|
||||
}
|
||||
|
||||
temp = 0;
|
||||
sg_init_table(sg, cipher_tv[i].np);
|
||||
for (k = 0; k < cipher_tv[i].np; k++) {
|
||||
sg_init_table(sg, cipher_tv->np);
|
||||
for (k = 0; k < cipher_tv->np; k++) {
|
||||
memcpy(&xbuf[IDX[k]],
|
||||
cipher_tv[i].input + temp,
|
||||
cipher_tv[i].tap[k]);
|
||||
temp += cipher_tv[i].tap[k];
|
||||
cipher_tv->input + temp,
|
||||
cipher_tv->tap[k]);
|
||||
temp += cipher_tv->tap[k];
|
||||
sg_set_buf(&sg[k], &xbuf[IDX[k]],
|
||||
cipher_tv[i].tap[k]);
|
||||
cipher_tv->tap[k]);
|
||||
}
|
||||
|
||||
ablkcipher_request_set_crypt(req, sg, sg,
|
||||
cipher_tv[i].ilen,
|
||||
cipher_tv[i].iv);
|
||||
cipher_tv->ilen,
|
||||
cipher_tv->iv);
|
||||
|
||||
ret = enc ?
|
||||
crypto_ablkcipher_encrypt(req) :
|
||||
@ -390,15 +625,15 @@ static void test_cipher(char *algo, int enc,
|
||||
}
|
||||
|
||||
temp = 0;
|
||||
for (k = 0; k < cipher_tv[i].np; k++) {
|
||||
for (k = 0; k < cipher_tv->np; k++) {
|
||||
printk("page %u\n", k);
|
||||
q = kmap(sg_page(&sg[k])) + sg[k].offset;
|
||||
hexdump(q, cipher_tv[i].tap[k]);
|
||||
hexdump(q, cipher_tv->tap[k]);
|
||||
printk("%s\n",
|
||||
memcmp(q, cipher_tv[i].result + temp,
|
||||
cipher_tv[i].tap[k]) ? "fail" :
|
||||
memcmp(q, cipher_tv->result + temp,
|
||||
cipher_tv->tap[k]) ? "fail" :
|
||||
"pass");
|
||||
temp += cipher_tv[i].tap[k];
|
||||
temp += cipher_tv->tap[k];
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -800,7 +1035,8 @@ out:
|
||||
crypto_free_hash(tfm);
|
||||
}
|
||||
|
||||
static void test_deflate(void)
|
||||
static void test_comp(char *algo, struct comp_testvec *ctemplate,
|
||||
struct comp_testvec *dtemplate, int ctcount, int dtcount)
|
||||
{
|
||||
unsigned int i;
|
||||
char result[COMP_BUF_SIZE];
|
||||
@ -808,25 +1044,26 @@ static void test_deflate(void)
|
||||
struct comp_testvec *tv;
|
||||
unsigned int tsize;
|
||||
|
||||
printk("\ntesting deflate compression\n");
|
||||
printk("\ntesting %s compression\n", algo);
|
||||
|
||||
tsize = sizeof (deflate_comp_tv_template);
|
||||
tsize = sizeof(struct comp_testvec);
|
||||
tsize *= ctcount;
|
||||
if (tsize > TVMEMSIZE) {
|
||||
printk("template (%u) too big for tvmem (%u)\n", tsize,
|
||||
TVMEMSIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
memcpy(tvmem, deflate_comp_tv_template, tsize);
|
||||
memcpy(tvmem, ctemplate, tsize);
|
||||
tv = (void *)tvmem;
|
||||
|
||||
tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
|
||||
tfm = crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC);
|
||||
if (IS_ERR(tfm)) {
|
||||
printk("failed to load transform for deflate\n");
|
||||
printk("failed to load transform for %s\n", algo);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < DEFLATE_COMP_TEST_VECTORS; i++) {
|
||||
for (i = 0; i < ctcount; i++) {
|
||||
int ilen, ret, dlen = COMP_BUF_SIZE;
|
||||
|
||||
printk("test %u:\n", i + 1);
|
||||
@ -845,19 +1082,20 @@ static void test_deflate(void)
|
||||
ilen, dlen);
|
||||
}
|
||||
|
||||
printk("\ntesting deflate decompression\n");
|
||||
printk("\ntesting %s decompression\n", algo);
|
||||
|
||||
tsize = sizeof (deflate_decomp_tv_template);
|
||||
tsize = sizeof(struct comp_testvec);
|
||||
tsize *= dtcount;
|
||||
if (tsize > TVMEMSIZE) {
|
||||
printk("template (%u) too big for tvmem (%u)\n", tsize,
|
||||
TVMEMSIZE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
memcpy(tvmem, deflate_decomp_tv_template, tsize);
|
||||
memcpy(tvmem, dtemplate, tsize);
|
||||
tv = (void *)tvmem;
|
||||
|
||||
for (i = 0; i < DEFLATE_DECOMP_TEST_VECTORS; i++) {
|
||||
for (i = 0; i < dtcount; i++) {
|
||||
int ilen, ret, dlen = COMP_BUF_SIZE;
|
||||
|
||||
printk("test %u:\n", i + 1);
|
||||
@ -918,6 +1156,8 @@ static void do_test(void)
|
||||
|
||||
test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
|
||||
|
||||
test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
|
||||
|
||||
test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS);
|
||||
|
||||
//BLOWFISH
|
||||
@ -969,6 +1209,18 @@ static void do_test(void)
|
||||
AES_XTS_ENC_TEST_VECTORS);
|
||||
test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
|
||||
AES_XTS_DEC_TEST_VECTORS);
|
||||
test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
|
||||
AES_CTR_ENC_TEST_VECTORS);
|
||||
test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
|
||||
AES_CTR_DEC_TEST_VECTORS);
|
||||
test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template,
|
||||
AES_GCM_ENC_TEST_VECTORS);
|
||||
test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
|
||||
AES_GCM_DEC_TEST_VECTORS);
|
||||
test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template,
|
||||
AES_CCM_ENC_TEST_VECTORS);
|
||||
test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
|
||||
AES_CCM_DEC_TEST_VECTORS);
|
||||
|
||||
//CAST5
|
||||
test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
|
||||
@ -1057,12 +1309,18 @@ static void do_test(void)
|
||||
test_hash("tgr192", tgr192_tv_template, TGR192_TEST_VECTORS);
|
||||
test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS);
|
||||
test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
|
||||
test_deflate();
|
||||
test_comp("deflate", deflate_comp_tv_template,
|
||||
deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
|
||||
DEFLATE_DECOMP_TEST_VECTORS);
|
||||
test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template,
|
||||
LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
|
||||
test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS);
|
||||
test_hash("hmac(md5)", hmac_md5_tv_template,
|
||||
HMAC_MD5_TEST_VECTORS);
|
||||
test_hash("hmac(sha1)", hmac_sha1_tv_template,
|
||||
HMAC_SHA1_TEST_VECTORS);
|
||||
test_hash("hmac(sha224)", hmac_sha224_tv_template,
|
||||
HMAC_SHA224_TEST_VECTORS);
|
||||
test_hash("hmac(sha256)", hmac_sha256_tv_template,
|
||||
HMAC_SHA256_TEST_VECTORS);
|
||||
test_hash("hmac(sha384)", hmac_sha384_tv_template,
|
||||
@ -1156,6 +1414,10 @@ static void do_test(void)
|
||||
AES_XTS_ENC_TEST_VECTORS);
|
||||
test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
|
||||
AES_XTS_DEC_TEST_VECTORS);
|
||||
test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
|
||||
AES_CTR_ENC_TEST_VECTORS);
|
||||
test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
|
||||
AES_CTR_DEC_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 11:
|
||||
@ -1167,7 +1429,9 @@ static void do_test(void)
|
||||
break;
|
||||
|
||||
case 13:
|
||||
test_deflate();
|
||||
test_comp("deflate", deflate_comp_tv_template,
|
||||
deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
|
||||
DEFLATE_DECOMP_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 14:
|
||||
@ -1291,6 +1555,34 @@ static void do_test(void)
|
||||
camellia_cbc_dec_tv_template,
|
||||
CAMELLIA_CBC_DEC_TEST_VECTORS);
|
||||
break;
|
||||
case 33:
|
||||
test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 34:
|
||||
test_cipher("salsa20", ENCRYPT,
|
||||
salsa20_stream_enc_tv_template,
|
||||
SALSA20_STREAM_ENC_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 35:
|
||||
test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template,
|
||||
AES_GCM_ENC_TEST_VECTORS);
|
||||
test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
|
||||
AES_GCM_DEC_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 36:
|
||||
test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template,
|
||||
LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 37:
|
||||
test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template,
|
||||
AES_CCM_ENC_TEST_VECTORS);
|
||||
test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
|
||||
AES_CCM_DEC_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 100:
|
||||
test_hash("hmac(md5)", hmac_md5_tv_template,
|
||||
@ -1317,6 +1609,15 @@ static void do_test(void)
|
||||
HMAC_SHA512_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 105:
|
||||
test_hash("hmac(sha224)", hmac_sha224_tv_template,
|
||||
HMAC_SHA224_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 106:
|
||||
test_hash("xcbc(aes)", aes_xcbc128_tv_template,
|
||||
XCBC_AES_TEST_VECTORS);
|
||||
break;
|
||||
|
||||
case 200:
|
||||
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
|
||||
@ -1400,6 +1701,11 @@ static void do_test(void)
|
||||
camellia_speed_template);
|
||||
break;
|
||||
|
||||
case 206:
|
||||
test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0,
|
||||
salsa20_speed_template);
|
||||
break;
|
||||
|
||||
case 300:
|
||||
/* fall through */
|
||||
|
||||
@ -1451,6 +1757,10 @@ static void do_test(void)
|
||||
test_hash_speed("tgr192", sec, generic_hash_speed_template);
|
||||
if (mode > 300 && mode < 400) break;
|
||||
|
||||
case 313:
|
||||
test_hash_speed("sha224", sec, generic_hash_speed_template);
|
||||
if (mode > 300 && mode < 400) break;
|
||||
|
||||
case 399:
|
||||
break;
|
||||
|
||||
@ -1467,28 +1777,37 @@ static void do_test(void)
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
int err = -ENOMEM;
|
||||
|
||||
tvmem = kmalloc(TVMEMSIZE, GFP_KERNEL);
|
||||
if (tvmem == NULL)
|
||||
return -ENOMEM;
|
||||
return err;
|
||||
|
||||
xbuf = kmalloc(XBUFSIZE, GFP_KERNEL);
|
||||
if (xbuf == NULL) {
|
||||
kfree(tvmem);
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (xbuf == NULL)
|
||||
goto err_free_tv;
|
||||
|
||||
axbuf = kmalloc(XBUFSIZE, GFP_KERNEL);
|
||||
if (axbuf == NULL)
|
||||
goto err_free_xbuf;
|
||||
|
||||
do_test();
|
||||
|
||||
kfree(xbuf);
|
||||
kfree(tvmem);
|
||||
|
||||
/* We intentionally return -EAGAIN to prevent keeping
|
||||
* the module. It does all its work from init()
|
||||
* and doesn't offer any runtime functionality
|
||||
* => we don't need it in the memory, do we?
|
||||
* -- mludvig
|
||||
*/
|
||||
return -EAGAIN;
|
||||
err = -EAGAIN;
|
||||
|
||||
kfree(axbuf);
|
||||
err_free_xbuf:
|
||||
kfree(xbuf);
|
||||
err_free_tv:
|
||||
kfree(tvmem);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
|
3419	crypto/tcrypt.h
File diff suppressed because it is too large
@ -655,84 +655,48 @@ int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
|
||||
CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
|
||||
}
|
||||
|
||||
/* Calculate whitening and round subkeys. The constants are
|
||||
* indices of subkeys, preprocessed through q0 and q1. */
|
||||
CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
|
||||
CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
|
||||
CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
|
||||
CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
|
||||
CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
|
||||
CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
|
||||
CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
|
||||
CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
|
||||
CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
|
||||
CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
|
||||
CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
|
||||
CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
|
||||
CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
|
||||
CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
|
||||
CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
|
||||
CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
|
||||
CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
|
||||
CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
|
||||
CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
|
||||
CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
|
||||
/* CALC_K256/CALC_K192/CALC_K loops were unrolled.
|
||||
* Unrolling produced x2.5 more code (+18k on i386),
|
||||
* and speeded up key setup by 7%:
|
||||
* unrolled: twofish_setkey/sec: 41128
|
||||
* loop: twofish_setkey/sec: 38148
|
||||
* CALC_K256: ~100 insns each
|
||||
* CALC_K192: ~90 insns
|
||||
* CALC_K: ~70 insns
|
||||
*/
|
||||
/* Calculate whitening and round subkeys */
|
||||
for ( i = 0; i < 8; i += 2 ) {
|
||||
CALC_K256 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
|
||||
}
|
||||
for ( i = 0; i < 32; i += 2 ) {
|
||||
CALC_K256 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
|
||||
}
|
||||
} else if (key_len == 24) { /* 192-bit key */
|
||||
/* Compute the S-boxes. */
|
||||
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
|
||||
CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
|
||||
}
|
||||
|
||||
/* Calculate whitening and round subkeys. The constants are
|
||||
* indices of subkeys, preprocessed through q0 and q1. */
|
||||
CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
|
||||
CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
|
||||
CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
|
||||
CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
|
||||
CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
|
||||
CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
|
||||
CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
|
||||
CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
|
||||
CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
|
||||
CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
|
||||
CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
|
||||
CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
|
||||
CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
|
||||
CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
|
||||
CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
|
||||
CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
|
||||
CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
|
||||
CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
|
||||
CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
|
||||
CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
|
||||
/* Calculate whitening and round subkeys */
|
||||
for ( i = 0; i < 8; i += 2 ) {
|
||||
CALC_K192 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
|
||||
}
|
||||
for ( i = 0; i < 32; i += 2 ) {
|
||||
CALC_K192 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
|
||||
}
|
||||
} else { /* 128-bit key */
|
||||
/* Compute the S-boxes. */
|
||||
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
|
||||
CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
|
||||
}
|
||||
|
||||
/* Calculate whitening and round subkeys. The constants are
|
||||
* indices of subkeys, preprocessed through q0 and q1. */
|
||||
CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
|
||||
CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
|
||||
CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
|
||||
CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
|
||||
CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
|
||||
CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
|
||||
CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
|
||||
CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
|
||||
CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
|
||||
CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
|
||||
CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
|
||||
CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
|
||||
CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
|
||||
CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
|
||||
CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
|
||||
CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
|
||||
CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
|
||||
CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
|
||||
CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
|
||||
CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
|
||||
/* Calculate whitening and round subkeys */
|
||||
for ( i = 0; i < 8; i += 2 ) {
|
||||
CALC_K (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
|
||||
}
|
||||
for ( i = 0; i < 32; i += 2 ) {
|
||||
CALC_K (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
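Note on the twofish hunk above: the unrolled CALC_K256/CALC_K192/CALC_K calls are replaced by loops indexed through q0[]/q1[] tables. Judging from the removed inline constants, the first table entries would presumably look like the sketch below; this is an inference from the hunk itself, not text copied from the patch, and the full 40-entry tables are defined elsewhere in the change:

	/* Assumed layout: q0[i]/q1[i] carry the constants the unrolled calls
	 * used to pass inline, e.g. CALC_K256(w, 0, 0xA9, 0x75, 0x67, 0xF3)
	 * becomes q0[0]=0xA9, q1[0]=0x75, q0[1]=0x67, q1[1]=0xF3. */
	static const u8 q0[] = { 0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76,
				 0x9A, 0x92, /* ... */ };
	static const u8 q1[] = { 0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8,
				 0x4A, 0xD3, /* ... */ };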
@ -19,6 +19,7 @@
|
||||
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
|
||||
*/
|
||||
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/hardirq.h>
|
||||
@ -27,7 +28,6 @@
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include "internal.h"
|
||||
|
||||
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
|
||||
0x02020202, 0x02020202, 0x02020202, 0x02020202,
|
||||
@ -307,7 +307,8 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
|
||||
case 16:
|
||||
break;
|
||||
default:
|
||||
return ERR_PTR(PTR_ERR(alg));
|
||||
inst = ERR_PTR(-EINVAL);
|
||||
goto out_put_alg;
|
||||
}
|
||||
|
||||
inst = crypto_alloc_instance("xcbc", alg);
|
||||
@ -320,10 +321,7 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
|
||||
inst->alg.cra_alignmask = alg->cra_alignmask;
|
||||
inst->alg.cra_type = &crypto_hash_type;
|
||||
|
||||
inst->alg.cra_hash.digestsize =
|
||||
(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
|
||||
CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
|
||||
alg->cra_blocksize;
|
||||
inst->alg.cra_hash.digestsize = alg->cra_blocksize;
|
||||
inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
|
||||
ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
|
||||
inst->alg.cra_init = xcbc_init_tfm;
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
|
||||
@ -52,11 +53,18 @@ MODULE_DEVICE_TABLE(pci, pci_tbl);
|
||||
static struct pci_dev *amd_pdev;
|
||||
|
||||
|
||||
static int amd_rng_data_present(struct hwrng *rng)
|
||||
static int amd_rng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
u32 pmbase = (u32)rng->priv;
|
||||
int data, i;
|
||||
|
||||
return !!(inl(pmbase + 0xF4) & 1);
|
||||
for (i = 0; i < 20; i++) {
|
||||
data = !!(inl(pmbase + 0xF4) & 1);
|
||||
if (data || !wait)
|
||||
break;
|
||||
udelay(10);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
static int amd_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
|
@ -66,11 +66,11 @@ static inline void hwrng_cleanup(struct hwrng *rng)
|
||||
rng->cleanup(rng);
|
||||
}
|
||||
|
||||
static inline int hwrng_data_present(struct hwrng *rng)
|
||||
static inline int hwrng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
if (!rng->data_present)
|
||||
return 1;
|
||||
return rng->data_present(rng);
|
||||
return rng->data_present(rng, wait);
|
||||
}
|
||||
|
||||
static inline int hwrng_data_read(struct hwrng *rng, u32 *data)
|
||||
@ -94,8 +94,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
|
||||
{
|
||||
u32 data;
|
||||
ssize_t ret = 0;
|
||||
int i, err = 0;
|
||||
int data_present;
|
||||
int err = 0;
|
||||
int bytes_read;
|
||||
|
||||
while (size) {
|
||||
@ -107,21 +106,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
|
||||
err = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
if (filp->f_flags & O_NONBLOCK) {
|
||||
data_present = hwrng_data_present(current_rng);
|
||||
} else {
|
||||
/* Some RNG require some time between data_reads to gather
|
||||
* new entropy. Poll it.
|
||||
*/
|
||||
for (i = 0; i < 20; i++) {
|
||||
data_present = hwrng_data_present(current_rng);
|
||||
if (data_present)
|
||||
break;
|
||||
udelay(10);
|
||||
}
|
||||
}
|
||||
|
||||
bytes_read = 0;
|
||||
if (data_present)
|
||||
if (hwrng_data_present(current_rng,
|
||||
!(filp->f_flags & O_NONBLOCK)))
|
||||
bytes_read = hwrng_data_read(current_rng, &data);
|
||||
mutex_unlock(&rng_mutex);
|
||||
|
||||
|
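This part of the series moves the entropy-polling loop out of rng_dev_read() and into each hw_random driver's data_present() callback via the new wait argument; the amd driver above and the drivers that follow all repeat the same idiom. A generic sketch of that idiom (helper name hypothetical; udelay() comes from <linux/delay.h>, which is exactly why each driver gains that include):

	static int poll_data_present(int (*ready)(void *priv), void *priv, int wait)
	{
		int data, i;

		for (i = 0; i < 20; i++) {
			data = ready(priv);	/* driver-specific "data ready" test */
			if (data || !wait)
				break;
			udelay(10);		/* give the RNG time to gather entropy */
		}
		return data;
	}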
@ -28,6 +28,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
|
||||
@ -61,11 +62,18 @@ static int geode_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
return 4;
|
||||
}
|
||||
|
||||
static int geode_rng_data_present(struct hwrng *rng)
|
||||
static int geode_rng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
void __iomem *mem = (void __iomem *)rng->priv;
|
||||
int data, i;
|
||||
|
||||
return !!(readl(mem + GEODE_RNG_STATUS_REG));
|
||||
for (i = 0; i < 20; i++) {
|
||||
data = !!(readl(mem + GEODE_RNG_STATUS_REG));
|
||||
if (data || !wait)
|
||||
break;
|
||||
udelay(10);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/stop_machine.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
|
||||
@ -162,11 +163,19 @@ static inline u8 hwstatus_set(void __iomem *mem,
|
||||
return hwstatus_get(mem);
|
||||
}
|
||||
|
||||
static int intel_rng_data_present(struct hwrng *rng)
|
||||
static int intel_rng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
void __iomem *mem = (void __iomem *)rng->priv;
|
||||
int data, i;
|
||||
|
||||
return !!(readb(mem + INTEL_RNG_STATUS) & INTEL_RNG_DATA_PRESENT);
|
||||
for (i = 0; i < 20; i++) {
|
||||
data = !!(readb(mem + INTEL_RNG_STATUS) &
|
||||
INTEL_RNG_DATA_PRESENT);
|
||||
if (data || !wait)
|
||||
break;
|
||||
udelay(10);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
static int intel_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
|
||||
@ -65,9 +66,17 @@ static void omap_rng_write_reg(int reg, u32 val)
|
||||
}
|
||||
|
||||
/* REVISIT: Does the status bit really work on 16xx? */
|
||||
static int omap_rng_data_present(struct hwrng *rng)
|
||||
static int omap_rng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
return omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
|
||||
int data, i;
|
||||
|
||||
for (i = 0; i < 20; i++) {
|
||||
data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
|
||||
if (data || !wait)
|
||||
break;
|
||||
udelay(10);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
static int omap_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/of_platform.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
@ -41,12 +42,19 @@
|
||||
|
||||
#define MODULE_NAME "pasemi_rng"
|
||||
|
||||
static int pasemi_rng_data_present(struct hwrng *rng)
|
||||
static int pasemi_rng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
void __iomem *rng_regs = (void __iomem *)rng->priv;
|
||||
int data, i;
|
||||
|
||||
return (in_le32(rng_regs + SDCRNG_CTL_REG)
|
||||
& SDCRNG_CTL_FVLD_M) ? 1 : 0;
|
||||
for (i = 0; i < 20; i++) {
|
||||
data = (in_le32(rng_regs + SDCRNG_CTL_REG)
|
||||
& SDCRNG_CTL_FVLD_M) ? 1 : 0;
|
||||
if (data || !wait)
|
||||
break;
|
||||
udelay(10);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/cpufeature.h>
|
||||
@ -77,10 +78,11 @@ static inline u32 xstore(u32 *addr, u32 edx_in)
|
||||
return eax_out;
|
||||
}
|
||||
|
||||
static int via_rng_data_present(struct hwrng *rng)
|
||||
static int via_rng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
u32 bytes_out;
|
||||
u32 *via_rng_datum = (u32 *)(&rng->priv);
|
||||
int i;
|
||||
|
||||
/* We choose the recommended 1-byte-per-instruction RNG rate,
|
||||
* for greater randomness at the expense of speed. Larger
|
||||
@ -95,12 +97,15 @@ static int via_rng_data_present(struct hwrng *rng)
|
||||
* completes.
|
||||
*/
|
||||
|
||||
*via_rng_datum = 0; /* paranoia, not really necessary */
|
||||
bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
|
||||
bytes_out &= VIA_XSTORE_CNT_MASK;
|
||||
if (bytes_out == 0)
|
||||
return 0;
|
||||
return 1;
|
||||
for (i = 0; i < 20; i++) {
|
||||
*via_rng_datum = 0; /* paranoia, not really necessary */
|
||||
bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
|
||||
bytes_out &= VIA_XSTORE_CNT_MASK;
|
||||
if (bytes_out || !wait)
|
||||
break;
|
||||
udelay(10);
|
||||
}
|
||||
return bytes_out ? 1 : 0;
|
||||
}
|
||||
|
||||
static int via_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
|
@@ -83,4 +83,15 @@ config ZCRYPT_MONOLITHIC
 	  that contains all parts of the crypto device driver (ap bus,
 	  request router and all the card drivers).
 
+config CRYPTO_DEV_HIFN_795X
+	tristate "Driver HIFN 795x crypto accelerator chips"
+	select CRYPTO_DES
+	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
+	depends on PCI
+	help
+	  This option allows you to have support for HIFN 795x crypto adapters.
+
+
+
 endif # CRYPTO_HW
@@ -1,3 +1,4 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
+obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
@ -13,44 +13,13 @@
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/aes.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/delay.h>
|
||||
|
||||
#include "geode-aes.h"
|
||||
|
||||
/* Register definitions */
|
||||
|
||||
#define AES_CTRLA_REG 0x0000
|
||||
|
||||
#define AES_CTRL_START 0x01
|
||||
#define AES_CTRL_DECRYPT 0x00
|
||||
#define AES_CTRL_ENCRYPT 0x02
|
||||
#define AES_CTRL_WRKEY 0x04
|
||||
#define AES_CTRL_DCA 0x08
|
||||
#define AES_CTRL_SCA 0x10
|
||||
#define AES_CTRL_CBC 0x20
|
||||
|
||||
#define AES_INTR_REG 0x0008
|
||||
|
||||
#define AES_INTRA_PENDING (1 << 16)
|
||||
#define AES_INTRB_PENDING (1 << 17)
|
||||
|
||||
#define AES_INTR_PENDING (AES_INTRA_PENDING | AES_INTRB_PENDING)
|
||||
#define AES_INTR_MASK 0x07
|
||||
|
||||
#define AES_SOURCEA_REG 0x0010
|
||||
#define AES_DSTA_REG 0x0014
|
||||
#define AES_LENA_REG 0x0018
|
||||
#define AES_WRITEKEY0_REG 0x0030
|
||||
#define AES_WRITEIV0_REG 0x0040
|
||||
|
||||
/* A very large counter that is used to gracefully bail out of an
|
||||
* operation in case of trouble
|
||||
*/
|
||||
|
||||
#define AES_OP_TIMEOUT 0x50000
|
||||
|
||||
/* Static structures */
|
||||
|
||||
static void __iomem * _iobase;
|
||||
@ -87,9 +56,10 @@ do_crypt(void *src, void *dst, int len, u32 flags)
|
||||
/* Start the operation */
|
||||
iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
|
||||
|
||||
do
|
||||
do {
|
||||
status = ioread32(_iobase + AES_INTR_REG);
|
||||
while(!(status & AES_INTRA_PENDING) && --counter);
|
||||
cpu_relax();
|
||||
} while(!(status & AES_INTRA_PENDING) && --counter);
|
||||
|
||||
/* Clear the event */
|
||||
iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
|
||||
@ -101,6 +71,7 @@ geode_aes_crypt(struct geode_aes_op *op)
|
||||
{
|
||||
u32 flags = 0;
|
||||
unsigned long iflags;
|
||||
int ret;
|
||||
|
||||
if (op->len == 0)
|
||||
return 0;
|
||||
@ -129,7 +100,8 @@ geode_aes_crypt(struct geode_aes_op *op)
|
||||
_writefield(AES_WRITEKEY0_REG, op->key);
|
||||
}
|
||||
|
||||
do_crypt(op->src, op->dst, op->len, flags);
|
||||
ret = do_crypt(op->src, op->dst, op->len, flags);
|
||||
BUG_ON(ret);
|
||||
|
||||
if (op->mode == AES_MODE_CBC)
|
||||
_readfield(AES_WRITEIV0_REG, op->iv);
|
||||
@ -141,18 +113,103 @@ geode_aes_crypt(struct geode_aes_op *op)
|
||||
|
||||
/* CRYPTO-API Functions */
|
||||
|
||||
static int
|
||||
geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
|
||||
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
unsigned int ret;
|
||||
|
||||
if (len != AES_KEY_LENGTH) {
|
||||
op->keylen = len;
|
||||
|
||||
if (len == AES_KEYSIZE_128) {
|
||||
memcpy(op->key, key, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
|
||||
/* not supported at all */
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(op->key, key, len);
|
||||
return 0;
|
||||
/*
|
||||
* The requested key size is not supported by HW, do a fallback
|
||||
*/
|
||||
op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
|
||||
op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
ret = crypto_cipher_setkey(op->fallback.cip, key, len);
|
||||
if (ret) {
|
||||
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
|
||||
tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int len)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
unsigned int ret;
|
||||
|
||||
op->keylen = len;
|
||||
|
||||
if (len == AES_KEYSIZE_128) {
|
||||
memcpy(op->key, key, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
|
||||
/* not supported at all */
|
||||
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* The requested key size is not supported by HW, do a fallback
|
||||
*/
|
||||
op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
|
||||
op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
|
||||
|
||||
ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
|
||||
if (ret) {
|
||||
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
|
||||
tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int fallback_blk_dec(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
unsigned int ret;
|
||||
struct crypto_blkcipher *tfm;
|
||||
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
tfm = desc->tfm;
|
||||
desc->tfm = op->fallback.blk;
|
||||
|
||||
ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
|
||||
|
||||
desc->tfm = tfm;
|
||||
return ret;
|
||||
}
|
||||
static int fallback_blk_enc(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
unsigned int ret;
|
||||
struct crypto_blkcipher *tfm;
|
||||
struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
tfm = desc->tfm;
|
||||
desc->tfm = op->fallback.blk;
|
||||
|
||||
ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
|
||||
|
||||
desc->tfm = tfm;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -160,8 +217,10 @@ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
if ((out == NULL) || (in == NULL))
|
||||
if (unlikely(op->keylen != AES_KEYSIZE_128)) {
|
||||
crypto_cipher_encrypt_one(op->fallback.cip, out, in);
|
||||
return;
|
||||
}
|
||||
|
||||
op->src = (void *) in;
|
||||
op->dst = (void *) out;
|
||||
@ -179,8 +238,10 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
if ((out == NULL) || (in == NULL))
|
||||
if (unlikely(op->keylen != AES_KEYSIZE_128)) {
|
||||
crypto_cipher_decrypt_one(op->fallback.cip, out, in);
|
||||
return;
|
||||
}
|
||||
|
||||
op->src = (void *) in;
|
||||
op->dst = (void *) out;
|
||||
@ -192,24 +253,50 @@ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
geode_aes_crypt(op);
|
||||
}
|
||||
|
||||
static int fallback_init_cip(struct crypto_tfm *tfm)
|
||||
{
|
||||
const char *name = tfm->__crt_alg->cra_name;
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
op->fallback.cip = crypto_alloc_cipher(name, 0,
|
||||
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
|
||||
|
||||
if (IS_ERR(op->fallback.cip)) {
|
||||
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
|
||||
return PTR_ERR(op->fallback.blk);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fallback_exit_cip(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_cipher(op->fallback.cip);
|
||||
op->fallback.cip = NULL;
|
||||
}
|
||||
|
||||
static struct crypto_alg geode_alg = {
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "geode-aes-128",
|
||||
.cra_priority = 300,
|
||||
.cra_alignmask = 15,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_name = "aes",
|
||||
.cra_driver_name = "geode-aes",
|
||||
.cra_priority = 300,
|
||||
.cra_alignmask = 15,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_init = fallback_init_cip,
|
||||
.cra_exit = fallback_exit_cip,
|
||||
.cra_blocksize = AES_MIN_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct geode_aes_op),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_KEY_LENGTH,
|
||||
.cia_max_keysize = AES_KEY_LENGTH,
|
||||
.cia_setkey = geode_setkey,
|
||||
.cia_encrypt = geode_encrypt,
|
||||
.cia_decrypt = geode_decrypt
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_alg.cra_list),
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = AES_MIN_KEY_SIZE,
|
||||
.cia_max_keysize = AES_MAX_KEY_SIZE,
|
||||
.cia_setkey = geode_setkey_cip,
|
||||
.cia_encrypt = geode_encrypt,
|
||||
.cia_decrypt = geode_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -223,8 +310,12 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
if (unlikely(op->keylen != AES_KEYSIZE_128))
|
||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
op->iv = walk.iv;
|
||||
|
||||
while((nbytes = walk.nbytes)) {
|
||||
op->src = walk.src.virt.addr,
|
||||
@ -233,13 +324,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
|
||||
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
|
||||
op->dir = AES_DIR_DECRYPT;
|
||||
|
||||
memcpy(op->iv, walk.iv, AES_IV_LENGTH);
|
||||
|
||||
ret = geode_aes_crypt(op);
|
||||
|
||||
memcpy(walk.iv, op->iv, AES_IV_LENGTH);
|
||||
nbytes -= ret;
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
@ -255,8 +342,12 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
if (unlikely(op->keylen != AES_KEYSIZE_128))
|
||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
op->iv = walk.iv;
|
||||
|
||||
while((nbytes = walk.nbytes)) {
|
||||
op->src = walk.src.virt.addr,
|
||||
@ -265,8 +356,6 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
|
||||
op->dir = AES_DIR_ENCRYPT;
|
||||
|
||||
memcpy(op->iv, walk.iv, AES_IV_LENGTH);
|
||||
|
||||
ret = geode_aes_crypt(op);
|
||||
nbytes -= ret;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
@ -275,22 +364,49 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int fallback_init_blk(struct crypto_tfm *tfm)
|
||||
{
|
||||
const char *name = tfm->__crt_alg->cra_name;
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
op->fallback.blk = crypto_alloc_blkcipher(name, 0,
|
||||
CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
|
||||
|
||||
if (IS_ERR(op->fallback.blk)) {
|
||||
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
|
||||
return PTR_ERR(op->fallback.blk);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fallback_exit_blk(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_blkcipher(op->fallback.blk);
|
||||
op->fallback.blk = NULL;
|
||||
}
|
||||
|
||||
static struct crypto_alg geode_cbc_alg = {
|
||||
.cra_name = "cbc(aes)",
|
||||
.cra_driver_name = "cbc-aes-geode-128",
|
||||
.cra_driver_name = "cbc-aes-geode",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_init = fallback_init_blk,
|
||||
.cra_exit = fallback_exit_blk,
|
||||
.cra_blocksize = AES_MIN_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct geode_aes_op),
|
||||
.cra_alignmask = 15,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_KEY_LENGTH,
|
||||
.max_keysize = AES_KEY_LENGTH,
|
||||
.setkey = geode_setkey,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_cbc_alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = geode_setkey_blk,
|
||||
.encrypt = geode_cbc_encrypt,
|
||||
.decrypt = geode_cbc_decrypt,
|
||||
.ivsize = AES_IV_LENGTH,
|
||||
@ -307,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
if (unlikely(op->keylen != AES_KEYSIZE_128))
|
||||
return fallback_blk_dec(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
@ -334,6 +453,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk walk;
|
||||
int err, ret;
|
||||
|
||||
if (unlikely(op->keylen != AES_KEYSIZE_128))
|
||||
return fallback_blk_enc(desc, dst, src, nbytes);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
@ -353,28 +475,31 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
|
||||
}
|
||||
|
||||
static struct crypto_alg geode_ecb_alg = {
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-aes-geode-128",
|
||||
.cra_name = "ecb(aes)",
|
||||
.cra_driver_name = "ecb-aes-geode",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_init = fallback_init_blk,
|
||||
.cra_exit = fallback_exit_blk,
|
||||
.cra_blocksize = AES_MIN_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct geode_aes_op),
|
||||
.cra_alignmask = 15,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_KEY_LENGTH,
|
||||
.max_keysize = AES_KEY_LENGTH,
|
||||
.setkey = geode_setkey,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(geode_ecb_alg.cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = AES_MIN_KEY_SIZE,
|
||||
.max_keysize = AES_MAX_KEY_SIZE,
|
||||
.setkey = geode_setkey_blk,
|
||||
.encrypt = geode_ecb_encrypt,
|
||||
.decrypt = geode_ecb_decrypt,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static void
|
||||
static void __devexit
|
||||
geode_aes_remove(struct pci_dev *dev)
|
||||
{
|
||||
crypto_unregister_alg(&geode_alg);
|
||||
@ -389,7 +514,7 @@ geode_aes_remove(struct pci_dev *dev)
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
static int __devinit
|
||||
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
||||
{
|
||||
int ret;
|
||||
@ -397,7 +522,7 @@ geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
||||
if ((ret = pci_enable_device(dev)))
|
||||
return ret;
|
||||
|
||||
if ((ret = pci_request_regions(dev, "geode-aes-128")))
|
||||
if ((ret = pci_request_regions(dev, "geode-aes")))
|
||||
goto eenable;
|
||||
|
||||
_iobase = pci_iomap(dev, 0, 0);
|
||||
@ -472,7 +597,6 @@ geode_aes_exit(void)
|
||||
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
|
||||
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("aes");
|
||||
|
||||
module_init(geode_aes_init);
|
||||
module_exit(geode_aes_exit);
|
||||
|
@@ -9,9 +9,9 @@
 #ifndef _GEODE_AES_H_
 #define _GEODE_AES_H_
 
-#define AES_KEY_LENGTH 16
+/* driver logic flags */
 #define AES_IV_LENGTH 16
-
+#define AES_KEY_LENGTH 16
 #define AES_MIN_BLOCK_SIZE 16
 
 #define AES_MODE_ECB 0
@@ -22,6 +22,38 @@
 
 #define AES_FLAGS_HIDDENKEY (1 << 0)
 
+/* Register definitions */
+
+#define AES_CTRLA_REG	0x0000
+
+#define AES_CTRL_START	0x01
+#define AES_CTRL_DECRYPT	0x00
+#define AES_CTRL_ENCRYPT	0x02
+#define AES_CTRL_WRKEY	0x04
+#define AES_CTRL_DCA	0x08
+#define AES_CTRL_SCA	0x10
+#define AES_CTRL_CBC	0x20
+
+#define AES_INTR_REG	0x0008
+
+#define AES_INTRA_PENDING (1 << 16)
+#define AES_INTRB_PENDING (1 << 17)
+
+#define AES_INTR_PENDING (AES_INTRA_PENDING | AES_INTRB_PENDING)
+#define AES_INTR_MASK	0x07
+
+#define AES_SOURCEA_REG	0x0010
+#define AES_DSTA_REG	0x0014
+#define AES_LENA_REG	0x0018
+#define AES_WRITEKEY0_REG	0x0030
+#define AES_WRITEIV0_REG	0x0040
+
+/* A very large counter that is used to gracefully bail out of an
+ * operation in case of trouble
+ */
+
+#define AES_OP_TIMEOUT	0x50000
+
 struct geode_aes_op {
 
 	void *src;
@@ -33,7 +65,13 @@ struct geode_aes_op {
 	int len;
 
 	u8 key[AES_KEY_LENGTH];
-	u8 iv[AES_IV_LENGTH];
+	u8 *iv;
+
+	union {
+		struct crypto_blkcipher *blk;
+		struct crypto_cipher *cip;
+	} fallback;
+	u32 keylen;
 };
 
 #endif
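The fallback union added to struct geode_aes_op is what carries the new software path: the Geode LX engine only handles 128-bit keys, so for 192- and 256-bit keys the driver hands the whole operation to a software cipher allocated with CRYPTO_ALG_NEED_FALLBACK. A condensed sketch of that setkey logic (it follows the driver's names but omits the flag propagation and is not the full code):

/* Condensed sketch of the geode-aes fallback pattern: keep the hardware
 * path for 128-bit keys, delegate everything else to the fallback tfm
 * allocated in cra_init. */
static int sketch_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->keylen = len;
	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);	/* hardware can take it */
		return 0;
	}
	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
		return -EINVAL;		/* not a valid AES key size at all */

	/* Unsupported by the engine: program the software fallback instead. */
	return crypto_cipher_setkey(op->fallback.cip, key, len);
}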
drivers/crypto/hifn_795x.c (new file, 2838 lines)
File diff suppressed because it is too large
@@ -44,6 +44,7 @@
  */
 
 #include <crypto/algapi.h>
+#include <crypto/aes.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -53,9 +54,6 @@
 #include <asm/byteorder.h>
 #include "padlock.h"
 
-#define AES_MIN_KEY_SIZE	16	/* in uint8_t units */
-#define AES_MAX_KEY_SIZE	32	/* ditto */
-#define AES_BLOCK_SIZE		16	/* ditto */
 #define AES_EXTENDED_KEY_SIZE	64	/* in uint32_t units */
 #define AES_EXTENDED_KEY_SIZE_B	(AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
 
@@ -419,6 +417,11 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 /* ====== Encryption/decryption routines ====== */
 
 /* These are the real call to PadLock. */
+static inline void padlock_reset_key(void)
+{
+	asm volatile ("pushfl; popfl");
+}
+
 static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
 				  void *control_word)
 {
@@ -439,8 +442,6 @@ static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
 static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
 			     struct cword *cword)
 {
-	asm volatile ("pushfl; popfl");
-
 	/* padlock_xcrypt requires at least two blocks of data. */
 	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
 		       (PAGE_SIZE - 1)))) {
@@ -459,7 +460,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 		return;
 	}
 
-	asm volatile ("pushfl; popfl");	/* enforce key reload. */
 	asm volatile ("test $1, %%cl;"
 		      "je 1f;"
 		      "lea -1(%%ecx), %%eax;"
@@ -476,8 +476,6 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 				     u8 *iv, void *control_word, u32 count)
 {
-	/* Enforce key reload. */
-	asm volatile ("pushfl; popfl");
 	/* rep xcryptcbc */
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
 		      : "+S" (input), "+D" (output), "+a" (iv)
@@ -488,12 +486,14 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
+	padlock_reset_key();
 	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	struct aes_ctx *ctx = aes_ctx(tfm);
+	padlock_reset_key();
 	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
 }
 
@@ -526,6 +526,8 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err;
 
+	padlock_reset_key();
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -548,6 +550,8 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err;
 
+	padlock_reset_key();
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -592,6 +596,8 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err;
 
+	padlock_reset_key();
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -616,6 +622,8 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 	struct blkcipher_walk walk;
 	int err;
 
+	padlock_reset_key();
+
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
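The idea behind padlock_reset_key() is that the PadLock engine caches the expanded key and only reloads it when EFLAGS is touched, so resetting once per ECB/CBC request, rather than before every block, avoids a reload inside the walk loop. A caller-side sketch of the resulting shape (not the driver's full code):

/* Sketch only: one EFLAGS touch per request, then the xcrypt loop can
 * run over the whole scatterlist walk without per-block key reloads. */
static int sketch_ecb_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	padlock_reset_key();
	/* ... blkcipher_walk_virt() loop calling padlock_xcrypt_ecb() ... */
	return 0;
}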
include/crypto/aead.h (new file, 105 lines)
@ -0,0 +1,105 @@
|
||||
/*
|
||||
* AEAD: Authenticated Encryption with Associated Data
|
||||
*
|
||||
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _CRYPTO_AEAD_H
|
||||
#define _CRYPTO_AEAD_H
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/**
|
||||
* struct aead_givcrypt_request - AEAD request with IV generation
|
||||
* @seq: Sequence number for IV generation
|
||||
* @giv: Space for generated IV
|
||||
* @areq: The AEAD request itself
|
||||
*/
|
||||
struct aead_givcrypt_request {
|
||||
u64 seq;
|
||||
u8 *giv;
|
||||
|
||||
struct aead_request areq;
|
||||
};
|
||||
|
||||
static inline struct crypto_aead *aead_givcrypt_reqtfm(
|
||||
struct aead_givcrypt_request *req)
|
||||
{
|
||||
return crypto_aead_reqtfm(&req->areq);
|
||||
}
|
||||
|
||||
static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
|
||||
return crt->givencrypt(req);
|
||||
};
|
||||
|
||||
static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
|
||||
{
|
||||
struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
|
||||
return crt->givdecrypt(req);
|
||||
};
|
||||
|
||||
static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
|
||||
struct crypto_aead *tfm)
|
||||
{
|
||||
req->areq.base.tfm = crypto_aead_tfm(tfm);
|
||||
}
|
||||
|
||||
static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
|
||||
struct crypto_aead *tfm, gfp_t gfp)
|
||||
{
|
||||
struct aead_givcrypt_request *req;
|
||||
|
||||
req = kmalloc(sizeof(struct aead_givcrypt_request) +
|
||||
crypto_aead_reqsize(tfm), gfp);
|
||||
|
||||
if (likely(req))
|
||||
aead_givcrypt_set_tfm(req, tfm);
|
||||
|
||||
return req;
|
||||
}
|
||||
|
||||
static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
|
||||
{
|
||||
kfree(req);
|
||||
}
|
||||
|
||||
static inline void aead_givcrypt_set_callback(
|
||||
struct aead_givcrypt_request *req, u32 flags,
|
||||
crypto_completion_t complete, void *data)
|
||||
{
|
||||
aead_request_set_callback(&req->areq, flags, complete, data);
|
||||
}
|
||||
|
||||
static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
|
||||
struct scatterlist *src,
|
||||
struct scatterlist *dst,
|
||||
unsigned int nbytes, void *iv)
|
||||
{
|
||||
aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
|
||||
}
|
||||
|
||||
static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
|
||||
struct scatterlist *assoc,
|
||||
unsigned int assoclen)
|
||||
{
|
||||
aead_request_set_assoc(&req->areq, assoc, assoclen);
|
||||
}
|
||||
|
||||
static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
|
||||
u8 *giv, u64 seq)
|
||||
{
|
||||
req->giv = giv;
|
||||
req->seq = seq;
|
||||
}
|
||||
|
||||
#endif /* _CRYPTO_AEAD_H */
|
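The givcrypt helpers above mirror the existing aead_request_* API; a caller that wants the transform to generate its own IV fills in the giv/seq pair instead of supplying one. A rough usage sketch, with the tfm, scatterlists, sequence number and completion callback assumed to come from the caller (e.g. an IPsec-style user):

/* Rough usage sketch of the IV-generating AEAD interface added above. */
static int sketch_seal(struct crypto_aead *tfm, struct scatterlist *assoc,
		       unsigned int assoclen, struct scatterlist *src,
		       struct scatterlist *dst, unsigned int nbytes,
		       u8 *iv, u64 seq, crypto_completion_t done)
{
	struct aead_givcrypt_request *req;
	int err;

	req = aead_givcrypt_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	aead_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, done, NULL);
	aead_givcrypt_set_assoc(req, assoc, assoclen);
	aead_givcrypt_set_crypt(req, src, dst, nbytes, iv);
	aead_givcrypt_set_giv(req, iv, seq);	/* generated IV lands in iv[] */

	err = crypto_aead_givencrypt(req);	/* may return -EINPROGRESS */
	if (err != -EINPROGRESS)
		aead_givcrypt_free(req);
	return err;
}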
include/crypto/aes.h (new file, 31 lines)
@@ -0,0 +1,31 @@
+/*
+ * Common values for AES algorithms
+ */
+
+#ifndef _CRYPTO_AES_H
+#define _CRYPTO_AES_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define AES_MIN_KEY_SIZE	16
+#define AES_MAX_KEY_SIZE	32
+#define AES_KEYSIZE_128		16
+#define AES_KEYSIZE_192		24
+#define AES_KEYSIZE_256		32
+#define AES_BLOCK_SIZE		16
+
+struct crypto_aes_ctx {
+	u32 key_length;
+	u32 key_enc[60];
+	u32 key_dec[60];
+};
+
+extern u32 crypto_ft_tab[4][256];
+extern u32 crypto_fl_tab[4][256];
+extern u32 crypto_it_tab[4][256];
+extern u32 crypto_il_tab[4][256];
+
+int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+		unsigned int key_len);
+#endif
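With the key sizes and crypto_aes_set_key() exported from one shared header, a hardware driver can validate keys and reuse the generic key expansion without duplicating the defines. A hedged sketch; the hw_load_key() call is hypothetical and only stands in for whatever the hardware needs:

/* Sketch: driver setkey built on the shared AES header. */
static int sketch_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int len)
{
	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_192 &&
	    len != AES_KEYSIZE_256)
		return -EINVAL;

	err = crypto_aes_set_key(tfm, key, len);	/* fills key_enc/key_dec */
	if (err)
		return err;

	hw_load_key(ctx->key_enc, ctx->key_length);	/* hypothetical */
	return 0;
}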
@@ -111,8 +111,15 @@ void crypto_drop_spawn(struct crypto_spawn *spawn);
 struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
 				    u32 mask);
 
+static inline void crypto_set_spawn(struct crypto_spawn *spawn,
+				    struct crypto_instance *inst)
+{
+	spawn->inst = inst;
+}
+
 struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
 int crypto_check_attr_type(struct rtattr **tb, u32 type);
+const char *crypto_attr_alg_name(struct rtattr *rta);
 struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask);
 int crypto_attr_u32(struct rtattr *rta, u32 *num);
 struct crypto_instance *crypto_alloc_instance(const char *name,
@@ -124,6 +131,10 @@ int crypto_enqueue_request(struct crypto_queue *queue,
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
 
+/* These functions require the input/output to be aligned as u32. */
+void crypto_inc(u8 *a, unsigned int size);
+void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
+
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err);
 int blkcipher_walk_virt(struct blkcipher_desc *desc,
@@ -187,20 +198,11 @@ static inline struct crypto_instance *crypto_aead_alg_instance(
 	return crypto_tfm_alg_instance(&aead->base);
 }
 
-static inline struct crypto_ablkcipher *crypto_spawn_ablkcipher(
-	struct crypto_spawn *spawn)
-{
-	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
-	u32 mask = CRYPTO_ALG_TYPE_MASK;
-
-	return __crypto_ablkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
-}
-
 static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
 	struct crypto_spawn *spawn)
 {
 	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
-	u32 mask = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC;
+	u32 mask = CRYPTO_ALG_TYPE_MASK;
 
 	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
 }
@@ -303,5 +305,14 @@ static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
 	return crypto_attr_alg(tb[1], type, mask);
 }
 
+/*
+ * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
+ * Otherwise returns zero.
+ */
+static inline int crypto_requires_sync(u32 type, u32 mask)
+{
+	return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
+}
+
 #endif /* _CRYPTO_ALGAPI_H */
 
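crypto_inc() and crypto_xor() are the building blocks a counter-mode template needs: bump a big-endian counter block, then XOR the keystream into the data. A minimal sketch of that loop, assuming block-aligned input and a block size of at most 16 bytes; crypto_cipher_encrypt_one() stands in for the single-block call a real template would make:

/* Minimal CTR-style sketch using the new helpers. */
static void sketch_ctr_crypt(struct crypto_cipher *cip, u8 *ctrblk,
			     u8 *data, unsigned int blocks,
			     unsigned int bsize)
{
	u8 keystream[16];	/* sketch assumes bsize <= 16 */

	while (blocks--) {
		crypto_cipher_encrypt_one(cip, keystream, ctrblk);
		crypto_xor(data, keystream, bsize);	/* u32-aligned buffers */
		crypto_inc(ctrblk, bsize);		/* big-endian increment */
		data += bsize;
	}
}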
include/crypto/authenc.h (new file, 27 lines)
@@ -0,0 +1,27 @@
+/*
+ * Authenc: Simple AEAD wrapper for IPsec
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AUTHENC_H
+#define _CRYPTO_AUTHENC_H
+
+#include <linux/types.h>
+
+enum {
+	CRYPTO_AUTHENC_KEYA_UNSPEC,
+	CRYPTO_AUTHENC_KEYA_PARAM,
+};
+
+struct crypto_authenc_key_param {
+	__be32 enckeylen;
+};
+
+#endif /* _CRYPTO_AUTHENC_H */
+
include/crypto/ctr.h (new file, 20 lines)
@@ -0,0 +1,20 @@
+/*
+ * CTR: Counter mode
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_CTR_H
+#define _CRYPTO_CTR_H
+
+#define CTR_RFC3686_NONCE_SIZE 4
+#define CTR_RFC3686_IV_SIZE 8
+#define CTR_RFC3686_BLOCK_SIZE 16
+
+#endif /* _CRYPTO_CTR_H */
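The three constants describe how rfc3686(ctr(aes)) assembles its 16-byte counter block: a 4-byte nonce taken from the key material, the 8-byte per-request IV, and a 4-byte block counter that starts at 1. Illustrated as a small sketch:

/* Sketch of the RFC 3686 counter-block layout implied by the constants:
 * | nonce (4) | per-request IV (8) | block counter, starts at 1 (4) |  */
static void sketch_rfc3686_block(u8 *ctrblk, const u8 *nonce, const u8 *iv)
{
	memcpy(ctrblk, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(ctrblk + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
	/* initial block counter is 1, big endian */
	*(__be32 *)(ctrblk + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);
}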
include/crypto/des.h (new file, 19 lines)
@@ -0,0 +1,19 @@
+/*
+ * DES & Triple DES EDE Cipher Algorithms.
+ */
+
+#ifndef __CRYPTO_DES_H
+#define __CRYPTO_DES_H
+
+#define DES_KEY_SIZE		8
+#define DES_EXPKEY_WORDS	32
+#define DES_BLOCK_SIZE		8
+
+#define DES3_EDE_KEY_SIZE	(3 * DES_KEY_SIZE)
+#define DES3_EDE_EXPKEY_WORDS	(3 * DES_EXPKEY_WORDS)
+#define DES3_EDE_BLOCK_SIZE	DES_BLOCK_SIZE
+
+
+extern unsigned long des_ekey(u32 *pe, const u8 *k);
+
+#endif /* __CRYPTO_DES_H */
include/crypto/internal/aead.h (new file, 80 lines)
@ -0,0 +1,80 @@
|
||||
/*
|
||||
* AEAD: Authenticated Encryption with Associated Data
|
||||
*
|
||||
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _CRYPTO_INTERNAL_AEAD_H
|
||||
#define _CRYPTO_INTERNAL_AEAD_H
|
||||
|
||||
#include <crypto/aead.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct rtattr;
|
||||
|
||||
struct crypto_aead_spawn {
|
||||
struct crypto_spawn base;
|
||||
};
|
||||
|
||||
extern const struct crypto_type crypto_nivaead_type;
|
||||
|
||||
static inline void crypto_set_aead_spawn(
|
||||
struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
|
||||
{
|
||||
crypto_set_spawn(&spawn->base, inst);
|
||||
}
|
||||
|
||||
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
|
||||
u32 type, u32 mask);
|
||||
|
||||
static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
|
||||
{
|
||||
crypto_drop_spawn(&spawn->base);
|
||||
}
|
||||
|
||||
static inline struct crypto_alg *crypto_aead_spawn_alg(
|
||||
struct crypto_aead_spawn *spawn)
|
||||
{
|
||||
return spawn->base.alg;
|
||||
}
|
||||
|
||||
static inline struct crypto_aead *crypto_spawn_aead(
|
||||
struct crypto_aead_spawn *spawn)
|
||||
{
|
||||
return __crypto_aead_cast(
|
||||
crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD,
|
||||
CRYPTO_ALG_TYPE_MASK));
|
||||
}
|
||||
|
||||
struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type,
|
||||
u32 mask);
|
||||
void aead_geniv_free(struct crypto_instance *inst);
|
||||
int aead_geniv_init(struct crypto_tfm *tfm);
|
||||
void aead_geniv_exit(struct crypto_tfm *tfm);
|
||||
|
||||
static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
|
||||
{
|
||||
return crypto_aead_crt(geniv)->base;
|
||||
}
|
||||
|
||||
static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
|
||||
{
|
||||
return aead_request_ctx(&req->areq);
|
||||
}
|
||||
|
||||
static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
|
||||
int err)
|
||||
{
|
||||
aead_request_complete(&req->areq, err);
|
||||
}
|
||||
|
||||
#endif /* _CRYPTO_INTERNAL_AEAD_H */
|
||||
|
include/crypto/internal/skcipher.h (new file, 110 lines)
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Symmetric key ciphers.
|
||||
*
|
||||
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
|
||||
#define _CRYPTO_INTERNAL_SKCIPHER_H
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/skcipher.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct rtattr;
|
||||
|
||||
struct crypto_skcipher_spawn {
|
||||
struct crypto_spawn base;
|
||||
};
|
||||
|
||||
extern const struct crypto_type crypto_givcipher_type;
|
||||
|
||||
static inline void crypto_set_skcipher_spawn(
|
||||
struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
|
||||
{
|
||||
crypto_set_spawn(&spawn->base, inst);
|
||||
}
|
||||
|
||||
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
|
||||
u32 type, u32 mask);
|
||||
|
||||
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
|
||||
{
|
||||
crypto_drop_spawn(&spawn->base);
|
||||
}
|
||||
|
||||
static inline struct crypto_alg *crypto_skcipher_spawn_alg(
|
||||
struct crypto_skcipher_spawn *spawn)
|
||||
{
|
||||
return spawn->base.alg;
|
||||
}
|
||||
|
||||
static inline struct crypto_ablkcipher *crypto_spawn_skcipher(
|
||||
struct crypto_skcipher_spawn *spawn)
|
||||
{
|
||||
return __crypto_ablkcipher_cast(
|
||||
crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0),
|
||||
crypto_skcipher_mask(0)));
|
||||
}
|
||||
|
||||
int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req);
|
||||
int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req);
|
||||
const char *crypto_default_geniv(const struct crypto_alg *alg);
|
||||
|
||||
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
|
||||
struct rtattr **tb, u32 type,
|
||||
u32 mask);
|
||||
void skcipher_geniv_free(struct crypto_instance *inst);
|
||||
int skcipher_geniv_init(struct crypto_tfm *tfm);
|
||||
void skcipher_geniv_exit(struct crypto_tfm *tfm);
|
||||
|
||||
static inline struct crypto_ablkcipher *skcipher_geniv_cipher(
|
||||
struct crypto_ablkcipher *geniv)
|
||||
{
|
||||
return crypto_ablkcipher_crt(geniv)->base;
|
||||
}
|
||||
|
||||
static inline int skcipher_enqueue_givcrypt(
|
||||
struct crypto_queue *queue, struct skcipher_givcrypt_request *request)
|
||||
{
|
||||
return ablkcipher_enqueue_request(queue, &request->creq);
|
||||
}
|
||||
|
||||
static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
|
||||
struct crypto_queue *queue)
|
||||
{
|
||||
return container_of(ablkcipher_dequeue_request(queue),
|
||||
struct skcipher_givcrypt_request, creq);
|
||||
}
|
||||
|
||||
static inline void *skcipher_givcrypt_reqctx(
|
||||
struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
return ablkcipher_request_ctx(&req->creq);
|
||||
}
|
||||
|
||||
static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
|
||||
int err)
|
||||
{
|
||||
req->base.complete(&req->base, err);
|
||||
}
|
||||
|
||||
static inline void skcipher_givcrypt_complete(
|
||||
struct skcipher_givcrypt_request *req, int err)
|
||||
{
|
||||
ablkcipher_request_complete(&req->creq, err);
|
||||
}
|
||||
|
||||
static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
|
||||
{
|
||||
return req->base.flags;
|
||||
}
|
||||
|
||||
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
|
||||
|
@@ -1,9 +1,10 @@
 /*
- * Cryptographic API.
+ * Cryptographic scatter and gather helpers.
  *
  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
  * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
  * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -15,14 +16,52 @@
 #ifndef _CRYPTO_SCATTERWALK_H
 #define _CRYPTO_SCATTERWALK_H
 
+#include <asm/kmap_types.h>
 #include <crypto/algapi.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
+#include <linux/sched.h>
 
-#include "internal.h"
+static inline enum km_type crypto_kmap_type(int out)
+{
+	enum km_type type;
+
+	if (in_softirq())
+		type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
+	else
+		type = out * (KM_USER1 - KM_USER0) + KM_USER0;
+
+	return type;
+}
+
+static inline void *crypto_kmap(struct page *page, int out)
+{
+	return kmap_atomic(page, crypto_kmap_type(out));
+}
+
+static inline void crypto_kunmap(void *vaddr, int out)
+{
+	kunmap_atomic(vaddr, crypto_kmap_type(out));
+}
+
+static inline void crypto_yield(u32 flags)
+{
+	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+		cond_resched();
+}
+
+static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
+					struct scatterlist *sg2)
+{
+	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+}
+
 static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
 {
-	return (++sg)->length ? sg : (void *) sg_page(sg);
+	return (++sg)->length ? sg : (void *)sg_page(sg);
 }
 
 static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
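These mapping helpers used to live behind crypto/internal.h; exporting them here lets templates map scatterlist pages from both softirq and process context with the right kmap slot, and yield politely during long walks. A small sketch of typical use:

/* Sketch of typical use: map one page of a walk, touch the data, unmap,
 * and offer to reschedule if the request allows sleeping. */
static void sketch_touch_page(struct scatterlist *sg, unsigned int len,
			      u32 req_flags)
{
	u8 *vaddr = crypto_kmap(sg_page(sg), 0);	/* 0 = input mapping */

	/* ... process up to len bytes at vaddr + sg->offset ... */

	crypto_kunmap(vaddr, 0);
	crypto_yield(req_flags);	/* cond_resched() if MAY_SLEEP is set */
}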
@@ -8,6 +8,9 @@
 #define SHA1_DIGEST_SIZE        20
 #define SHA1_BLOCK_SIZE         64
 
+#define SHA224_DIGEST_SIZE	28
+#define SHA224_BLOCK_SIZE	64
+
 #define SHA256_DIGEST_SIZE      32
 #define SHA256_BLOCK_SIZE       64
 
@@ -23,6 +26,15 @@
 #define SHA1_H3		0x10325476UL
 #define SHA1_H4		0xc3d2e1f0UL
 
+#define SHA224_H0	0xc1059ed8UL
+#define SHA224_H1	0x367cd507UL
+#define SHA224_H2	0x3070dd17UL
+#define SHA224_H3	0xf70e5939UL
+#define SHA224_H4	0xffc00b31UL
+#define SHA224_H5	0x68581511UL
+#define SHA224_H6	0x64f98fa7UL
+#define SHA224_H7	0xbefa4fa4UL
+
 #define SHA256_H0	0x6a09e667UL
 #define SHA256_H1	0xbb67ae85UL
 #define SHA256_H2	0x3c6ef372UL
include/crypto/skcipher.h (new file, 110 lines)
@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Symmetric key ciphers.
|
||||
*
|
||||
* Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _CRYPTO_SKCIPHER_H
|
||||
#define _CRYPTO_SKCIPHER_H
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/**
|
||||
* struct skcipher_givcrypt_request - Crypto request with IV generation
|
||||
* @seq: Sequence number for IV generation
|
||||
* @giv: Space for generated IV
|
||||
* @creq: The crypto request itself
|
||||
*/
|
||||
struct skcipher_givcrypt_request {
|
||||
u64 seq;
|
||||
u8 *giv;
|
||||
|
||||
struct ablkcipher_request creq;
|
||||
};
|
||||
|
||||
static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
|
||||
struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
return crypto_ablkcipher_reqtfm(&req->creq);
|
||||
}
|
||||
|
||||
static inline int crypto_skcipher_givencrypt(
|
||||
struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct ablkcipher_tfm *crt =
|
||||
crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
|
||||
return crt->givencrypt(req);
|
||||
};
|
||||
|
||||
static inline int crypto_skcipher_givdecrypt(
|
||||
struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
struct ablkcipher_tfm *crt =
|
||||
crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
|
||||
return crt->givdecrypt(req);
|
||||
};
|
||||
|
||||
static inline void skcipher_givcrypt_set_tfm(
|
||||
struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm)
|
||||
{
|
||||
req->creq.base.tfm = crypto_ablkcipher_tfm(tfm);
|
||||
}
|
||||
|
||||
static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast(
|
||||
struct crypto_async_request *req)
|
||||
{
|
||||
return container_of(ablkcipher_request_cast(req),
|
||||
struct skcipher_givcrypt_request, creq);
|
||||
}
|
||||
|
||||
static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc(
|
||||
struct crypto_ablkcipher *tfm, gfp_t gfp)
|
||||
{
|
||||
struct skcipher_givcrypt_request *req;
|
||||
|
||||
req = kmalloc(sizeof(struct skcipher_givcrypt_request) +
|
||||
crypto_ablkcipher_reqsize(tfm), gfp);
|
||||
|
||||
if (likely(req))
|
||||
skcipher_givcrypt_set_tfm(req, tfm);
|
||||
|
||||
return req;
|
||||
}
|
||||
|
||||
static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req)
|
||||
{
|
||||
kfree(req);
|
||||
}
|
||||
|
||||
static inline void skcipher_givcrypt_set_callback(
|
||||
struct skcipher_givcrypt_request *req, u32 flags,
|
||||
crypto_completion_t complete, void *data)
|
||||
{
|
||||
ablkcipher_request_set_callback(&req->creq, flags, complete, data);
|
||||
}
|
||||
|
||||
static inline void skcipher_givcrypt_set_crypt(
|
||||
struct skcipher_givcrypt_request *req,
|
||||
struct scatterlist *src, struct scatterlist *dst,
|
||||
unsigned int nbytes, void *iv)
|
||||
{
|
||||
ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv);
|
||||
}
|
||||
|
||||
static inline void skcipher_givcrypt_set_giv(
|
||||
struct skcipher_givcrypt_request *req, u8 *giv, u64 seq)
|
||||
{
|
||||
req->giv = giv;
|
||||
req->seq = seq;
|
||||
}
|
||||
|
||||
#endif /* _CRYPTO_SKCIPHER_H */
|
||||
|
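include/crypto/skcipher.h is the blkcipher counterpart of the AEAD helpers: the same seq/giv pair rides in front of an ablkcipher_request. A compact usage sketch under the same assumptions as the AEAD example earlier (a completion callback would normally also be set via skcipher_givcrypt_set_callback()):

/* Compact sketch of the symmetric-key IV-generation path. */
static int sketch_skcipher_seal(struct crypto_ablkcipher *tfm,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes, u8 *iv, u64 seq)
{
	struct skcipher_givcrypt_request *req;
	int err;

	req = skcipher_givcrypt_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_givcrypt_set_crypt(req, src, dst, nbytes, iv);
	skcipher_givcrypt_set_giv(req, iv, seq);	/* IV is generated here */

	err = crypto_skcipher_givencrypt(req);
	if (err != -EINPROGRESS)
		skcipher_givcrypt_free(req);
	return err;
}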
@ -33,10 +33,13 @@
|
||||
#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
|
||||
#define CRYPTO_ALG_TYPE_HASH 0x00000003
|
||||
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
|
||||
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000005
|
||||
#define CRYPTO_ALG_TYPE_AEAD 0x00000006
|
||||
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
|
||||
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
|
||||
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008
|
||||
#define CRYPTO_ALG_TYPE_AEAD 0x00000009
|
||||
|
||||
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
|
||||
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
|
||||
|
||||
#define CRYPTO_ALG_LARVAL 0x00000010
|
||||
#define CRYPTO_ALG_DEAD 0x00000020
|
||||
@ -49,6 +52,12 @@
|
||||
*/
|
||||
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
|
||||
|
||||
/*
|
||||
* This bit is set for symmetric key ciphers that have already been wrapped
|
||||
* with a generic IV generator to prevent them from being wrapped again.
|
||||
*/
|
||||
#define CRYPTO_ALG_GENIV 0x00000200
|
||||
|
||||
/*
|
||||
* Transform masks and values (for crt_flags).
|
||||
*/
|
||||
@ -81,13 +90,11 @@
|
||||
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
|
||||
#elif defined(ARCH_SLAB_MINALIGN)
|
||||
#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
|
||||
#else
|
||||
#define CRYPTO_MINALIGN __alignof__(unsigned long long)
|
||||
#endif
|
||||
|
||||
#ifdef CRYPTO_MINALIGN
|
||||
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
|
||||
#else
|
||||
#define CRYPTO_MINALIGN_ATTR
|
||||
#endif
|
||||
|
||||
struct scatterlist;
|
||||
struct crypto_ablkcipher;
|
||||
@ -97,6 +104,8 @@ struct crypto_blkcipher;
|
||||
struct crypto_hash;
|
||||
struct crypto_tfm;
|
||||
struct crypto_type;
|
||||
struct aead_givcrypt_request;
|
||||
struct skcipher_givcrypt_request;
|
||||
|
||||
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
|
||||
|
||||
@ -176,6 +185,10 @@ struct ablkcipher_alg {
|
||||
unsigned int keylen);
|
||||
int (*encrypt)(struct ablkcipher_request *req);
|
||||
int (*decrypt)(struct ablkcipher_request *req);
|
||||
int (*givencrypt)(struct skcipher_givcrypt_request *req);
|
||||
int (*givdecrypt)(struct skcipher_givcrypt_request *req);
|
||||
|
||||
const char *geniv;
|
||||
|
||||
unsigned int min_keysize;
|
||||
unsigned int max_keysize;
|
||||
@ -185,11 +198,16 @@ struct ablkcipher_alg {
|
||||
struct aead_alg {
|
||||
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
|
||||
int (*encrypt)(struct aead_request *req);
|
||||
int (*decrypt)(struct aead_request *req);
|
||||
int (*givencrypt)(struct aead_givcrypt_request *req);
|
||||
int (*givdecrypt)(struct aead_givcrypt_request *req);
|
||||
|
||||
const char *geniv;
|
||||
|
||||
unsigned int ivsize;
|
||||
unsigned int authsize;
|
||||
unsigned int maxauthsize;
|
||||
};
|
||||
|
||||
struct blkcipher_alg {
|
||||
@ -202,6 +220,8 @@ struct blkcipher_alg {
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes);
|
||||
|
||||
const char *geniv;
|
||||
|
||||
unsigned int min_keysize;
|
||||
unsigned int max_keysize;
|
||||
unsigned int ivsize;
|
||||
@ -317,6 +337,11 @@ struct ablkcipher_tfm {
|
||||
unsigned int keylen);
|
||||
int (*encrypt)(struct ablkcipher_request *req);
|
||||
int (*decrypt)(struct ablkcipher_request *req);
|
||||
int (*givencrypt)(struct skcipher_givcrypt_request *req);
|
||||
int (*givdecrypt)(struct skcipher_givcrypt_request *req);
|
||||
|
||||
struct crypto_ablkcipher *base;
|
||||
|
||||
unsigned int ivsize;
|
||||
unsigned int reqsize;
|
||||
};
|
||||
@ -326,6 +351,11 @@ struct aead_tfm {
|
||||
unsigned int keylen);
|
||||
int (*encrypt)(struct aead_request *req);
|
||||
int (*decrypt)(struct aead_request *req);
|
||||
int (*givencrypt)(struct aead_givcrypt_request *req);
|
||||
int (*givdecrypt)(struct aead_givcrypt_request *req);
|
||||
|
||||
struct crypto_aead *base;
|
||||
|
||||
unsigned int ivsize;
|
||||
unsigned int authsize;
|
||||
unsigned int reqsize;
|
||||
@ -525,17 +555,23 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
|
||||
return (struct crypto_ablkcipher *)tfm;
|
||||
}
|
||||
|
||||
static inline struct crypto_ablkcipher *crypto_alloc_ablkcipher(
|
||||
const char *alg_name, u32 type, u32 mask)
|
||||
static inline u32 crypto_skcipher_type(u32 type)
|
||||
{
|
||||
type &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK;
|
||||
|
||||
return __crypto_ablkcipher_cast(
|
||||
crypto_alloc_base(alg_name, type, mask));
|
||||
return type;
|
||||
}
|
||||
|
||||
static inline u32 crypto_skcipher_mask(u32 mask)
|
||||
{
|
||||
mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
|
||||
mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
|
||||
return mask;
|
||||
}
|
||||
|
||||
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
|
||||
u32 type, u32 mask);
|
||||
|
||||
static inline struct crypto_tfm *crypto_ablkcipher_tfm(
|
||||
struct crypto_ablkcipher *tfm)
|
||||
{
|
||||
@ -550,11 +586,8 @@ static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
|
||||
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
type &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK;
|
||||
|
||||
return crypto_has_alg(alg_name, type, mask);
|
||||
return crypto_has_alg(alg_name, crypto_skcipher_type(type),
|
||||
crypto_skcipher_mask(mask));
|
||||
}
|
||||
|
||||
static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
|
||||
@ -601,7 +634,9 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
|
||||
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
return crypto_ablkcipher_crt(tfm)->setkey(tfm, key, keylen);
|
||||
struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
|
||||
|
||||
return crt->setkey(crt->base, key, keylen);
|
||||
}
|
||||
|
||||
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
|
||||
@ -633,7 +668,7 @@ static inline unsigned int crypto_ablkcipher_reqsize(
|
||||
static inline void ablkcipher_request_set_tfm(
|
||||
struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
|
||||
{
|
||||
req->base.tfm = crypto_ablkcipher_tfm(tfm);
|
||||
req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
|
||||
}
|
||||
|
||||
static inline struct ablkcipher_request *ablkcipher_request_cast(
|
||||
@ -686,15 +721,7 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
|
||||
return (struct crypto_aead *)tfm;
|
||||
}
|
||||
|
||||
static inline struct crypto_aead *crypto_alloc_aead(const char *alg_name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
type &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
type |= CRYPTO_ALG_TYPE_AEAD;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK;
|
||||
|
||||
return __crypto_aead_cast(crypto_alloc_base(alg_name, type, mask));
|
||||
}
|
||||
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
|
||||
|
||||
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
|
||||
{
|
||||
@ -749,9 +776,13 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
|
||||
static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return crypto_aead_crt(tfm)->setkey(tfm, key, keylen);
|
||||
struct aead_tfm *crt = crypto_aead_crt(tfm);
|
||||
|
||||
return crt->setkey(crt->base, key, keylen);
|
||||
}
|
||||
|
||||
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
|
||||
|
||||
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
|
||||
{
|
||||
return __crypto_aead_cast(req->base.tfm);
|
||||
@ -775,7 +806,7 @@ static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
|
||||
static inline void aead_request_set_tfm(struct aead_request *req,
|
||||
struct crypto_aead *tfm)
|
||||
{
|
||||
req->base.tfm = crypto_aead_tfm(tfm);
|
||||
req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
|
||||
}
|
||||
|
||||
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
|
||||
@ -841,9 +872,9 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
|
||||
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
|
||||
const char *alg_name, u32 type, u32 mask)
|
||||
{
|
||||
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
|
||||
type &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK;
|
||||
|
||||
return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
|
||||
}
|
||||
@ -861,9 +892,9 @@ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
|
||||
|
||||
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
|
||||
{
|
||||
type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
|
||||
type &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC;
|
||||
mask |= CRYPTO_ALG_TYPE_MASK;
|
||||
|
||||
return crypto_has_alg(alg_name, type, mask);
|
||||
}
|
||||
@ -1081,6 +1112,7 @@ static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
type &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
mask &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
type |= CRYPTO_ALG_TYPE_HASH;
|
||||
mask |= CRYPTO_ALG_TYPE_HASH_MASK;
|
||||
|
||||
@ -1100,6 +1132,7 @@ static inline void crypto_free_hash(struct crypto_hash *tfm)
|
||||
static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
|
||||
{
|
||||
type &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
mask &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
type |= CRYPTO_ALG_TYPE_HASH;
|
||||
mask |= CRYPTO_ALG_TYPE_HASH_MASK;
|
||||
|
||||
|
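The crypto_skcipher_type()/crypto_skcipher_mask() pair introduced above centralises the type/mask mangling that crypto_alloc_ablkcipher() and crypto_has_ablkcipher() used to open-code, and it strips CRYPTO_ALG_GENIV so lookups do not match an already-wrapped cipher. From a user's point of view the allocation itself stays simple; a sketch, assuming the usual "cbc(aes)" request:

/* Sketch: allocate an ablkcipher; with the new code the core can wrap it
 * with the algorithm's default IV generator (its ->geniv, or one chosen
 * by the core) before handing it back. */
static struct crypto_ablkcipher *sketch_get_cipher(void)
{
	return crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
}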
@@ -33,7 +33,7 @@ struct hwrng {
 	const char *name;
 	int (*init)(struct hwrng *rng);
 	void (*cleanup)(struct hwrng *rng);
-	int (*data_present)(struct hwrng *rng);
+	int (*data_present)(struct hwrng *rng, int wait);
 	int (*data_read)(struct hwrng *rng, u32 *data);
 	unsigned long priv;
 