arm/crypto: Add optimized AES and SHA1 routines
Add assembler versions of AES and SHA1 for ARM platforms. This has provided
up to a 50% improvement in IPsec/TCP throughput for tunnels using AES128/SHA1.

    Platform   CPU Speed   Endian   Before (bps)   After (bps)   Improvement
    IXP425     533 MHz     big        11217042      15566294       ~38%
    KS8695     166 MHz     little      3828549       5795373       ~51%

Signed-off-by: David McCullough <ucdevel@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit f0be44f4fb (parent 956c203c5e)
arch/arm/Makefile:
@@ -255,6 +255,7 @@ core-$(CONFIG_VFP)		+= arch/arm/vfp/
 # If we have a machine-specific directory, then include it in the build.
 core-y				+= arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
 core-y				+= arch/arm/net/
+core-y				+= arch/arm/crypto/
 core-y				+= $(machdirs) $(platdirs)
 
 drivers-$(CONFIG_OPROFILE)	+= arch/arm/oprofile/
arch/arm/crypto/Makefile (new file, 9 lines)
@@ -0,0 +1,9 @@
#
# Arch-specific CryptoAPI modules.
#

obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o

aes-arm-y := aes-armv4.o aes_glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
arch/arm/crypto/aes-armv4.S (new file, 1112 lines)
(File diff suppressed because it is too large.)
arch/arm/crypto/aes_glue.c (new file, 108 lines)
@@ -0,0 +1,108 @@
/*
 * Glue Code for the asm optimized version of the AES Cipher Algorithm
 */

#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aes.h>

#define AES_MAXNR 14

typedef struct {
	unsigned int rd_key[4 *(AES_MAXNR + 1)];
	int rounds;
} AES_KEY;

struct AES_CTX {
	AES_KEY enc_key;
	AES_KEY dec_key;
};

asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);

static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
	AES_encrypt(src, dst, &ctx->enc_key);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);
	AES_decrypt(src, dst, &ctx->dec_key);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		unsigned int key_len)
{
	struct AES_CTX *ctx = crypto_tfm_ctx(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		key_len = 128;
		break;
	case AES_KEYSIZE_192:
		key_len = 192;
		break;
	case AES_KEYSIZE_256:
		key_len = 256;
		break;
	default:
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if (private_AES_set_encrypt_key(in_key, key_len, &ctx->enc_key) == -1) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	/* private_AES_set_decrypt_key expects an encryption key as input */
	ctx->dec_key = ctx->enc_key;
	if (private_AES_set_decrypt_key(in_key, key_len, &ctx->dec_key) == -1) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	return 0;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-asm",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct AES_CTX),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt
		}
	}
};

static int __init aes_init(void)
{
	return crypto_register_alg(&aes_alg);
}

static void __exit aes_fini(void)
{
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_init);
module_exit(aes_fini);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("aes");
MODULE_ALIAS("aes-asm");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
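For context (not part of the patch): aes_set_key() above converts the crypto API's key length in bytes to the bit count the assembler key-schedule helpers expect, and registers the cipher as "aes"/"aes-asm" with priority 200, so callers asking for "aes" pick it up over the generic C implementation. The sketch below is illustrative only — the demo module, its names and dummy key are not part of this commit — but the crypto API calls it uses (crypto_alloc_cipher and friends) are the standard single-block cipher interface:

/* Illustrative sketch, not part of this commit: encrypt one 16-byte block
 * through the crypto API; with aes-arm loaded, "aes" resolves to "aes-asm".
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aes.h>

static int __init aes_demo_init(void)
{
	struct crypto_cipher *tfm;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* dummy all-zero key */
	u8 in[AES_BLOCK_SIZE] = { 0 };
	u8 out[AES_BLOCK_SIZE];
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	pr_info("aes demo: driver %s, err %d\n",
		crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm)), err);

	crypto_free_cipher(tfm);
	return err;
}

static void __exit aes_demo_exit(void)
{
}

module_init(aes_demo_init);
module_exit(aes_demo_exit);
MODULE_LICENSE("GPL");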
arch/arm/crypto/sha1-armv4-large.S (new file, 503 lines)
@@ -0,0 +1,503 @@
#define __ARM_ARCH__ __LINUX_ARM_ARCH__
@ ====================================================================
@ Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@ ====================================================================

@ sha1_block procedure for ARMv4.
@
@ January 2007.

@ Size/performance trade-off
@ ====================================================================
@ impl		size in bytes	comp cycles[*]	measured performance
@ ====================================================================
@ thumb		304		3212		4420
@ armv4-small	392/+29%	1958/+64%	2250/+96%
@ armv4-compact	740/+89%	1552/+26%	1840/+22%
@ armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
@ full unroll	~5100/+260%	~1260/+4%	~1300/+5%
@ ====================================================================
@ thumb		= same as 'small' but in Thumb instructions[**] and
@		  with recurring code in two private functions;
@ small		= detached Xload/update, loops are folded;
@ compact	= detached Xload/update, 5x unroll;
@ large		= interleaved Xload/update, 5x unroll;
@ full unroll	= interleaved Xload/update, full unroll, estimated[!];
@
@ [*]	Manually counted instructions in "grand" loop body. Measured
@	performance is affected by prologue and epilogue overhead,
@	i-cache availability, branch penalties, etc.
@ [**]	While each Thumb instruction is twice smaller, they are not as
@	diverse as ARM ones: e.g., there are only two arithmetic
@	instructions with 3 arguments, no [fixed] rotate, addressing
@	modes are limited. As result it takes more instructions to do
@	the same job in Thumb, therefore the code is never twice as
@	small and always slower.
@ [***]	which is also ~35% better than compiler generated code. Dual-
@	issue Cortex A8 core was measured to process input block in
@	~990 cycles.

@ August 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 13% improvement on
@ Cortex A8 core and in absolute terms ~870 cycles per input block
@ [or 13.6 cycles per byte].

@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 10%
@ improvement on Cortex A8 core and 12.2 cycles per byte.

.text

.global	sha1_block_data_order
.type	sha1_block_data_order,%function

.align	2
sha1_block_data_order:
	stmdb	sp!,{r4-r12,lr}
	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
	ldmia	r0,{r3,r4,r5,r6,r7}
.Lloop:
	ldr	r8,.LK_00_19
	mov	r14,sp
	sub	sp,sp,#15*4
	mov	r5,r5,ror#30
	mov	r6,r6,ror#30
	mov	r7,r7,ror#30		@ [6]
.L_00_15:
|
||||
#if __ARM_ARCH__<7
|
||||
ldrb r10,[r1,#2]
|
||||
ldrb r9,[r1,#3]
|
||||
ldrb r11,[r1,#1]
|
||||
add r7,r8,r7,ror#2 @ E+=K_00_19
|
||||
ldrb r12,[r1],#4
|
||||
orr r9,r9,r10,lsl#8
|
||||
eor r10,r5,r6 @ F_xx_xx
|
||||
orr r9,r9,r11,lsl#16
|
||||
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
|
||||
orr r9,r9,r12,lsl#24
|
||||
#else
|
||||
ldr r9,[r1],#4 @ handles unaligned
|
||||
add r7,r8,r7,ror#2 @ E+=K_00_19
|
||||
eor r10,r5,r6 @ F_xx_xx
|
||||
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
|
||||
#ifdef __ARMEL__
|
||||
rev r9,r9 @ byte swap
|
||||
#endif
|
||||
#endif
|
||||
and r10,r4,r10,ror#2
|
||||
add r7,r7,r9 @ E+=X[i]
|
||||
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
|
||||
str r9,[r14,#-4]!
|
||||
add r7,r7,r10 @ E+=F_00_19(B,C,D)
|
||||
#if __ARM_ARCH__<7
|
||||
ldrb r10,[r1,#2]
|
||||
ldrb r9,[r1,#3]
|
||||
ldrb r11,[r1,#1]
|
||||
add r6,r8,r6,ror#2 @ E+=K_00_19
|
||||
ldrb r12,[r1],#4
|
||||
orr r9,r9,r10,lsl#8
|
||||
eor r10,r4,r5 @ F_xx_xx
|
||||
orr r9,r9,r11,lsl#16
|
||||
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
|
||||
orr r9,r9,r12,lsl#24
|
||||
#else
|
||||
ldr r9,[r1],#4 @ handles unaligned
|
||||
add r6,r8,r6,ror#2 @ E+=K_00_19
|
||||
eor r10,r4,r5 @ F_xx_xx
|
||||
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
|
||||
#ifdef __ARMEL__
|
||||
rev r9,r9 @ byte swap
|
||||
#endif
|
||||
#endif
|
||||
and r10,r3,r10,ror#2
|
||||
add r6,r6,r9 @ E+=X[i]
|
||||
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
|
||||
str r9,[r14,#-4]!
|
||||
add r6,r6,r10 @ E+=F_00_19(B,C,D)
|
||||
#if __ARM_ARCH__<7
|
||||
ldrb r10,[r1,#2]
|
||||
ldrb r9,[r1,#3]
|
||||
ldrb r11,[r1,#1]
|
||||
add r5,r8,r5,ror#2 @ E+=K_00_19
|
||||
ldrb r12,[r1],#4
|
||||
orr r9,r9,r10,lsl#8
|
||||
eor r10,r3,r4 @ F_xx_xx
|
||||
orr r9,r9,r11,lsl#16
|
||||
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
|
||||
orr r9,r9,r12,lsl#24
|
||||
#else
|
||||
ldr r9,[r1],#4 @ handles unaligned
|
||||
add r5,r8,r5,ror#2 @ E+=K_00_19
|
||||
eor r10,r3,r4 @ F_xx_xx
|
||||
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
|
||||
#ifdef __ARMEL__
|
||||
rev r9,r9 @ byte swap
|
||||
#endif
|
||||
#endif
|
||||
and r10,r7,r10,ror#2
|
||||
add r5,r5,r9 @ E+=X[i]
|
||||
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
|
||||
str r9,[r14,#-4]!
|
||||
add r5,r5,r10 @ E+=F_00_19(B,C,D)
|
||||
#if __ARM_ARCH__<7
|
||||
ldrb r10,[r1,#2]
|
||||
ldrb r9,[r1,#3]
|
||||
ldrb r11,[r1,#1]
|
||||
add r4,r8,r4,ror#2 @ E+=K_00_19
|
||||
ldrb r12,[r1],#4
|
||||
orr r9,r9,r10,lsl#8
|
||||
eor r10,r7,r3 @ F_xx_xx
|
||||
orr r9,r9,r11,lsl#16
|
||||
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
|
||||
orr r9,r9,r12,lsl#24
|
||||
#else
|
||||
ldr r9,[r1],#4 @ handles unaligned
|
||||
add r4,r8,r4,ror#2 @ E+=K_00_19
|
||||
eor r10,r7,r3 @ F_xx_xx
|
||||
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
|
||||
#ifdef __ARMEL__
|
||||
rev r9,r9 @ byte swap
|
||||
#endif
|
||||
#endif
|
||||
and r10,r6,r10,ror#2
|
||||
add r4,r4,r9 @ E+=X[i]
|
||||
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
|
||||
str r9,[r14,#-4]!
|
||||
add r4,r4,r10 @ E+=F_00_19(B,C,D)
|
||||
#if __ARM_ARCH__<7
|
||||
ldrb r10,[r1,#2]
|
||||
ldrb r9,[r1,#3]
|
||||
ldrb r11,[r1,#1]
|
||||
add r3,r8,r3,ror#2 @ E+=K_00_19
|
||||
ldrb r12,[r1],#4
|
||||
orr r9,r9,r10,lsl#8
|
||||
eor r10,r6,r7 @ F_xx_xx
|
||||
orr r9,r9,r11,lsl#16
|
||||
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
|
||||
orr r9,r9,r12,lsl#24
|
||||
#else
|
||||
ldr r9,[r1],#4 @ handles unaligned
|
||||
add r3,r8,r3,ror#2 @ E+=K_00_19
|
||||
eor r10,r6,r7 @ F_xx_xx
|
||||
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
|
||||
#ifdef __ARMEL__
|
||||
rev r9,r9 @ byte swap
|
||||
#endif
|
||||
#endif
|
||||
and r10,r5,r10,ror#2
|
||||
add r3,r3,r9 @ E+=X[i]
|
||||
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
|
||||
str r9,[r14,#-4]!
|
||||
add r3,r3,r10 @ E+=F_00_19(B,C,D)
|
||||
teq r14,sp
|
||||
bne .L_00_15 @ [((11+4)*5+2)*3]
|
||||
#if __ARM_ARCH__<7
|
||||
ldrb r10,[r1,#2]
|
||||
ldrb r9,[r1,#3]
|
||||
ldrb r11,[r1,#1]
|
||||
add r7,r8,r7,ror#2 @ E+=K_00_19
|
||||
ldrb r12,[r1],#4
|
||||
orr r9,r9,r10,lsl#8
|
||||
eor r10,r5,r6 @ F_xx_xx
|
||||
orr r9,r9,r11,lsl#16
|
||||
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
|
||||
orr r9,r9,r12,lsl#24
|
||||
#else
|
||||
ldr r9,[r1],#4 @ handles unaligned
|
||||
add r7,r8,r7,ror#2 @ E+=K_00_19
|
||||
eor r10,r5,r6 @ F_xx_xx
|
||||
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
|
||||
#ifdef __ARMEL__
|
||||
rev r9,r9 @ byte swap
|
||||
#endif
|
||||
#endif
|
||||
and r10,r4,r10,ror#2
|
||||
add r7,r7,r9 @ E+=X[i]
|
||||
eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
|
||||
str r9,[r14,#-4]!
|
||||
add r7,r7,r10 @ E+=F_00_19(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r6,r8,r6,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r4,r5 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r3,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r6,r6,r9 @ E+=X[i]
|
||||
eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
|
||||
add r6,r6,r10 @ E+=F_00_19(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r5,r8,r5,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r3,r4 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r7,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r5,r5,r9 @ E+=X[i]
|
||||
eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
|
||||
add r5,r5,r10 @ E+=F_00_19(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r4,r8,r4,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r7,r3 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r6,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r4,r4,r9 @ E+=X[i]
|
||||
eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
|
||||
add r4,r4,r10 @ E+=F_00_19(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r3,r8,r3,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r6,r7 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r5,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r3,r3,r9 @ E+=X[i]
|
||||
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
|
||||
add r3,r3,r10 @ E+=F_00_19(B,C,D)
|
||||
|
||||
ldr r8,.LK_20_39 @ [+15+16*4]
|
||||
sub sp,sp,#25*4
|
||||
cmn sp,#0 @ [+3], clear carry to denote 20_39
|
||||
.L_20_39_or_60_79:
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r7,r8,r7,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r5,r6 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
eor r10,r4,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r7,r7,r9 @ E+=X[i]
|
||||
add r7,r7,r10 @ E+=F_20_39(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r6,r8,r6,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r4,r5 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
eor r10,r3,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r6,r6,r9 @ E+=X[i]
|
||||
add r6,r6,r10 @ E+=F_20_39(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r5,r8,r5,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r3,r4 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
eor r10,r7,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r5,r5,r9 @ E+=X[i]
|
||||
add r5,r5,r10 @ E+=F_20_39(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r4,r8,r4,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r7,r3 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
eor r10,r6,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r4,r4,r9 @ E+=X[i]
|
||||
add r4,r4,r10 @ E+=F_20_39(B,C,D)
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r3,r8,r3,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r6,r7 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
eor r10,r5,r10,ror#2 @ F_xx_xx
|
||||
@ F_xx_xx
|
||||
add r3,r3,r9 @ E+=X[i]
|
||||
add r3,r3,r10 @ E+=F_20_39(B,C,D)
|
||||
teq r14,sp @ preserve carry
|
||||
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
|
||||
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
|
||||
|
||||
ldr r8,.LK_40_59
|
||||
sub sp,sp,#20*4 @ [+2]
|
||||
.L_40_59:
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r7,r8,r7,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r5,r6 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r7,r7,r3,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r4,r10,ror#2 @ F_xx_xx
|
||||
and r11,r5,r6 @ F_xx_xx
|
||||
add r7,r7,r9 @ E+=X[i]
|
||||
add r7,r7,r10 @ E+=F_40_59(B,C,D)
|
||||
add r7,r7,r11,ror#2
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r6,r8,r6,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r4,r5 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r6,r6,r7,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r3,r10,ror#2 @ F_xx_xx
|
||||
and r11,r4,r5 @ F_xx_xx
|
||||
add r6,r6,r9 @ E+=X[i]
|
||||
add r6,r6,r10 @ E+=F_40_59(B,C,D)
|
||||
add r6,r6,r11,ror#2
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r5,r8,r5,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r3,r4 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r5,r5,r6,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r7,r10,ror#2 @ F_xx_xx
|
||||
and r11,r3,r4 @ F_xx_xx
|
||||
add r5,r5,r9 @ E+=X[i]
|
||||
add r5,r5,r10 @ E+=F_40_59(B,C,D)
|
||||
add r5,r5,r11,ror#2
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r4,r8,r4,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r7,r3 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r4,r4,r5,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r6,r10,ror#2 @ F_xx_xx
|
||||
and r11,r7,r3 @ F_xx_xx
|
||||
add r4,r4,r9 @ E+=X[i]
|
||||
add r4,r4,r10 @ E+=F_40_59(B,C,D)
|
||||
add r4,r4,r11,ror#2
|
||||
ldr r9,[r14,#15*4]
|
||||
ldr r10,[r14,#13*4]
|
||||
ldr r11,[r14,#7*4]
|
||||
add r3,r8,r3,ror#2 @ E+=K_xx_xx
|
||||
ldr r12,[r14,#2*4]
|
||||
eor r9,r9,r10
|
||||
eor r11,r11,r12 @ 1 cycle stall
|
||||
eor r10,r6,r7 @ F_xx_xx
|
||||
mov r9,r9,ror#31
|
||||
add r3,r3,r4,ror#27 @ E+=ROR(A,27)
|
||||
eor r9,r9,r11,ror#31
|
||||
str r9,[r14,#-4]!
|
||||
and r10,r5,r10,ror#2 @ F_xx_xx
|
||||
and r11,r6,r7 @ F_xx_xx
|
||||
add r3,r3,r9 @ E+=X[i]
|
||||
add r3,r3,r10 @ E+=F_40_59(B,C,D)
|
||||
add r3,r3,r11,ror#2
|
||||
teq r14,sp
|
||||
bne .L_40_59 @ [+((12+5)*5+2)*4]
|
||||
|
||||
ldr r8,.LK_60_79
|
||||
sub sp,sp,#20*4
|
||||
cmp sp,#0 @ set carry to denote 60_79
|
||||
b .L_20_39_or_60_79 @ [+4], spare 300 bytes
|
||||
.L_done:
|
||||
add sp,sp,#80*4 @ "deallocate" stack frame
|
||||
ldmia r0,{r8,r9,r10,r11,r12}
|
||||
add r3,r8,r3
|
||||
add r4,r9,r4
|
||||
add r5,r10,r5,ror#2
|
||||
add r6,r11,r6,ror#2
|
||||
add r7,r12,r7,ror#2
|
||||
stmia r0,{r3,r4,r5,r6,r7}
|
||||
teq r1,r2
|
||||
bne .Lloop @ [+18], total 1307
|
||||
|
||||
#if __ARM_ARCH__>=5
|
||||
ldmia sp!,{r4-r12,pc}
|
||||
#else
|
||||
ldmia sp!,{r4-r12,lr}
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
.align 2
|
||||
.LK_00_19: .word 0x5a827999
|
||||
.LK_20_39: .word 0x6ed9eba1
|
||||
.LK_40_59: .word 0x8f1bbcdc
|
||||
.LK_60_79: .word 0xca62c1d6
|
||||
.size sha1_block_data_order,.-sha1_block_data_order
|
||||
.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
|
||||
.align 2
|
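For reference (not part of the patch): the interleaved round blocks above all compute the same basic SHA-1 step that the inline comments describe ("E+=ROR(A,27)", "F_00_19(B,C,D)", ".LK_00_19"). A minimal C rendering of one round for i = 0..19 is sketched below; the variable names follow FIPS 180, not the register allocation in the .S file, and the B rotation is written out explicitly where the assembler folds it into later rounds.

/* Illustrative only: one SHA-1 round for rounds 0-19.
 * ROR(A,27) is the same as ROL(A,5); 0x5a827999 is K_00_19.
 */
#include <linux/types.h>

static inline u32 rol32_demo(u32 x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

static void sha1_round_00_19(u32 *a, u32 *b, u32 *c, u32 *d, u32 *e, u32 xi)
{
	u32 f = (*b & *c) | (~*b & *d);			/* F_00_19(B,C,D) */

	*e += rol32_demo(*a, 5) + f + xi + 0x5a827999;	/* E += ROL(A,5)+F+X[i]+K */
	*b = rol32_demo(*b, 30);			/* B = ROR(B,2) */
}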
arch/arm/crypto/sha1_glue.c (new file, 179 lines)
@@ -0,0 +1,179 @@
/*
 * Cryptographic API.
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation
 *
 * This file is based on sha1_generic.c and sha1_ssse3_glue.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>

struct SHA1_CTX {
	uint32_t h0,h1,h2,h3,h4;
	u64 count;
	u8 data[SHA1_BLOCK_SIZE];
};

asmlinkage void sha1_block_data_order(struct SHA1_CTX *digest,
		const unsigned char *data, unsigned int rounds);


static int sha1_init(struct shash_desc *desc)
{
	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
	memset(sctx, 0, sizeof(*sctx));
	sctx->h0 = SHA1_H0;
	sctx->h1 = SHA1_H1;
	sctx->h2 = SHA1_H2;
	sctx->h3 = SHA1_H3;
	sctx->h4 = SHA1_H4;
	return 0;
}


static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
			 unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	sctx->count += len;

	if (partial) {
		done = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->data + partial, data, done);
		sha1_block_data_order(sctx, sctx->data, 1);
	}

	if (len - done >= SHA1_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
		sha1_block_data_order(sctx, data + done, rounds);
		done += rounds * SHA1_BLOCK_SIZE;
	}

	memcpy(sctx->data, data + done, len - done);
	return 0;
}


static int sha1_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len)
{
	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA1_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->data + partial, data, len);
		return 0;
	}
	res = __sha1_update(sctx, data, len, partial);
	return res;
}


/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
	/* We need to fill a whole block for __sha1_update() */
	if (padlen <= 56) {
		sctx->count += padlen;
		memcpy(sctx->data + index, padding, padlen);
	} else {
		__sha1_update(sctx, padding, padlen, index);
	}
	__sha1_update(sctx, (const u8 *)&bits, sizeof(bits), 56);

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(((u32 *)sctx)[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));
	return 0;
}


static int sha1_export(struct shash_desc *desc, void *out)
{
	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
	memcpy(out, sctx, sizeof(*sctx));
	return 0;
}


static int sha1_import(struct shash_desc *desc, const void *in)
{
	struct SHA1_CTX *sctx = shash_desc_ctx(desc);
	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}


static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_init,
	.update		=	sha1_update,
	.final		=	sha1_final,
	.export		=	sha1_export,
	.import		=	sha1_import,
	.descsize	=	sizeof(struct SHA1_CTX),
	.statesize	=	sizeof(struct SHA1_CTX),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-asm",
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};


static int __init sha1_mod_init(void)
{
	return crypto_register_shash(&alg);
}


static void __exit sha1_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}


module_init(sha1_mod_init);
module_exit(sha1_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
MODULE_ALIAS("sha1");
MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
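Again for context only (not part of the patch): the module registers itself as "sha1"/"sha1-asm" with priority 150, so it outranks the generic C sha1 driver when callers allocate "sha1" through the synchronous hash (shash) interface. A minimal sketch of such a caller is shown below; the helper name is made up, but the crypto_alloc_shash/crypto_shash_digest calls are the standard API.

/* Illustrative sketch, not part of this commit: compute a SHA-1 digest of
 * an arbitrary buffer via the shash API.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>
#include <crypto/sha.h>

static int sha1_digest_demo(const u8 *data, unsigned int len,
			    u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The descriptor carries the per-request state (struct SHA1_CTX here). */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* init + update + final in one call */
	err = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}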
crypto/Kconfig:
@@ -433,6 +433,15 @@ config CRYPTO_SHA1_SSSE3
 	  using Supplemental SSE3 (SSSE3) instructions or Advanced Vector
 	  Extensions (AVX), when available.
 
+config CRYPTO_SHA1_ARM
+	tristate "SHA1 digest algorithm (ARM-asm)"
+	depends on ARM
+	select CRYPTO_SHA1
+	select CRYPTO_HASH
+	help
+	  SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+	  using optimized ARM assembler.
+
 config CRYPTO_SHA256
 	tristate "SHA224 and SHA256 digest algorithm"
 	select CRYPTO_HASH
@@ -590,6 +599,30 @@ config CRYPTO_AES_NI_INTEL
 	  ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional
 	  acceleration for CTR.
 
+config CRYPTO_AES_ARM
+	tristate "AES cipher algorithms (ARM-asm)"
+	depends on ARM
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	help
+	  Use optimized AES assembler routines for ARM platforms.
+
+	  AES cipher algorithms (FIPS-197). AES uses the Rijndael
+	  algorithm.
+
+	  Rijndael appears to be consistently a very good performer in
+	  both hardware and software across a wide range of computing
+	  environments regardless of its use in feedback or non-feedback
+	  modes. Its key setup time is excellent, and its key agility is
+	  good. Rijndael's very low memory requirements make it very well
+	  suited for restricted-space environments, in which it also
+	  demonstrates excellent performance. Rijndael's operations are
+	  among the easiest to defend against power and timing attacks.
+
+	  The AES specifies three key sizes: 128, 192 and 256 bits
+
+	  See <http://csrc.nist.gov/encryption/aes/> for more information.
+
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
 	select CRYPTO_ALGAPI
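A hypothetical configuration fragment for builds that want both new drivers as modules; the /proc/crypto notes are only indicative, with the driver names and priorities taken from the glue code above:

# Illustrative .config fragment (not part of the patch)
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_SHA1_ARM=m

# Once loaded, /proc/crypto should list entries along the lines of:
#   driver : aes-asm    (priority 200)
#   driver : sha1-asm   (priority 150)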