diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index efc8d90a9a3f..0992531ffefb 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -119,7 +119,7 @@
Note: The terms "transformation" and cipher algorithm are used
- interchangably.
+ interchangeably.
@@ -536,8 +536,8 @@
For other use cases of AEAD ciphers, the ASCII art applies as
- well, but the caller may not use the GIVCIPHER interface. In
- this case, the caller must generate the IV.
+ well, but the caller may not use the AEAD cipher with a separate
+ IV generator. In this case, the caller must generate the IV.
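
   A minimal sketch of such a caller-generated-IV flow with the AEAD
   request API follows. This is not part of the patch: the cipher name,
   buffer names and lengths are placeholders, asynchronous completion is
   masked out for brevity, and with the new interface the source
   scatterlist is assumed to hold the associated data followed by the
   plaintext.

     #include <crypto/aead.h>
     #include <linux/err.h>
     #include <linux/random.h>
     #include <linux/scatterlist.h>

     /* Sketch: one synchronous AEAD encryption, caller supplies the IV. */
     static int example_aead_encrypt(struct scatterlist *sg,
                                     unsigned int assoclen,
                                     unsigned int cryptlen,
                                     const u8 *key, unsigned int keylen)
     {
             struct crypto_aead *tfm;
             struct aead_request *req;
             u8 iv[12];                      /* gcm(aes) takes a 96-bit IV */
             int ret;

             /* mask out async implementations to keep the sketch simple */
             tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
             if (IS_ERR(tfm))
                     return PTR_ERR(tfm);

             ret = crypto_aead_setkey(tfm, key, keylen);
             if (ret)
                     goto out_tfm;
             ret = crypto_aead_setauthsize(tfm, 16);
             if (ret)
                     goto out_tfm;

             req = aead_request_alloc(tfm, GFP_KERNEL);
             if (!req) {
                     ret = -ENOMEM;
                     goto out_tfm;
             }

             get_random_bytes(iv, sizeof(iv));       /* the caller's IV */

             aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                       NULL, NULL);
             /* sg holds AAD || plaintext; cryptlen covers the plaintext */
             aead_request_set_crypt(req, sg, sg, cryptlen, iv);
             aead_request_set_ad(req, assoclen);

             ret = crypto_aead_encrypt(req);

             aead_request_free(req);
     out_tfm:
             crypto_free_aead(tfm);
             return ret;
     }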
@@ -584,8 +584,8 @@ kernel crypto API | IPSEC Layer
|
+-----------+ |
| | (1)
-| givcipher | <----------------------------------- esp_output
-| (seqiv) | ---+
+| aead | <----------------------------------- esp_output
+| (seqniv) | ---+
+-----------+ |
| (2)
+-----------+ |
@@ -620,8 +620,8 @@ kernel crypto API | IPSEC Layer
- esp_output() invokes crypto_aead_givencrypt() to trigger an encryption
- operation of the GIVCIPHER implementation.
+ esp_output() invokes crypto_aead_encrypt() to trigger an encryption
+ operation of the AEAD cipher with IV generator.
@@ -1563,7 +1563,7 @@ struct sockaddr_alg sa = {
Zero-Copy Interface
- In addition to the send/write/read/recv system call familty, the AF_ALG
+ In addition to the send/write/read/recv system call family, the AF_ALG
interface can be accessed with the zero-copy interface of splice/vmsplice.
As the name indicates, the kernel tries to avoid a copy operation into
kernel space.
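
   As a rough userspace sketch of that zero-copy path (error handling
   omitted; opfd is assumed to be the accept()ed operation socket from
   the preceding AF_ALG examples, and the in/out buffers are
   placeholders):

     #define _GNU_SOURCE
     #include <fcntl.h>
     #include <unistd.h>
     #include <sys/uio.h>

     /* Gift the user pages to a pipe, then splice them into the
      * algorithm socket instead of copying with write()/send(). */
     int zero_copy_op(int opfd, void *in, size_t inlen,
                      void *out, size_t outlen)
     {
             int pfd[2];
             struct iovec iov = { .iov_base = in, .iov_len = inlen };

             if (pipe(pfd))
                     return -1;

             vmsplice(pfd[1], &iov, 1, 0);   /* SPLICE_F_GIFT is optional */
             splice(pfd[0], NULL, opfd, NULL, inlen, 0);

             read(opfd, out, outlen);

             close(pfd[0]);
             close(pfd[1]);
             return 0;
     }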
@@ -1669,9 +1669,19 @@ read(opfd, out, outlen);
Programming Interface
+
+ Please note that the kernel crypto API contains the AEAD givcrypt
+ API (crypto_aead_giv* and aead_givcrypt_* function calls in
+ include/crypto/aead.h). This API is obsolete and will be removed
+ in the future. To obtain the functionality of an AEAD cipher with
+ internal IV generation, use the IV generator as a regular cipher.
+ For example, rfc4106(gcm(aes)) is the AEAD cipher with external
+ IV generation and seqniv(rfc4106(gcm(aes))) implies that the kernel
+ crypto API generates the IV. Different IV generators are available.
+
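
   In code, the distinction is purely a matter of the allocated
   transformation name; a minimal sketch (error handling omitted, not
   part of the patch):

     #include <crypto/aead.h>

     /* Sketch: the IV-generation policy is selected by name alone. */
     static struct crypto_aead *alloc_rfc4106(bool kernel_generates_iv)
     {
             return crypto_alloc_aead(kernel_generates_iv ?
                                      "seqniv(rfc4106(gcm(aes)))" :
                                      "rfc4106(gcm(aes))", 0, 0);
     }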
Block Cipher Context Data Structures
!Pinclude/linux/crypto.h Block Cipher Context Data Structures
-!Finclude/linux/crypto.h aead_request
+!Finclude/crypto/aead.h aead_request
Block Cipher Algorithm Definitions
!Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
@@ -1680,7 +1690,7 @@ read(opfd, out, outlen);
!Finclude/linux/crypto.h aead_alg
!Finclude/linux/crypto.h blkcipher_alg
!Finclude/linux/crypto.h cipher_alg
-!Finclude/linux/crypto.h rng_alg
+!Finclude/crypto/rng.h rng_alg
Asynchronous Block Cipher API
!Pinclude/linux/crypto.h Asynchronous Block Cipher API
@@ -1704,26 +1714,27 @@ read(opfd, out, outlen);
!Finclude/linux/crypto.h ablkcipher_request_set_crypt
Authenticated Encryption With Associated Data (AEAD) Cipher API
-!Pinclude/linux/crypto.h Authenticated Encryption With Associated Data (AEAD) Cipher API
-!Finclude/linux/crypto.h crypto_alloc_aead
-!Finclude/linux/crypto.h crypto_free_aead
-!Finclude/linux/crypto.h crypto_aead_ivsize
-!Finclude/linux/crypto.h crypto_aead_authsize
-!Finclude/linux/crypto.h crypto_aead_blocksize
-!Finclude/linux/crypto.h crypto_aead_setkey
-!Finclude/linux/crypto.h crypto_aead_setauthsize
-!Finclude/linux/crypto.h crypto_aead_encrypt
-!Finclude/linux/crypto.h crypto_aead_decrypt
+!Pinclude/crypto/aead.h Authenticated Encryption With Associated Data (AEAD) Cipher API
+!Finclude/crypto/aead.h crypto_alloc_aead
+!Finclude/crypto/aead.h crypto_free_aead
+!Finclude/crypto/aead.h crypto_aead_ivsize
+!Finclude/crypto/aead.h crypto_aead_authsize
+!Finclude/crypto/aead.h crypto_aead_blocksize
+!Finclude/crypto/aead.h crypto_aead_setkey
+!Finclude/crypto/aead.h crypto_aead_setauthsize
+!Finclude/crypto/aead.h crypto_aead_encrypt
+!Finclude/crypto/aead.h crypto_aead_decrypt
Asynchronous AEAD Request Handle
-!Pinclude/linux/crypto.h Asynchronous AEAD Request Handle
-!Finclude/linux/crypto.h crypto_aead_reqsize
-!Finclude/linux/crypto.h aead_request_set_tfm
-!Finclude/linux/crypto.h aead_request_alloc
-!Finclude/linux/crypto.h aead_request_free
-!Finclude/linux/crypto.h aead_request_set_callback
-!Finclude/linux/crypto.h aead_request_set_crypt
-!Finclude/linux/crypto.h aead_request_set_assoc
+!Pinclude/crypto/aead.h Asynchronous AEAD Request Handle
+!Finclude/crypto/aead.h crypto_aead_reqsize
+!Finclude/crypto/aead.h aead_request_set_tfm
+!Finclude/crypto/aead.h aead_request_alloc
+!Finclude/crypto/aead.h aead_request_free
+!Finclude/crypto/aead.h aead_request_set_callback
+!Finclude/crypto/aead.h aead_request_set_crypt
+!Finclude/crypto/aead.h aead_request_set_assoc
+!Finclude/crypto/aead.h aead_request_set_ad
Synchronous Block Cipher API
!Pinclude/linux/crypto.h Synchronous Block Cipher API
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
index 38988ef1336b..f0d926bf9f36 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
@@ -1,9 +1,11 @@
-Freescale SoC SEC Security Engines versions 2.x-3.x
+Freescale SoC SEC Security Engines versions 1.x-2.x-3.x
Required properties:
- compatible : Should contain entries for this and backward compatible
- SEC versions, high to low, e.g., "fsl,sec2.1", "fsl,sec2.0"
+ SEC versions, high to low, e.g., "fsl,sec2.1", "fsl,sec2.0" (SEC2/3)
+ e.g., "fsl,sec1.2", "fsl,sec1.0" (SEC1)
+ warning: SEC1 and SEC2 are mutually exclusive
- reg : Offset and length of the register set for the device
- interrupts : the SEC's interrupt number
- fsl,num-channels : An integer representing the number of channels
diff --git a/Documentation/devicetree/bindings/crypto/marvell-cesa.txt b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt
new file mode 100644
index 000000000000..c6c6a4a045bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/marvell-cesa.txt
@@ -0,0 +1,45 @@
+Marvell Cryptographic Engines And Security Accelerator
+
+Required properties:
+- compatible: should be one of the following string
+ "marvell,orion-crypto"
+ "marvell,kirkwood-crypto"
+ "marvell,dove-crypto"
+ "marvell,armada-370-crypto"
+ "marvell,armada-xp-crypto"
+ "marvell,armada-375-crypto"
+ "marvell,armada-38x-crypto"
+- reg: base physical address of the engine and length of memory mapped
+ region. Can also contain an entry for the SRAM attached to the CESA,
+ but this representation is deprecated and marvell,crypto-srams should
+ be used instead
+- reg-names: "regs". Can contain an "sram" entry, but this representation
+ is deprecated and marvell,crypto-srams should be used instead
+- interrupts: interrupt number
+- clocks: reference to the crypto engines clocks. This property is not
+ required for orion and kirkwood platforms
+- clock-names: "cesaX" and "cesazX", X should be replaced by the crypto engine
+ id.
+ This property is not required for the orion and kirkwood
+ platforms.
+ "cesazX" clocks are not required on armada-370 platforms
+- marvell,crypto-srams: phandle to crypto SRAM definitions
+
+Optional properties:
+- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not
+ specified the whole SRAM is used (2KB)
+
+
+Examples:
+
+ crypto@90000 {
+ compatible = "marvell,armada-xp-crypto";
+ reg = <0x90000 0x10000>;
+ reg-names = "regs";
+ interrupts = <48>, <49>;
+ clocks = <&gateclk 23>, <&gateclk 23>;
+ clock-names = "cesa0", "cesa1";
+ marvell,crypto-srams = <&crypto_sram0>, <&crypto_sram1>;
+ marvell,crypto-sram-size = <0x600>;
+ status = "okay";
+ };
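
On the driver side, the optional property would be consumed through the
standard OF helpers; a hedged sketch, not the actual CESA driver code
(the function name and default constant are placeholders):

    #include <linux/of.h>

    #define CESA_SRAM_DEFAULT_SIZE 2048    /* whole 2KB SRAM when unset */

    /* Sketch: fetch the optional SRAM size, falling back to the default;
     * of_property_read_u32() leaves size untouched if the property is
     * absent. */
    static u32 cesa_sram_size(struct device_node *np)
    {
            u32 size = CESA_SRAM_DEFAULT_SIZE;

            of_property_read_u32(np, "marvell,crypto-sram-size", &size);
            return size;
    }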
diff --git a/Documentation/devicetree/bindings/crypto/mv_cesa.txt b/Documentation/devicetree/bindings/crypto/mv_cesa.txt
index 47229b1a594b..c0c35f00335b 100644
--- a/Documentation/devicetree/bindings/crypto/mv_cesa.txt
+++ b/Documentation/devicetree/bindings/crypto/mv_cesa.txt
@@ -1,20 +1,33 @@
Marvell Cryptographic Engines And Security Accelerator
Required properties:
-- compatible : should be "marvell,orion-crypto"
-- reg : base physical address of the engine and length of memory mapped
- region, followed by base physical address of sram and its memory
- length
-- reg-names : "regs" , "sram";
-- interrupts : interrupt number
+- compatible: should be one of the following string
+ "marvell,orion-crypto"
+ "marvell,kirkwood-crypto"
+ "marvell,dove-crypto"
+- reg: base physical address of the engine and length of memory mapped
+ region. Can also contain an entry for the SRAM attached to the CESA,
+ but this representation is deprecated and marvell,crypto-srams should
+ be used instead
+- reg-names: "regs". Can contain an "sram" entry, but this representation
+ is deprecated and marvell,crypto-srams should be used instead
+- interrupts: interrupt number
+- clocks: reference to the crypto engines clocks. This property is only
+ required for Dove platforms
+- marvell,crypto-srams: phandle to crypto SRAM definitions
+
+Optional properties:
+- marvell,crypto-sram-size: SRAM size reserved for crypto operations, if not
+ specified the whole SRAM is used (2KB)
Examples:
crypto@30000 {
compatible = "marvell,orion-crypto";
- reg = <0x30000 0x10000>,
- <0x4000000 0x800>;
- reg-names = "regs" , "sram";
+ reg = <0x30000 0x10000>;
+ reg-names = "regs";
interrupts = <22>;
+ marvell,crypto-srams = <&crypto_sram>;
+ marvell,crypto-sram-size = <0x600>;
status = "okay";
};
diff --git a/MAINTAINERS b/MAINTAINERS
index a655435705aa..4f1e79b52cc5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4879,13 +4879,23 @@ M: Marcelo Henrique Cerri
M: Fionnuala Gunter
L: linux-crypto@vger.kernel.org
S: Supported
-F: drivers/crypto/nx/
+F: drivers/crypto/nx/Makefile
+F: drivers/crypto/nx/Kconfig
+F: drivers/crypto/nx/nx-aes*
+F: drivers/crypto/nx/nx-sha*
+F: drivers/crypto/nx/nx.*
+F: drivers/crypto/nx/nx_csbcpb.h
+F: drivers/crypto/nx/nx_debugfs.h
IBM Power 842 compression accelerator
M: Dan Streetman
S: Supported
-F: drivers/crypto/nx/nx-842.c
-F: include/linux/nx842.h
+F: drivers/crypto/nx/Makefile
+F: drivers/crypto/nx/Kconfig
+F: drivers/crypto/nx/nx-842*
+F: include/linux/sw842.h
+F: crypto/842.c
+F: lib/842/
IBM Power Linux RAID adapter
M: Brian King
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 8da2207b0072..27ed1b1cd1d7 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -53,20 +53,13 @@ config CRYPTO_SHA256_ARM
SHA-256 secure hash standard (DFIPS 180-2) implemented
using optimized ARM assembler and NEON, when available.
-config CRYPTO_SHA512_ARM_NEON
- tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
- depends on KERNEL_MODE_NEON
- select CRYPTO_SHA512
+config CRYPTO_SHA512_ARM
+ tristate "SHA-384/512 digest algorithm (ARM-asm and NEON)"
select CRYPTO_HASH
+ depends on !CPU_V7M
help
SHA-512 secure hash standard (DFIPS 180-2) implemented
- using ARM NEON instructions, when available.
-
- This version of SHA implements a 512 bit hash with 256 bits of
- security against collision attacks.
-
- This code also includes SHA-384, a 384 bit hash with 192 bits
- of security against collision attacks.
+ using optimized ARM assembler and NEON, when available.
config CRYPTO_AES_ARM
tristate "AES cipher algorithms (ARM-asm)"
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 6ea828241fcb..fc5150702b64 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
-obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
+obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -30,7 +30,8 @@ sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
-sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
+sha512-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha512-neon-glue.o
+sha512-arm-y := sha512-core.o sha512-glue.o $(sha512-arm-neon-y)
sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
@@ -45,4 +46,7 @@ $(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
$(call cmd,perl)
-.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S
+$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
+ $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
index 8cfa468ee570..987aa632c9f0 100644
--- a/arch/arm/crypto/aes-ce-core.S
+++ b/arch/arm/crypto/aes-ce-core.S
@@ -101,15 +101,14 @@
\dround q10, q11
blo 0f @ AES-128: 10 rounds
vld1.8 {q10-q11}, [ip]!
- beq 1f @ AES-192: 12 rounds
\dround q12, q13
+ beq 1f @ AES-192: 12 rounds
vld1.8 {q12-q13}, [ip]
\dround q10, q11
0: \fround q12, q13, q14
bx lr
-1: \dround q12, q13
- \fround q10, q11, q14
+1: \fround q10, q11, q14
bx lr
.endm
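
For reference, the structure this macro unrolls follows the standard AES
round counts noted in the branch comments (10/12/14 rounds for
AES-128/192/256). A schematic C sketch, with hypothetical helpers that
stand in for the dround/fround macro pairs above, not the kernel
implementation:

    #include <stdint.h>

    /* Hypothetical helpers, declared only so the sketch is self-contained:
     * round_fn() = SubBytes+ShiftRows+MixColumns+AddRoundKey,
     * final_fn() = the last round, which omits MixColumns. */
    static void add_round_key(uint32_t state[4], const uint32_t *rk);
    static void round_fn(uint32_t state[4], const uint32_t *rk);
    static void final_fn(uint32_t state[4], const uint32_t *rk);

    /* nrounds is 10/12/14 for AES-128/192/256, which is why AES-192 needs
     * exactly one more round pair before the final round. */
    static void aes_encrypt_rounds(uint32_t state[4], const uint32_t *rk,
                                   int nrounds)
    {
            int i;

            add_round_key(state, rk);               /* initial whitening */
            for (i = 1; i < nrounds; i++)
                    round_fn(state, rk + 4 * i);    /* full rounds */
            final_fn(state, rk + 4 * nrounds);      /* final round */
    }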
@@ -122,8 +121,8 @@
* q2 : third in/output block (_3x version only)
* q8 : first round key
 * q9 : second round key
- * ip : address of 3rd round key
* q14 : final round key
+ * r2 : address of round key array
* r3 : number of rounds
*/
.align 6
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
new file mode 100644
index 000000000000..a2b11a844357
--- /dev/null
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -0,0 +1,649 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPL terms is granted.
+# ====================================================================
+
+# SHA512 block procedure for ARMv4. September 2007.
+
+# This code is ~4.5 (four and a half) times faster than code generated
+# by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+# Xscale PXA250 core].
+#
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 6% improvement on
+# Cortex A8 core and ~40 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 7%
+# improvement on Cortex A8 core and ~38 cycles per byte.
+
+# March 2011.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is disappointing result.
+# Technical writers asserted that 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
+# for further details. On side note Cortex-A15 processes one byte in
+# 16 cycles.
+
+# Byte order [in]dependence. =========================================
+#
+# Originally caller was expected to maintain specific *dword* order in
+# h[0-7], namely with most significant dword at *lower* address, which
+# was reflected in below two parameters as 0 and 4. Now caller is
+# expected to maintain native byte order for whole 64-bit values.
+$hi="HI";
+$lo="LO";
+# ====================================================================
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$ctx="r0"; # parameter block
+$inp="r1";
+$len="r2";
+
+$Tlo="r3";
+$Thi="r4";
+$Alo="r5";
+$Ahi="r6";
+$Elo="r7";
+$Ehi="r8";
+$t0="r9";
+$t1="r10";
+$t2="r11";
+$t3="r12";
+############ r13 is stack pointer
+$Ktbl="r14";
+############ r15 is program counter
+
+$Aoff=8*0;
+$Boff=8*1;
+$Coff=8*2;
+$Doff=8*3;
+$Eoff=8*4;
+$Foff=8*5;
+$Goff=8*6;
+$Hoff=8*7;
+$Xoff=8*8;
+
+sub BODY_00_15() {
+my $magic = shift;
+$code.=<<___;
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov $t0,$Elo,lsr#14
+ str $Tlo,[sp,#$Xoff+0]
+ mov $t1,$Ehi,lsr#14
+ str $Thi,[sp,#$Xoff+4]
+ eor $t0,$t0,$Ehi,lsl#18
+ ldr $t2,[sp,#$Hoff+0] @ h.lo
+ eor $t1,$t1,$Elo,lsl#18
+ ldr $t3,[sp,#$Hoff+4] @ h.hi
+ eor $t0,$t0,$Elo,lsr#18
+ eor $t1,$t1,$Ehi,lsr#18
+ eor $t0,$t0,$Ehi,lsl#14
+ eor $t1,$t1,$Elo,lsl#14
+ eor $t0,$t0,$Ehi,lsr#9
+ eor $t1,$t1,$Elo,lsr#9
+ eor $t0,$t0,$Elo,lsl#23
+ eor $t1,$t1,$Ehi,lsl#23 @ Sigma1(e)
+ adds $Tlo,$Tlo,$t0
+ ldr $t0,[sp,#$Foff+0] @ f.lo
+ adc $Thi,$Thi,$t1 @ T += Sigma1(e)
+ ldr $t1,[sp,#$Foff+4] @ f.hi
+ adds $Tlo,$Tlo,$t2
+ ldr $t2,[sp,#$Goff+0] @ g.lo
+ adc $Thi,$Thi,$t3 @ T += h
+ ldr $t3,[sp,#$Goff+4] @ g.hi
+
+ eor $t0,$t0,$t2
+ str $Elo,[sp,#$Eoff+0]
+ eor $t1,$t1,$t3
+ str $Ehi,[sp,#$Eoff+4]
+ and $t0,$t0,$Elo
+ str $Alo,[sp,#$Aoff+0]
+ and $t1,$t1,$Ehi
+ str $Ahi,[sp,#$Aoff+4]
+ eor $t0,$t0,$t2
+ ldr $t2,[$Ktbl,#$lo] @ K[i].lo
+ eor $t1,$t1,$t3 @ Ch(e,f,g)
+ ldr $t3,[$Ktbl,#$hi] @ K[i].hi
+
+ adds $Tlo,$Tlo,$t0
+ ldr $Elo,[sp,#$Doff+0] @ d.lo
+ adc $Thi,$Thi,$t1 @ T += Ch(e,f,g)
+ ldr $Ehi,[sp,#$Doff+4] @ d.hi
+ adds $Tlo,$Tlo,$t2
+ and $t0,$t2,#0xff
+ adc $Thi,$Thi,$t3 @ T += K[i]
+ adds $Elo,$Elo,$Tlo
+ ldr $t2,[sp,#$Boff+0] @ b.lo
+ adc $Ehi,$Ehi,$Thi @ d += T
+ teq $t0,#$magic
+
+ ldr $t3,[sp,#$Coff+0] @ c.lo
+#if __ARM_ARCH__>=7
+ it eq @ Thumb2 thing, sanity check in ARM
+#endif
+ orreq $Ktbl,$Ktbl,#1
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov $t0,$Alo,lsr#28
+ mov $t1,$Ahi,lsr#28
+ eor $t0,$t0,$Ahi,lsl#4
+ eor $t1,$t1,$Alo,lsl#4
+ eor $t0,$t0,$Ahi,lsr#2
+ eor $t1,$t1,$Alo,lsr#2
+ eor $t0,$t0,$Alo,lsl#30
+ eor $t1,$t1,$Ahi,lsl#30
+ eor $t0,$t0,$Ahi,lsr#7
+ eor $t1,$t1,$Alo,lsr#7
+ eor $t0,$t0,$Alo,lsl#25
+ eor $t1,$t1,$Ahi,lsl#25 @ Sigma0(a)
+ adds $Tlo,$Tlo,$t0
+ and $t0,$Alo,$t2
+ adc $Thi,$Thi,$t1 @ T += Sigma0(a)
+
+ ldr $t1,[sp,#$Boff+4] @ b.hi
+ orr $Alo,$Alo,$t2
+ ldr $t2,[sp,#$Coff+4] @ c.hi
+ and $Alo,$Alo,$t3
+ and $t3,$Ahi,$t1
+ orr $Ahi,$Ahi,$t1
+ orr $Alo,$Alo,$t0 @ Maj(a,b,c).lo
+ and $Ahi,$Ahi,$t2
+ adds $Alo,$Alo,$Tlo
+ orr $Ahi,$Ahi,$t3 @ Maj(a,b,c).hi
+ sub sp,sp,#8
+ adc $Ahi,$Ahi,$Thi @ h += T
+ tst $Ktbl,#1
+ add $Ktbl,$Ktbl,#8
+___
+}
+$code=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+#endif
+
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code 32
+#else
+.syntax unified
+# ifdef __thumb2__
+# define adrl adr
+.thumb
+# else
+.code 32
+# endif
+#endif
+
+.type K512,%object
+.align 5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size K512,.-K512
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha512_block_data_order
+.skip 32-4
+#else
+.skip 32
+#endif
+
+.global sha512_block_data_order
+.type sha512_block_data_order,%function
+sha512_block_data_order:
+#if __ARM_ARCH__<7
+ sub r3,pc,#8 @ sha512_block_data_order
+#else
+ adr r3,sha512_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#1
+ bne .LNEON
+#endif
+ add $len,$inp,$len,lsl#7 @ len to point at the end of inp
+ stmdb sp!,{r4-r12,lr}
+ sub $Ktbl,r3,#672 @ K512
+ sub sp,sp,#9*8
+
+ ldr $Elo,[$ctx,#$Eoff+$lo]
+ ldr $Ehi,[$ctx,#$Eoff+$hi]
+ ldr $t0, [$ctx,#$Goff+$lo]
+ ldr $t1, [$ctx,#$Goff+$hi]
+ ldr $t2, [$ctx,#$Hoff+$lo]
+ ldr $t3, [$ctx,#$Hoff+$hi]
+.Loop:
+ str $t0, [sp,#$Goff+0]
+ str $t1, [sp,#$Goff+4]
+ str $t2, [sp,#$Hoff+0]
+ str $t3, [sp,#$Hoff+4]
+ ldr $Alo,[$ctx,#$Aoff+$lo]
+ ldr $Ahi,[$ctx,#$Aoff+$hi]
+ ldr $Tlo,[$ctx,#$Boff+$lo]
+ ldr $Thi,[$ctx,#$Boff+$hi]
+ ldr $t0, [$ctx,#$Coff+$lo]
+ ldr $t1, [$ctx,#$Coff+$hi]
+ ldr $t2, [$ctx,#$Doff+$lo]
+ ldr $t3, [$ctx,#$Doff+$hi]
+ str $Tlo,[sp,#$Boff+0]
+ str $Thi,[sp,#$Boff+4]
+ str $t0, [sp,#$Coff+0]
+ str $t1, [sp,#$Coff+4]
+ str $t2, [sp,#$Doff+0]
+ str $t3, [sp,#$Doff+4]
+ ldr $Tlo,[$ctx,#$Foff+$lo]
+ ldr $Thi,[$ctx,#$Foff+$hi]
+ str $Tlo,[sp,#$Foff+0]
+ str $Thi,[sp,#$Foff+4]
+
+.L00_15:
+#if __ARM_ARCH__<7
+ ldrb $Tlo,[$inp,#7]
+ ldrb $t0, [$inp,#6]
+ ldrb $t1, [$inp,#5]
+ ldrb $t2, [$inp,#4]
+ ldrb $Thi,[$inp,#3]
+ ldrb $t3, [$inp,#2]
+ orr $Tlo,$Tlo,$t0,lsl#8
+ ldrb $t0, [$inp,#1]
+ orr $Tlo,$Tlo,$t1,lsl#16
+ ldrb $t1, [$inp],#8
+ orr $Tlo,$Tlo,$t2,lsl#24
+ orr $Thi,$Thi,$t3,lsl#8
+ orr $Thi,$Thi,$t0,lsl#16
+ orr $Thi,$Thi,$t1,lsl#24
+#else
+ ldr $Tlo,[$inp,#4]
+ ldr $Thi,[$inp],#8
+#ifdef __ARMEL__
+ rev $Tlo,$Tlo
+ rev $Thi,$Thi
+#endif
+#endif
+___
+ &BODY_00_15(0x94);
+$code.=<<___;
+ tst $Ktbl,#1
+ beq .L00_15
+ ldr $t0,[sp,#`$Xoff+8*(16-1)`+0]
+ ldr $t1,[sp,#`$Xoff+8*(16-1)`+4]
+ bic $Ktbl,$Ktbl,#1
+.L16_79:
+ @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+ @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+ @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
+ mov $Tlo,$t0,lsr#1
+ ldr $t2,[sp,#`$Xoff+8*(16-14)`+0]
+ mov $Thi,$t1,lsr#1
+ ldr $t3,[sp,#`$Xoff+8*(16-14)`+4]
+ eor $Tlo,$Tlo,$t1,lsl#31
+ eor $Thi,$Thi,$t0,lsl#31
+ eor $Tlo,$Tlo,$t0,lsr#8
+ eor $Thi,$Thi,$t1,lsr#8
+ eor $Tlo,$Tlo,$t1,lsl#24
+ eor $Thi,$Thi,$t0,lsl#24
+ eor $Tlo,$Tlo,$t0,lsr#7
+ eor $Thi,$Thi,$t1,lsr#7
+ eor $Tlo,$Tlo,$t1,lsl#25
+
+ @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+ @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+ @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+ mov $t0,$t2,lsr#19
+ mov $t1,$t3,lsr#19
+ eor $t0,$t0,$t3,lsl#13
+ eor $t1,$t1,$t2,lsl#13
+ eor $t0,$t0,$t3,lsr#29
+ eor $t1,$t1,$t2,lsr#29
+ eor $t0,$t0,$t2,lsl#3
+ eor $t1,$t1,$t3,lsl#3
+ eor $t0,$t0,$t2,lsr#6
+ eor $t1,$t1,$t3,lsr#6
+ ldr $t2,[sp,#`$Xoff+8*(16-9)`+0]
+ eor $t0,$t0,$t3,lsl#26
+
+ ldr $t3,[sp,#`$Xoff+8*(16-9)`+4]
+ adds $Tlo,$Tlo,$t0
+ ldr $t0,[sp,#`$Xoff+8*16`+0]
+ adc $Thi,$Thi,$t1
+
+ ldr $t1,[sp,#`$Xoff+8*16`+4]
+ adds $Tlo,$Tlo,$t2
+ adc $Thi,$Thi,$t3
+ adds $Tlo,$Tlo,$t0
+ adc $Thi,$Thi,$t1
+___
+ &BODY_00_15(0x17);
+$code.=<<___;
+#if __ARM_ARCH__>=7
+ ittt eq @ Thumb2 thing, sanity check in ARM
+#endif
+ ldreq $t0,[sp,#`$Xoff+8*(16-1)`+0]
+ ldreq $t1,[sp,#`$Xoff+8*(16-1)`+4]
+ beq .L16_79
+ bic $Ktbl,$Ktbl,#1
+
+ ldr $Tlo,[sp,#$Boff+0]
+ ldr $Thi,[sp,#$Boff+4]
+ ldr $t0, [$ctx,#$Aoff+$lo]
+ ldr $t1, [$ctx,#$Aoff+$hi]
+ ldr $t2, [$ctx,#$Boff+$lo]
+ ldr $t3, [$ctx,#$Boff+$hi]
+ adds $t0,$Alo,$t0
+ str $t0, [$ctx,#$Aoff+$lo]
+ adc $t1,$Ahi,$t1
+ str $t1, [$ctx,#$Aoff+$hi]
+ adds $t2,$Tlo,$t2
+ str $t2, [$ctx,#$Boff+$lo]
+ adc $t3,$Thi,$t3
+ str $t3, [$ctx,#$Boff+$hi]
+
+ ldr $Alo,[sp,#$Coff+0]
+ ldr $Ahi,[sp,#$Coff+4]
+ ldr $Tlo,[sp,#$Doff+0]
+ ldr $Thi,[sp,#$Doff+4]
+ ldr $t0, [$ctx,#$Coff+$lo]
+ ldr $t1, [$ctx,#$Coff+$hi]
+ ldr $t2, [$ctx,#$Doff+$lo]
+ ldr $t3, [$ctx,#$Doff+$hi]
+ adds $t0,$Alo,$t0
+ str $t0, [$ctx,#$Coff+$lo]
+ adc $t1,$Ahi,$t1
+ str $t1, [$ctx,#$Coff+$hi]
+ adds $t2,$Tlo,$t2
+ str $t2, [$ctx,#$Doff+$lo]
+ adc $t3,$Thi,$t3
+ str $t3, [$ctx,#$Doff+$hi]
+
+ ldr $Tlo,[sp,#$Foff+0]
+ ldr $Thi,[sp,#$Foff+4]
+ ldr $t0, [$ctx,#$Eoff+$lo]
+ ldr $t1, [$ctx,#$Eoff+$hi]
+ ldr $t2, [$ctx,#$Foff+$lo]
+ ldr $t3, [$ctx,#$Foff+$hi]
+ adds $Elo,$Elo,$t0
+ str $Elo,[$ctx,#$Eoff+$lo]
+ adc $Ehi,$Ehi,$t1
+ str $Ehi,[$ctx,#$Eoff+$hi]
+ adds $t2,$Tlo,$t2
+ str $t2, [$ctx,#$Foff+$lo]
+ adc $t3,$Thi,$t3
+ str $t3, [$ctx,#$Foff+$hi]
+
+ ldr $Alo,[sp,#$Goff+0]
+ ldr $Ahi,[sp,#$Goff+4]
+ ldr $Tlo,[sp,#$Hoff+0]
+ ldr $Thi,[sp,#$Hoff+4]
+ ldr $t0, [$ctx,#$Goff+$lo]
+ ldr $t1, [$ctx,#$Goff+$hi]
+ ldr $t2, [$ctx,#$Hoff+$lo]
+ ldr $t3, [$ctx,#$Hoff+$hi]
+ adds $t0,$Alo,$t0
+ str $t0, [$ctx,#$Goff+$lo]
+ adc $t1,$Ahi,$t1
+ str $t1, [$ctx,#$Goff+$hi]
+ adds $t2,$Tlo,$t2
+ str $t2, [$ctx,#$Hoff+$lo]
+ adc $t3,$Thi,$t3
+ str $t3, [$ctx,#$Hoff+$hi]
+
+ add sp,sp,#640
+ sub $Ktbl,$Ktbl,#640
+
+ teq $inp,$len
+ bne .Loop
+
+ add sp,sp,#8*9 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size sha512_block_data_order,.-sha512_block_data_order
+___
+
+{
+my @Sigma0=(28,34,39);
+my @Sigma1=(14,18,41);
+my @sigma0=(1, 8, 7);
+my @sigma1=(19,61,6);
+
+my $Ktbl="r3";
+my $cnt="r12"; # volatile register known as ip, intra-procedure-call scratch
+
+my @X=map("d$_",(0..15));
+my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23));
+
+sub NEON_00_15() {
+my $i=shift;
+my ($a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31)); # temps
+
+$code.=<<___ if ($i<16 || $i&1);
+ vshr.u64 $t0,$e,#@Sigma1[0] @ $i
+#if $i<16
+ vld1.64 {@X[$i%16]},[$inp]! @ handles unaligned
+#endif
+ vshr.u64 $t1,$e,#@Sigma1[1]
+#if $i>0
+ vadd.i64 $a,$Maj @ h+=Maj from the past
+#endif
+ vshr.u64 $t2,$e,#@Sigma1[2]
+___
+$code.=<<___;
+ vld1.64 {$K},[$Ktbl,:64]! @ K[i++]
+ vsli.64 $t0,$e,#`64-@Sigma1[0]`
+ vsli.64 $t1,$e,#`64-@Sigma1[1]`
+ vmov $Ch,$e
+ vsli.64 $t2,$e,#`64-@Sigma1[2]`
+#if $i<16 && defined(__ARMEL__)
+ vrev64.8 @X[$i],@X[$i]
+#endif
+ veor $t1,$t0
+ vbsl $Ch,$f,$g @ Ch(e,f,g)
+ vshr.u64 $t0,$a,#@Sigma0[0]
+ veor $t2,$t1 @ Sigma1(e)
+ vadd.i64 $T1,$Ch,$h
+ vshr.u64 $t1,$a,#@Sigma0[1]
+ vsli.64 $t0,$a,#`64-@Sigma0[0]`
+ vadd.i64 $T1,$t2
+ vshr.u64 $t2,$a,#@Sigma0[2]
+ vadd.i64 $K,@X[$i%16]
+ vsli.64 $t1,$a,#`64-@Sigma0[1]`
+ veor $Maj,$a,$b
+ vsli.64 $t2,$a,#`64-@Sigma0[2]`
+ veor $h,$t0,$t1
+ vadd.i64 $T1,$K
+ vbsl $Maj,$c,$b @ Maj(a,b,c)
+ veor $h,$t2 @ Sigma0(a)
+ vadd.i64 $d,$T1
+ vadd.i64 $Maj,$T1
+ @ vadd.i64 $h,$Maj
+___
+}
+
+sub NEON_16_79() {
+my $i=shift;
+
+if ($i&1) { &NEON_00_15($i,@_); return; }
+
+# 2x-vectorized, therefore runs every 2nd round
+my @X=map("q$_",(0..7)); # view @X as 128-bit vector
+my ($t0,$t1,$s0,$s1) = map("q$_",(12..15)); # temps
+my ($d0,$d1,$d2) = map("d$_",(24..26)); # temps from NEON_00_15
+my $e=@_[4]; # $e from NEON_00_15
+$i /= 2;
+$code.=<<___;
+ vshr.u64 $t0,@X[($i+7)%8],#@sigma1[0]
+ vshr.u64 $t1,@X[($i+7)%8],#@sigma1[1]
+ vadd.i64 @_[0],d30 @ h+=Maj from the past
+ vshr.u64 $s1,@X[($i+7)%8],#@sigma1[2]
+ vsli.64 $t0,@X[($i+7)%8],#`64-@sigma1[0]`
+ vext.8 $s0,@X[$i%8],@X[($i+1)%8],#8 @ X[i+1]
+ vsli.64 $t1,@X[($i+7)%8],#`64-@sigma1[1]`
+ veor $s1,$t0
+ vshr.u64 $t0,$s0,#@sigma0[0]
+ veor $s1,$t1 @ sigma1(X[i+14])
+ vshr.u64 $t1,$s0,#@sigma0[1]
+ vadd.i64 @X[$i%8],$s1
+ vshr.u64 $s1,$s0,#@sigma0[2]
+ vsli.64 $t0,$s0,#`64-@sigma0[0]`
+ vsli.64 $t1,$s0,#`64-@sigma0[1]`
+ vext.8 $s0,@X[($i+4)%8],@X[($i+5)%8],#8 @ X[i+9]
+ veor $s1,$t0
+ vshr.u64 $d0,$e,#@Sigma1[0] @ from NEON_00_15
+ vadd.i64 @X[$i%8],$s0
+ vshr.u64 $d1,$e,#@Sigma1[1] @ from NEON_00_15
+ veor $s1,$t1 @ sigma0(X[i+1])
+ vshr.u64 $d2,$e,#@Sigma1[2] @ from NEON_00_15
+ vadd.i64 @X[$i%8],$s1
+___
+ &NEON_00_15(2*$i,@_);
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.global sha512_block_data_order_neon
+.type sha512_block_data_order_neon,%function
+.align 4
+sha512_block_data_order_neon:
+.LNEON:
+ dmb @ errata #451034 on early Cortex A8
+ add $len,$inp,$len,lsl#7 @ len to point at the end of inp
+ VFP_ABI_PUSH
+ adrl $Ktbl,K512
+ vldmia $ctx,{$A-$H} @ load context
+.Loop_neon:
+___
+for($i=0;$i<16;$i++) { &NEON_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ mov $cnt,#4
+.L16_79_neon:
+ subs $cnt,#1
+___
+for(;$i<32;$i++) { &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ bne .L16_79_neon
+
+ vadd.i64 $A,d30 @ h+=Maj from the past
+ vldmia $ctx,{d24-d31} @ load context to temp
+ vadd.i64 q8,q12 @ vectorized accumulate
+ vadd.i64 q9,q13
+ vadd.i64 q10,q14
+ vadd.i64 q11,q15
+ vstmia $ctx,{$A-$H} @ save context
+ teq $inp,$len
+ sub $Ktbl,#640 @ rewind K512
+ bne .Loop_neon
+
+ VFP_ABI_POP
+ ret @ bx lr
+.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
+#endif
+___
+}
+$code.=<<___;
+.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx lr/gm;
+
+open SELF,$0;
+while(<SELF>) {
+ next if (/^#!/);
+ last if (!s/^#/@/ and !/^$/);
+ print;
+}
+close SELF;
+
+print $code;
+close STDOUT; # enforce flush
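
As a scalar cross-check for the two code paths the script emits (the
32-bit integer code generated by BODY_00_15 and the NEON path), the
round function and message schedule they compute are the standard
SHA-512 ones. A minimal C reference sketch, assuming only the FIPS
180-2 definitions; the rotation constants match the script's
@Sigma0/@Sigma1/@sigma0/@sigma1 arrays, and the comments note how the
integer code splits each 64-bit rotate into lo/hi shift pairs:

    #include <stdint.h>

    static inline uint64_t rotr64(uint64_t x, unsigned n)   /* 1 <= n <= 63 */
    {
            return (x >> n) | (x << (64 - n));
    }

    /* On a 32-bit core this rotate becomes the shift/OR pairs seen in
     * BODY_00_15; for n < 32:
     *   lo' = (lo >> n) | (hi << (32 - n));
     *   hi' = (hi >> n) | (lo << (32 - n));
     * and a rotate by n >= 32 first swaps the two halves. */

    static inline uint64_t Sigma1(uint64_t e)
    {
            return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
    }

    static inline uint64_t Sigma0(uint64_t a)
    {
            return rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
    }

    static inline uint64_t sigma0(uint64_t x)
    {
            return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7);
    }

    static inline uint64_t sigma1(uint64_t x)
    {
            return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6);
    }

    /* Ch and Maj as single bit-selects, mirroring the vbsl usage in the
     * NEON path. */
    static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)
    {
            return (e & f) ^ (~e & g);
    }

    static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c)
    {
            return (a & b) ^ (a & c) ^ (b & c);
    }

    /* Message schedule computed 2x-vectorized by NEON_16_79, over a
     * 16-entry circular buffer (valid for i >= 16):
     *   W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16] */
    static uint64_t schedule(const uint64_t W[16], unsigned i)
    {
            return sigma1(W[(i - 2) & 15]) + W[(i - 7) & 15] +
                   sigma0(W[(i - 15) & 15]) + W[(i - 16) & 15];
    }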
diff --git a/arch/arm/crypto/sha512-armv7-neon.S b/arch/arm/crypto/sha512-armv7-neon.S
deleted file mode 100644
index fe99472e507c..000000000000
--- a/arch/arm/crypto/sha512-armv7-neon.S
+++ /dev/null
@@ -1,455 +0,0 @@
-/* sha512-armv7-neon.S - ARM/NEON assembly implementation of SHA-512 transform
- *
- * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/linkage.h>
-
-
-.syntax unified
-.code 32
-.fpu neon
-
-.text
-
-/* structure of SHA512_CONTEXT */
-#define hd_a 0
-#define hd_b ((hd_a) + 8)
-#define hd_c ((hd_b) + 8)
-#define hd_d ((hd_c) + 8)
-#define hd_e ((hd_d) + 8)
-#define hd_f ((hd_e) + 8)
-#define hd_g ((hd_f) + 8)
-
-/* register macros */
-#define RK %r2
-
-#define RA d0
-#define RB d1
-#define RC d2
-#define RD d3
-#define RE d4
-#define RF d5
-#define RG d6
-#define RH d7
-
-#define RT0 d8
-#define RT1 d9
-#define RT2 d10
-#define RT3 d11
-#define RT4 d12
-#define RT5 d13
-#define RT6 d14
-#define RT7 d15
-
-#define RT01q q4
-#define RT23q q5
-#define RT45q q6
-#define RT67q q7
-
-#define RW0 d16
-#define RW1 d17
-#define RW2 d18
-#define RW3 d19
-#define RW4 d20
-#define RW5 d21
-#define RW6 d22
-#define RW7 d23
-#define RW8 d24
-#define RW9 d25
-#define RW10 d26
-#define RW11 d27
-#define RW12 d28
-#define RW13 d29
-#define RW14 d30
-#define RW15 d31
-
-#define RW01q q8
-#define RW23q q9
-#define RW45q q10
-#define RW67q q11
-#define RW89q q12
-#define RW1011q q13
-#define RW1213q q14
-#define RW1415q q15
-
-/***********************************************************************
- * ARM assembly implementation of sha512 transform
- ***********************************************************************/
-#define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \
- rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \
- /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
- vshr.u64 RT2, re, #14; \
- vshl.u64 RT3, re, #64 - 14; \
- interleave_op(arg1); \
- vshr.u64 RT4, re, #18; \
- vshl.u64 RT5, re, #64 - 18; \
- vld1.64 {RT0}, [RK]!; \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, re, #41; \
- vshl.u64 RT5, re, #64 - 41; \
- vadd.u64 RT0, RT0, rw0; \
- veor.64 RT23q, RT23q, RT45q; \
- vmov.64 RT7, re; \
- veor.64 RT1, RT2, RT3; \
- vbsl.64 RT7, rf, rg; \
- \
- vadd.u64 RT1, RT1, rh; \
- vshr.u64 RT2, ra, #28; \
- vshl.u64 RT3, ra, #64 - 28; \
- vadd.u64 RT1, RT1, RT0; \
- vshr.u64 RT4, ra, #34; \
- vshl.u64 RT5, ra, #64 - 34; \
- vadd.u64 RT1, RT1, RT7; \
- \
- /* h = Sum0 (a) + Maj (a, b, c); */ \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, ra, #39; \
- vshl.u64 RT5, ra, #64 - 39; \
- veor.64 RT0, ra, rb; \
- veor.64 RT23q, RT23q, RT45q; \
- vbsl.64 RT0, rc, rb; \
- vadd.u64 rd, rd, RT1; /* d+=t1; */ \
- veor.64 rh, RT2, RT3; \
- \
- /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
- vshr.u64 RT2, rd, #14; \
- vshl.u64 RT3, rd, #64 - 14; \
- vadd.u64 rh, rh, RT0; \
- vshr.u64 RT4, rd, #18; \
- vshl.u64 RT5, rd, #64 - 18; \
- vadd.u64 rh, rh, RT1; /* h+=t1; */ \
- vld1.64 {RT0}, [RK]!; \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, rd, #41; \
- vshl.u64 RT5, rd, #64 - 41; \
- vadd.u64 RT0, RT0, rw1; \
- veor.64 RT23q, RT23q, RT45q; \
- vmov.64 RT7, rd; \
- veor.64 RT1, RT2, RT3; \
- vbsl.64 RT7, re, rf; \
- \
- vadd.u64 RT1, RT1, rg; \
- vshr.u64 RT2, rh, #28; \
- vshl.u64 RT3, rh, #64 - 28; \
- vadd.u64 RT1, RT1, RT0; \
- vshr.u64 RT4, rh, #34; \
- vshl.u64 RT5, rh, #64 - 34; \
- vadd.u64 RT1, RT1, RT7; \
- \
- /* g = Sum0 (h) + Maj (h, a, b); */ \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, rh, #39; \
- vshl.u64 RT5, rh, #64 - 39; \
- veor.64 RT0, rh, ra; \
- veor.64 RT23q, RT23q, RT45q; \
- vbsl.64 RT0, rb, ra; \
- vadd.u64 rc, rc, RT1; /* c+=t1; */ \
- veor.64 rg, RT2, RT3; \
- \
- /* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \
- /* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \
- \
- /**** S0(w[1:2]) */ \
- \
- /* w[0:1] += w[9:10] */ \
- /* RT23q = rw1:rw2 */ \
- vext.u64 RT23q, rw01q, rw23q, #1; \
- vadd.u64 rw0, rw9; \
- vadd.u64 rg, rg, RT0; \
- vadd.u64 rw1, rw10;\
- vadd.u64 rg, rg, RT1; /* g+=t1; */ \
- \
- vshr.u64 RT45q, RT23q, #1; \
- vshl.u64 RT67q, RT23q, #64 - 1; \
- vshr.u64 RT01q, RT23q, #8; \
- veor.u64 RT45q, RT45q, RT67q; \
- vshl.u64 RT67q, RT23q, #64 - 8; \
- veor.u64 RT45q, RT45q, RT01q; \
- vshr.u64 RT01q, RT23q, #7; \
- veor.u64 RT45q, RT45q, RT67q; \
- \
- /**** S1(w[14:15]) */ \
- vshr.u64 RT23q, rw1415q, #6; \
- veor.u64 RT01q, RT01q, RT45q; \
- vshr.u64 RT45q, rw1415q, #19; \
- vshl.u64 RT67q, rw1415q, #64 - 19; \
- veor.u64 RT23q, RT23q, RT45q; \
- vshr.u64 RT45q, rw1415q, #61; \
- veor.u64 RT23q, RT23q, RT67q; \
- vshl.u64 RT67q, rw1415q, #64 - 61; \
- veor.u64 RT23q, RT23q, RT45q; \
- vadd.u64 rw01q, RT01q; /* w[0:1] += S(w[1:2]) */ \
- veor.u64 RT01q, RT23q, RT67q;
-#define vadd_RT01q(rw01q) \
- /* w[0:1] += S(w[14:15]) */ \
- vadd.u64 rw01q, RT01q;
-
-#define dummy(_) /*_*/
-
-#define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \
- interleave_op1, arg1, interleave_op2, arg2) \
- /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
- vshr.u64 RT2, re, #14; \
- vshl.u64 RT3, re, #64 - 14; \
- interleave_op1(arg1); \
- vshr.u64 RT4, re, #18; \
- vshl.u64 RT5, re, #64 - 18; \
- interleave_op2(arg2); \
- vld1.64 {RT0}, [RK]!; \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, re, #41; \
- vshl.u64 RT5, re, #64 - 41; \
- vadd.u64 RT0, RT0, rw0; \
- veor.64 RT23q, RT23q, RT45q; \
- vmov.64 RT7, re; \
- veor.64 RT1, RT2, RT3; \
- vbsl.64 RT7, rf, rg; \
- \
- vadd.u64 RT1, RT1, rh; \
- vshr.u64 RT2, ra, #28; \
- vshl.u64 RT3, ra, #64 - 28; \
- vadd.u64 RT1, RT1, RT0; \
- vshr.u64 RT4, ra, #34; \
- vshl.u64 RT5, ra, #64 - 34; \
- vadd.u64 RT1, RT1, RT7; \
- \
- /* h = Sum0 (a) + Maj (a, b, c); */ \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, ra, #39; \
- vshl.u64 RT5, ra, #64 - 39; \
- veor.64 RT0, ra, rb; \
- veor.64 RT23q, RT23q, RT45q; \
- vbsl.64 RT0, rc, rb; \
- vadd.u64 rd, rd, RT1; /* d+=t1; */ \
- veor.64 rh, RT2, RT3; \
- \
- /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
- vshr.u64 RT2, rd, #14; \
- vshl.u64 RT3, rd, #64 - 14; \
- vadd.u64 rh, rh, RT0; \
- vshr.u64 RT4, rd, #18; \
- vshl.u64 RT5, rd, #64 - 18; \
- vadd.u64 rh, rh, RT1; /* h+=t1; */ \
- vld1.64 {RT0}, [RK]!; \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, rd, #41; \
- vshl.u64 RT5, rd, #64 - 41; \
- vadd.u64 RT0, RT0, rw1; \
- veor.64 RT23q, RT23q, RT45q; \
- vmov.64 RT7, rd; \
- veor.64 RT1, RT2, RT3; \
- vbsl.64 RT7, re, rf; \
- \
- vadd.u64 RT1, RT1, rg; \
- vshr.u64 RT2, rh, #28; \
- vshl.u64 RT3, rh, #64 - 28; \
- vadd.u64 RT1, RT1, RT0; \
- vshr.u64 RT4, rh, #34; \
- vshl.u64 RT5, rh, #64 - 34; \
- vadd.u64 RT1, RT1, RT7; \
- \
- /* g = Sum0 (h) + Maj (h, a, b); */ \
- veor.64 RT23q, RT23q, RT45q; \
- vshr.u64 RT4, rh, #39; \
- vshl.u64 RT5, rh, #64 - 39; \
- veor.64 RT0, rh, ra; \
- veor.64 RT23q, RT23q, RT45q; \
- vbsl.64 RT0, rb, ra; \
- vadd.u64 rc, rc, RT1; /* c+=t1; */ \
- veor.64 rg, RT2, RT3;
-#define vadd_rg_RT0(rg) \
- vadd.u64 rg, rg, RT0;
-#define vadd_rg_RT1(rg) \
- vadd.u64 rg, rg, RT1; /* g+=t1; */
-
-.align 3
-ENTRY(sha512_transform_neon)
- /* Input:
- * %r0: SHA512_CONTEXT
- * %r1: data
- * %r2: u64 k[] constants
- * %r3: nblks
- */
- push {%lr};
-
- mov %lr, #0;
-
- /* Load context to d0-d7 */
- vld1.64 {RA-RD}, [%r0]!;
- vld1.64 {RE-RH}, [%r0];
- sub %r0, #(4*8);
-
- /* Load input to w[16], d16-d31 */
- /* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */
- vld1.64 {RW0-RW3}, [%r1]!;
- vld1.64 {RW4-RW7}, [%r1]!;
- vld1.64 {RW8-RW11}, [%r1]!;
- vld1.64 {RW12-RW15}, [%r1]!;
-#ifdef __ARMEL__
- /* byteswap */
- vrev64.8 RW01q, RW01q;
- vrev64.8 RW23q, RW23q;
- vrev64.8 RW45q, RW45q;
- vrev64.8 RW67q, RW67q;
- vrev64.8 RW89q, RW89q;
- vrev64.8 RW1011q, RW1011q;
- vrev64.8 RW1213q, RW1213q;
- vrev64.8 RW1415q, RW1415q;
-#endif
-
- /* EABI says that d8-d15 must be preserved by callee. */
- /*vpush {RT0-RT7};*/
-
-.Loop:
- rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
- RW23q, RW1415q, RW9, RW10, dummy, _);
- b .Lenter_rounds;
-
-.Loop_rounds:
- rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
- RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q);
-.Lenter_rounds:
- rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4,
- RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q);
- rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6,
- RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q);
- rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8,
- RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q);
- rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10,
- RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q);
- rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12,
- RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q);
- add %lr, #16;
- rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14,
- RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q);
- cmp %lr, #64;
- rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0,
- RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q);
- bne .Loop_rounds;
-
- subs %r3, #1;
-
- rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1,
- vadd_RT01q, RW1415q, dummy, _);
- rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3,
- vadd_rg_RT0, RG, vadd_rg_RT1, RG);
- beq .Lhandle_tail;
- vld1.64 {RW0-RW3}, [%r1]!;
- rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
- vadd_rg_RT0, RE, vadd_rg_RT1, RE);
- rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
- vadd_rg_RT0, RC, vadd_rg_RT1, RC);
-#ifdef __ARMEL__
- vrev64.8 RW01q, RW01q;
- vrev64.8 RW23q, RW23q;
-#endif
- vld1.64 {RW4-RW7}, [%r1]!;
- rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
- vadd_rg_RT0, RA, vadd_rg_RT1, RA);
- rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
- vadd_rg_RT0, RG, vadd_rg_RT1, RG);
-#ifdef __ARMEL__
- vrev64.8 RW45q, RW45q;
- vrev64.8 RW67q, RW67q;
-#endif
- vld1.64 {RW8-RW11}, [%r1]!;
- rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
- vadd_rg_RT0, RE, vadd_rg_RT1, RE);
- rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
- vadd_rg_RT0, RC, vadd_rg_RT1, RC);
-#ifdef __ARMEL__
- vrev64.8 RW89q, RW89q;
- vrev64.8 RW1011q, RW1011q;
-#endif
- vld1.64 {RW12-RW15}, [%r1]!;
- vadd_rg_RT0(RA);
- vadd_rg_RT1(RA);
-
- /* Load context */
- vld1.64 {RT0-RT3}, [%r0]!;
- vld1.64 {RT4-RT7}, [%r0];
- sub %r0, #(4*8);
-
-#ifdef __ARMEL__
- vrev64.8 RW1213q, RW1213q;
- vrev64.8 RW1415q, RW1415q;
-#endif
-
- vadd.u64 RA, RT0;
- vadd.u64 RB, RT1;
- vadd.u64 RC, RT2;
- vadd.u64 RD, RT3;
- vadd.u64 RE, RT4;
- vadd.u64 RF, RT5;
- vadd.u64 RG, RT6;
- vadd.u64 RH, RT7;
-
- /* Store the first half of context */
- vst1.64 {RA-RD}, [%r0]!;
- sub RK, $(8*80);
- vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
- mov %lr, #0;
- sub %r0, #(4*8);
-
- b .Loop;
-
-.Lhandle_tail:
- rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
- vadd_rg_RT0, RE, vadd_rg_RT1, RE);
- rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
- vadd_rg_RT0, RC, vadd_rg_RT1, RC);
- rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
- vadd_rg_RT0, RA, vadd_rg_RT1, RA);
- rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
- vadd_rg_RT0, RG, vadd_rg_RT1, RG);
- rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
- vadd_rg_RT0, RE, vadd_rg_RT1, RE);
- rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
- vadd_rg_RT0, RC, vadd_rg_RT1, RC);
-
- /* Load context to d16-d23 */
- vld1.64 {RW0-RW3}, [%r0]!;
- vadd_rg_RT0(RA);
- vld1.64 {RW4-RW7}, [%r0];
- vadd_rg_RT1(RA);
- sub %r0, #(4*8);
-
- vadd.u64 RA, RW0;
- vadd.u64 RB, RW1;
- vadd.u64 RC, RW2;
- vadd.u64 RD, RW3;
- vadd.u64 RE, RW4;
- vadd.u64 RF, RW5;
- vadd.u64 RG, RW6;
- vadd.u64 RH, RW7;
-
- /* Store the first half of context */
- vst1.64 {RA-RD}, [%r0]!;
-
- /* Clear used registers */
- /* d16-d31 */
- veor.u64 RW01q, RW01q;
- veor.u64 RW23q, RW23q;
- veor.u64 RW45q, RW45q;
- veor.u64 RW67q, RW67q;
- vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
- veor.u64 RW89q, RW89q;
- veor.u64 RW1011q, RW1011q;
- veor.u64 RW1213q, RW1213q;
- veor.u64 RW1415q, RW1415q;
- /* d8-d15 */
- /*vpop {RT0-RT7};*/
- /* d0-d7 (q0-q3) */
- veor.u64 %q0, %q0;
- veor.u64 %q1, %q1;
- veor.u64 %q2, %q2;
- veor.u64 %q3, %q3;
-
- pop {%pc};
-ENDPROC(sha512_transform_neon)
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
new file mode 100644
index 000000000000..3694c4d4ca2b
--- /dev/null
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -0,0 +1,1861 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Permission to use under GPL terms is granted.
+@ ====================================================================
+
+@ SHA512 block procedure for ARMv4. September 2007.
+
+@ This code is ~4.5 (four and a half) times faster than code generated
+@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+@ Xscale PXA250 core].
+@
+@ July 2010.
+@
+@ Rescheduling for dual-issue pipeline resulted in 6% improvement on
+@ Cortex A8 core and ~40 cycles per processed byte.
+
+@ February 2011.
+@
+@ Profiler-assisted and platform-specific optimization resulted in 7%
+@ improvement on Cortex A8 core and ~38 cycles per byte.
+
+@ March 2011.
+@
+@ Add NEON implementation. On Cortex A8 it was measured to process
+@ one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+@ August 2012.
+@
+@ Improve NEON performance by 12% on Snapdragon S4. In absolute
+@ terms it's 22.6 cycles per byte, which is disappointing result.
+@ Technical writers asserted that 3-way S4 pipeline can sustain
+@ multiple NEON instructions per cycle, but dual NEON issue could
+@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
+@ for further details. On side note Cortex-A15 processes one byte in
+@ 16 cycles.
+
+@ Byte order [in]dependence. =========================================
+@
+@ Originally caller was expected to maintain specific *dword* order in
+@ h[0-7], namely with most significant dword at *lower* address, which
+@ was reflected in below two parameters as 0 and 4. Now caller is
+@ expected to maintain native byte order for whole 64-bit values.
+#ifndef __KERNEL__
+# include "arm_arch.h"
+# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
+# define VFP_ABI_POP vldmia sp!,{d8-d15}
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+#endif
+
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code 32
+#else
+.syntax unified
+# ifdef __thumb2__
+# define adrl adr
+.thumb
+# else
+.code 32
+# endif
+#endif
+
+.type K512,%object
+.align 5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size K512,.-K512
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha512_block_data_order
+.skip 32-4
+#else
+.skip 32
+#endif
+
+.global sha512_block_data_order
+.type sha512_block_data_order,%function
+sha512_block_data_order:
+#if __ARM_ARCH__<7
+ sub r3,pc,#8 @ sha512_block_data_order
+#else
+ adr r3,sha512_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#1
+ bne .LNEON
+#endif
+ add r2,r1,r2,lsl#7 @ len to point at the end of inp
+ stmdb sp!,{r4-r12,lr}
+ sub r14,r3,#672 @ K512
+ sub sp,sp,#9*8
+
+ ldr r7,[r0,#32+LO]
+ ldr r8,[r0,#32+HI]
+ ldr r9, [r0,#48+LO]
+ ldr r10, [r0,#48+HI]
+ ldr r11, [r0,#56+LO]
+ ldr r12, [r0,#56+HI]
+.Loop:
+ str r9, [sp,#48+0]
+ str r10, [sp,#48+4]
+ str r11, [sp,#56+0]
+ str r12, [sp,#56+4]
+ ldr r5,[r0,#0+LO]
+ ldr r6,[r0,#0+HI]
+ ldr r3,[r0,#8+LO]
+ ldr r4,[r0,#8+HI]
+ ldr r9, [r0,#16+LO]
+ ldr r10, [r0,#16+HI]
+ ldr r11, [r0,#24+LO]
+ ldr r12, [r0,#24+HI]
+ str r3,[sp,#8+0]
+ str r4,[sp,#8+4]
+ str r9, [sp,#16+0]
+ str r10, [sp,#16+4]
+ str r11, [sp,#24+0]
+ str r12, [sp,#24+4]
+ ldr r3,[r0,#40+LO]
+ ldr r4,[r0,#40+HI]
+ str r3,[sp,#40+0]
+ str r4,[sp,#40+4]
+
+.L00_15:
+#if __ARM_ARCH__<7
+ ldrb r3,[r1,#7]
+ ldrb r9, [r1,#6]
+ ldrb r10, [r1,#5]
+ ldrb r11, [r1,#4]
+ ldrb r4,[r1,#3]
+ ldrb r12, [r1,#2]
+ orr r3,r3,r9,lsl#8
+ ldrb r9, [r1,#1]
+ orr r3,r3,r10,lsl#16
+ ldrb r10, [r1],#8
+ orr r3,r3,r11,lsl#24
+ orr r4,r4,r12,lsl#8
+ orr r4,r4,r9,lsl#16
+ orr r4,r4,r10,lsl#24
+#else
+ ldr r3,[r1,#4]
+ ldr r4,[r1],#8
+#ifdef __ARMEL__
+ rev r3,r3
+ rev r4,r4
+#endif
+#endif
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ str r3,[sp,#64+0]
+ mov r10,r8,lsr#14
+ str r4,[sp,#64+4]
+ eor r9,r9,r8,lsl#18
+ ldr r11,[sp,#56+0] @ h.lo
+ eor r10,r10,r7,lsl#18
+ ldr r12,[sp,#56+4] @ h.hi
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ ldr r9,[sp,#40+0] @ f.lo
+ adc r4,r4,r10 @ T += Sigma1(e)
+ ldr r10,[sp,#40+4] @ f.hi
+ adds r3,r3,r11
+ ldr r11,[sp,#48+0] @ g.lo
+ adc r4,r4,r12 @ T += h
+ ldr r12,[sp,#48+4] @ g.hi
+
+ eor r9,r9,r11
+ str r7,[sp,#32+0]
+ eor r10,r10,r12
+ str r8,[sp,#32+4]
+ and r9,r9,r7
+ str r5,[sp,#0+0]
+ and r10,r10,r8
+ str r6,[sp,#0+4]
+ eor r9,r9,r11
+ ldr r11,[r14,#LO] @ K[i].lo
+ eor r10,r10,r12 @ Ch(e,f,g)
+ ldr r12,[r14,#HI] @ K[i].hi
+
+ adds r3,r3,r9
+ ldr r7,[sp,#24+0] @ d.lo
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ ldr r8,[sp,#24+4] @ d.hi
+ adds r3,r3,r11
+ and r9,r11,#0xff
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ ldr r11,[sp,#8+0] @ b.lo
+ adc r8,r8,r4 @ d += T
+ teq r9,#148
+
+ ldr r12,[sp,#16+0] @ c.lo
+#if __ARM_ARCH__>=7
+ it eq @ Thumb2 thing, sanity check in ARM
+#endif
+ orreq r14,r14,#1
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ and r9,r5,r11
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ ldr r10,[sp,#8+4] @ b.hi
+ orr r5,r5,r11
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ and r12,r6,r10
+ orr r6,r6,r10
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r6,r6,r11
+ adds r5,r5,r3
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ sub sp,sp,#8
+ adc r6,r6,r4 @ h += T
+ tst r14,#1
+ add r14,r14,#8
+ tst r14,#1
+ beq .L00_15
+ ldr r9,[sp,#184+0]
+ ldr r10,[sp,#184+4]
+ bic r14,r14,#1
+.L16_79:
+ @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+ @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+ @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
+ mov r3,r9,lsr#1
+ ldr r11,[sp,#80+0]
+ mov r4,r10,lsr#1
+ ldr r12,[sp,#80+4]
+ eor r3,r3,r10,lsl#31
+ eor r4,r4,r9,lsl#31
+ eor r3,r3,r9,lsr#8
+ eor r4,r4,r10,lsr#8
+ eor r3,r3,r10,lsl#24
+ eor r4,r4,r9,lsl#24
+ eor r3,r3,r9,lsr#7
+ eor r4,r4,r10,lsr#7
+ eor r3,r3,r10,lsl#25
+
+ @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+ @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+ @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+ mov r9,r11,lsr#19
+ mov r10,r12,lsr#19
+ eor r9,r9,r12,lsl#13
+ eor r10,r10,r11,lsl#13
+ eor r9,r9,r12,lsr#29
+ eor r10,r10,r11,lsr#29
+ eor r9,r9,r11,lsl#3
+ eor r10,r10,r12,lsl#3
+ eor r9,r9,r11,lsr#6
+ eor r10,r10,r12,lsr#6
+ ldr r11,[sp,#120+0]
+ eor r9,r9,r12,lsl#26
+
+ ldr r12,[sp,#120+4]
+ adds r3,r3,r9
+ ldr r9,[sp,#192+0]
+ adc r4,r4,r10
+
+ ldr r10,[sp,#192+4]
+ adds r3,r3,r11
+ adc r4,r4,r12
+ adds r3,r3,r9
+ adc r4,r4,r10
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ str r3,[sp,#64+0]
+ mov r10,r8,lsr#14
+ str r4,[sp,#64+4]
+ eor r9,r9,r8,lsl#18
+ ldr r11,[sp,#56+0] @ h.lo
+ eor r10,r10,r7,lsl#18
+ ldr r12,[sp,#56+4] @ h.hi
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ ldr r9,[sp,#40+0] @ f.lo
+ adc r4,r4,r10 @ T += Sigma1(e)
+ ldr r10,[sp,#40+4] @ f.hi
+ adds r3,r3,r11
+ ldr r11,[sp,#48+0] @ g.lo
+ adc r4,r4,r12 @ T += h
+ ldr r12,[sp,#48+4] @ g.hi
+
+ eor r9,r9,r11
+ str r7,[sp,#32+0]
+ eor r10,r10,r12
+ str r8,[sp,#32+4]
+ and r9,r9,r7
+ str r5,[sp,#0+0]
+ and r10,r10,r8
+ str r6,[sp,#0+4]
+ eor r9,r9,r11
+ ldr r11,[r14,#LO] @ K[i].lo
+ eor r10,r10,r12 @ Ch(e,f,g)
+ ldr r12,[r14,#HI] @ K[i].hi
+
+ adds r3,r3,r9
+ ldr r7,[sp,#24+0] @ d.lo
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ ldr r8,[sp,#24+4] @ d.hi
+ adds r3,r3,r11
+ and r9,r11,#0xff
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ ldr r11,[sp,#8+0] @ b.lo
+ adc r8,r8,r4 @ d += T
+ teq r9,#23
+
+ ldr r12,[sp,#16+0] @ c.lo
+#if __ARM_ARCH__>=7
+ it eq @ Thumb2 thing, sanity check in ARM
+#endif
+ orreq r14,r14,#1
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ and r9,r5,r11
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ ldr r10,[sp,#8+4] @ b.hi
+ orr r5,r5,r11
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ and r12,r6,r10
+ orr r6,r6,r10
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r6,r6,r11
+ adds r5,r5,r3
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ sub sp,sp,#8
+ adc r6,r6,r4 @ h += T
+ tst r14,#1
+ add r14,r14,#8
+#if __ARM_ARCH__>=7
+ ittt eq @ Thumb2 thing, sanity check in ARM
+#endif
+ ldreq r9,[sp,#184+0]
+ ldreq r10,[sp,#184+4]
+ beq .L16_79
+ bic r14,r14,#1
+
+ ldr r3,[sp,#8+0]
+ ldr r4,[sp,#8+4]
+ ldr r9, [r0,#0+LO]
+ ldr r10, [r0,#0+HI]
+ ldr r11, [r0,#8+LO]
+ ldr r12, [r0,#8+HI]
+ adds r9,r5,r9
+ str r9, [r0,#0+LO]
+ adc r10,r6,r10
+ str r10, [r0,#0+HI]
+ adds r11,r3,r11
+ str r11, [r0,#8+LO]
+ adc r12,r4,r12
+ str r12, [r0,#8+HI]
+
+ ldr r5,[sp,#16+0]
+ ldr r6,[sp,#16+4]
+ ldr r3,[sp,#24+0]
+ ldr r4,[sp,#24+4]
+ ldr r9, [r0,#16+LO]
+ ldr r10, [r0,#16+HI]
+ ldr r11, [r0,#24+LO]
+ ldr r12, [r0,#24+HI]
+ adds r9,r5,r9
+ str r9, [r0,#16+LO]
+ adc r10,r6,r10
+ str r10, [r0,#16+HI]
+ adds r11,r3,r11
+ str r11, [r0,#24+LO]
+ adc r12,r4,r12
+ str r12, [r0,#24+HI]
+
+ ldr r3,[sp,#40+0]
+ ldr r4,[sp,#40+4]
+ ldr r9, [r0,#32+LO]
+ ldr r10, [r0,#32+HI]
+ ldr r11, [r0,#40+LO]
+ ldr r12, [r0,#40+HI]
+ adds r7,r7,r9
+ str r7,[r0,#32+LO]
+ adc r8,r8,r10
+ str r8,[r0,#32+HI]
+ adds r11,r3,r11
+ str r11, [r0,#40+LO]
+ adc r12,r4,r12
+ str r12, [r0,#40+HI]
+
+ ldr r5,[sp,#48+0]
+ ldr r6,[sp,#48+4]
+ ldr r3,[sp,#56+0]
+ ldr r4,[sp,#56+4]
+ ldr r9, [r0,#48+LO]
+ ldr r10, [r0,#48+HI]
+ ldr r11, [r0,#56+LO]
+ ldr r12, [r0,#56+HI]
+ adds r9,r5,r9
+ str r9, [r0,#48+LO]
+ adc r10,r6,r10
+ str r10, [r0,#48+HI]
+ adds r11,r3,r11
+ str r11, [r0,#56+LO]
+ adc r12,r4,r12
+ str r12, [r0,#56+HI]
+
+ add sp,sp,#640
+ sub r14,r14,#640
+
+ teq r1,r2
+ bne .Loop
+
+ add sp,sp,#8*9 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size sha512_block_data_order,.-sha512_block_data_order
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.global sha512_block_data_order_neon
+.type sha512_block_data_order_neon,%function
+.align 4
+sha512_block_data_order_neon:
+.LNEON:
+ dmb @ errata #451034 on early Cortex A8
+ add r2,r1,r2,lsl#7 @ len to point at the end of inp
+ VFP_ABI_PUSH
+ adrl r3,K512
+ vldmia r0,{d16-d23} @ load context
+.Loop_neon:
+ vshr.u64 d24,d20,#14 @ 0
+#if 0<16
+ vld1.64 {d0},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d20,#18
+#if 0>0
+ vadd.i64 d16,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d20,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
+#if 0<16 && defined(__ARMEL__)
+ vrev64.8 d0,d0
+#endif
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
+ vshr.u64 d24,d16,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d23
+ vshr.u64 d25,d16,#34
+ vsli.64 d24,d16,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d16,#39
+ vadd.i64 d28,d0
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
+ vadd.i64 d19,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 1
+#if 1<16
+ vld1.64 {d1},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+#if 1>0
+ vadd.i64 d23,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
+#if 1<16 && defined(__ARMEL__)
+ vrev64.8 d1,d1
+#endif
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
+ vshr.u64 d24,d23,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d22
+ vshr.u64 d25,d23,#34
+ vsli.64 d24,d23,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d23,#39
+ vadd.i64 d28,d1
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
+ vadd.i64 d18,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d22,d30
+ vshr.u64 d24,d18,#14 @ 2
+#if 2<16
+ vld1.64 {d2},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d18,#18
+#if 2>0
+ vadd.i64 d22,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d18,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
+#if 2<16 && defined(__ARMEL__)
+ vrev64.8 d2,d2
+#endif
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
+ vshr.u64 d24,d22,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d21
+ vshr.u64 d25,d22,#34
+ vsli.64 d24,d22,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d22,#39
+ vadd.i64 d28,d2
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
+ vadd.i64 d17,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 3
+#if 3<16
+ vld1.64 {d3},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+#if 3>0
+ vadd.i64 d21,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
+#if 3<16 && defined(__ARMEL__)
+ vrev64.8 d3,d3
+#endif
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
+ vshr.u64 d24,d21,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d20
+ vshr.u64 d25,d21,#34
+ vsli.64 d24,d21,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d21,#39
+ vadd.i64 d28,d3
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
+ vadd.i64 d16,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d20,d30
+ vshr.u64 d24,d16,#14 @ 4
+#if 4<16
+ vld1.64 {d4},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d16,#18
+#if 4>0
+ vadd.i64 d20,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d16,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
+#if 4<16 && defined(__ARMEL__)
+ vrev64.8 d4,d4
+#endif
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
+ vshr.u64 d24,d20,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d19
+ vshr.u64 d25,d20,#34
+ vsli.64 d24,d20,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d20,#39
+ vadd.i64 d28,d4
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
+ vadd.i64 d23,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 5
+#if 5<16
+ vld1.64 {d5},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+#if 5>0
+ vadd.i64 d19,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
+#if 5<16 && defined(__ARMEL__)
+ vrev64.8 d5,d5
+#endif
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
+ vshr.u64 d24,d19,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d18
+ vshr.u64 d25,d19,#34
+ vsli.64 d24,d19,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d19,#39
+ vadd.i64 d28,d5
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
+ vadd.i64 d22,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d18,d30
+ vshr.u64 d24,d22,#14 @ 6
+#if 6<16
+ vld1.64 {d6},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d22,#18
+#if 6>0
+ vadd.i64 d18,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d22,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
+#if 6<16 && defined(__ARMEL__)
+ vrev64.8 d6,d6
+#endif
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
+ vshr.u64 d24,d18,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d17
+ vshr.u64 d25,d18,#34
+ vsli.64 d24,d18,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d18,#39
+ vadd.i64 d28,d6
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
+ vadd.i64 d21,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 7
+#if 7<16
+ vld1.64 {d7},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+#if 7>0
+ vadd.i64 d17,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
+#if 7<16 && defined(__ARMEL__)
+ vrev64.8 d7,d7
+#endif
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
+ vshr.u64 d24,d17,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d16
+ vshr.u64 d25,d17,#34
+ vsli.64 d24,d17,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d17,#39
+ vadd.i64 d28,d7
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
+ vadd.i64 d20,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d16,d30
+ vshr.u64 d24,d20,#14 @ 8
+#if 8<16
+ vld1.64 {d8},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d20,#18
+#if 8>0
+ vadd.i64 d16,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d20,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
+#if 8<16 && defined(__ARMEL__)
+ vrev64.8 d8,d8
+#endif
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
+ vshr.u64 d24,d16,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d23
+ vshr.u64 d25,d16,#34
+ vsli.64 d24,d16,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d16,#39
+ vadd.i64 d28,d8
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
+ vadd.i64 d19,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 9
+#if 9<16
+ vld1.64 {d9},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+#if 9>0
+ vadd.i64 d23,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
+#if 9<16 && defined(__ARMEL__)
+ vrev64.8 d9,d9
+#endif
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
+ vshr.u64 d24,d23,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d22
+ vshr.u64 d25,d23,#34
+ vsli.64 d24,d23,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d23,#39
+ vadd.i64 d28,d9
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
+ vadd.i64 d18,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d22,d30
+ vshr.u64 d24,d18,#14 @ 10
+#if 10<16
+ vld1.64 {d10},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d18,#18
+#if 10>0
+ vadd.i64 d22,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d18,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
+#if 10<16 && defined(__ARMEL__)
+ vrev64.8 d10,d10
+#endif
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
+ vshr.u64 d24,d22,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d21
+ vshr.u64 d25,d22,#34
+ vsli.64 d24,d22,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d22,#39
+ vadd.i64 d28,d10
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
+ vadd.i64 d17,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 11
+#if 11<16
+ vld1.64 {d11},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+#if 11>0
+ vadd.i64 d21,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
+#if 11<16 && defined(__ARMEL__)
+ vrev64.8 d11,d11
+#endif
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
+ vshr.u64 d24,d21,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d20
+ vshr.u64 d25,d21,#34
+ vsli.64 d24,d21,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d21,#39
+ vadd.i64 d28,d11
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
+ vadd.i64 d16,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d20,d30
+ vshr.u64 d24,d16,#14 @ 12
+#if 12<16
+ vld1.64 {d12},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d16,#18
+#if 12>0
+ vadd.i64 d20,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d16,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
+#if 12<16 && defined(__ARMEL__)
+ vrev64.8 d12,d12
+#endif
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
+ vshr.u64 d24,d20,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d19
+ vshr.u64 d25,d20,#34
+ vsli.64 d24,d20,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d20,#39
+ vadd.i64 d28,d12
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
+ vadd.i64 d23,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 13
+#if 13<16
+ vld1.64 {d13},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+#if 13>0
+ vadd.i64 d19,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
+#if 13<16 && defined(__ARMEL__)
+ vrev64.8 d13,d13
+#endif
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
+ vshr.u64 d24,d19,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d18
+ vshr.u64 d25,d19,#34
+ vsli.64 d24,d19,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d19,#39
+ vadd.i64 d28,d13
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
+ vadd.i64 d22,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d18,d30
+ vshr.u64 d24,d22,#14 @ 14
+#if 14<16
+ vld1.64 {d14},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d22,#18
+#if 14>0
+ vadd.i64 d18,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d22,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
+#if 14<16 && defined(__ARMEL__)
+ vrev64.8 d14,d14
+#endif
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
+ vshr.u64 d24,d18,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d17
+ vshr.u64 d25,d18,#34
+ vsli.64 d24,d18,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d18,#39
+ vadd.i64 d28,d14
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
+ vadd.i64 d21,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 15
+#if 15<16
+ vld1.64 {d15},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+#if 15>0
+ vadd.i64 d17,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
+#if 15<16 && defined(__ARMEL__)
+ vrev64.8 d15,d15
+#endif
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
+ vshr.u64 d24,d17,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d16
+ vshr.u64 d25,d17,#34
+ vsli.64 d24,d17,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d17,#39
+ vadd.i64 d28,d15
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
+ vadd.i64 d20,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d16,d30
+ mov r12,#4
+.L16_79_neon:
+ subs r12,#1
+ vshr.u64 q12,q7,#19
+ vshr.u64 q13,q7,#61
+ vadd.i64 d16,d30 @ h+=Maj from the past
+ vshr.u64 q15,q7,#6
+ vsli.64 q12,q7,#45
+ vext.8 q14,q0,q1,#8 @ X[i+1]
+ vsli.64 q13,q7,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q0,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q4,q5,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d20,#14 @ from NEON_00_15
+ vadd.i64 q0,q14
+ vshr.u64 d25,d20,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d20,#41 @ from NEON_00_15
+ vadd.i64 q0,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
+#if 16<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
+ vshr.u64 d24,d16,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d23
+ vshr.u64 d25,d16,#34
+ vsli.64 d24,d16,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d16,#39
+ vadd.i64 d28,d0
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
+ vadd.i64 d19,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 17
+#if 17<16
+ vld1.64 {d1},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+#if 17>0
+ vadd.i64 d23,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
+#if 17<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
+ vshr.u64 d24,d23,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d22
+ vshr.u64 d25,d23,#34
+ vsli.64 d24,d23,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d23,#39
+ vadd.i64 d28,d1
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
+ vadd.i64 d18,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d22,d30
+ vshr.u64 q12,q0,#19
+ vshr.u64 q13,q0,#61
+ vadd.i64 d22,d30 @ h+=Maj from the past
+ vshr.u64 q15,q0,#6
+ vsli.64 q12,q0,#45
+ vext.8 q14,q1,q2,#8 @ X[i+1]
+ vsli.64 q13,q0,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q1,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q5,q6,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d18,#14 @ from NEON_00_15
+ vadd.i64 q1,q14
+ vshr.u64 d25,d18,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d18,#41 @ from NEON_00_15
+ vadd.i64 q1,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
+#if 18<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
+ vshr.u64 d24,d22,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d21
+ vshr.u64 d25,d22,#34
+ vsli.64 d24,d22,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d22,#39
+ vadd.i64 d28,d2
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
+ vadd.i64 d17,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 19
+#if 19<16
+ vld1.64 {d3},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+#if 19>0
+ vadd.i64 d21,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
+#if 19<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
+ vshr.u64 d24,d21,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d20
+ vshr.u64 d25,d21,#34
+ vsli.64 d24,d21,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d21,#39
+ vadd.i64 d28,d3
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
+ vadd.i64 d16,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d20,d30
+ vshr.u64 q12,q1,#19
+ vshr.u64 q13,q1,#61
+ vadd.i64 d20,d30 @ h+=Maj from the past
+ vshr.u64 q15,q1,#6
+ vsli.64 q12,q1,#45
+ vext.8 q14,q2,q3,#8 @ X[i+1]
+ vsli.64 q13,q1,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q2,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q6,q7,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d16,#14 @ from NEON_00_15
+ vadd.i64 q2,q14
+ vshr.u64 d25,d16,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d16,#41 @ from NEON_00_15
+ vadd.i64 q2,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
+#if 20<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
+ vshr.u64 d24,d20,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d19
+ vshr.u64 d25,d20,#34
+ vsli.64 d24,d20,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d20,#39
+ vadd.i64 d28,d4
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
+ vadd.i64 d23,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 21
+#if 21<16
+ vld1.64 {d5},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+#if 21>0
+ vadd.i64 d19,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
+#if 21<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
+ vshr.u64 d24,d19,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d18
+ vshr.u64 d25,d19,#34
+ vsli.64 d24,d19,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d19,#39
+ vadd.i64 d28,d5
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
+ vadd.i64 d22,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d18,d30
+ vshr.u64 q12,q2,#19
+ vshr.u64 q13,q2,#61
+ vadd.i64 d18,d30 @ h+=Maj from the past
+ vshr.u64 q15,q2,#6
+ vsli.64 q12,q2,#45
+ vext.8 q14,q3,q4,#8 @ X[i+1]
+ vsli.64 q13,q2,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q3,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q7,q0,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d22,#14 @ from NEON_00_15
+ vadd.i64 q3,q14
+ vshr.u64 d25,d22,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d22,#41 @ from NEON_00_15
+ vadd.i64 q3,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
+#if 22<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
+ vshr.u64 d24,d18,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d17
+ vshr.u64 d25,d18,#34
+ vsli.64 d24,d18,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d18,#39
+ vadd.i64 d28,d6
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
+ vadd.i64 d21,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 23
+#if 23<16
+ vld1.64 {d7},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+#if 23>0
+ vadd.i64 d17,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
+#if 23<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
+ vshr.u64 d24,d17,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d16
+ vshr.u64 d25,d17,#34
+ vsli.64 d24,d17,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d17,#39
+ vadd.i64 d28,d7
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
+ vadd.i64 d20,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d16,d30
+ vshr.u64 q12,q3,#19
+ vshr.u64 q13,q3,#61
+ vadd.i64 d16,d30 @ h+=Maj from the past
+ vshr.u64 q15,q3,#6
+ vsli.64 q12,q3,#45
+ vext.8 q14,q4,q5,#8 @ X[i+1]
+ vsli.64 q13,q3,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q4,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q0,q1,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d20,#14 @ from NEON_00_15
+ vadd.i64 q4,q14
+ vshr.u64 d25,d20,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d20,#41 @ from NEON_00_15
+ vadd.i64 q4,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vmov d29,d20
+ vsli.64 d26,d20,#23
+#if 24<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d21,d22 @ Ch(e,f,g)
+ vshr.u64 d24,d16,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d23
+ vshr.u64 d25,d16,#34
+ vsli.64 d24,d16,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d16,#39
+ vadd.i64 d28,d8
+ vsli.64 d25,d16,#30
+ veor d30,d16,d17
+ vsli.64 d26,d16,#25
+ veor d23,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d18,d17 @ Maj(a,b,c)
+ veor d23,d26 @ Sigma0(a)
+ vadd.i64 d19,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 25
+#if 25<16
+ vld1.64 {d9},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+#if 25>0
+ vadd.i64 d23,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vmov d29,d19
+ vsli.64 d26,d19,#23
+#if 25<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d20,d21 @ Ch(e,f,g)
+ vshr.u64 d24,d23,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d22
+ vshr.u64 d25,d23,#34
+ vsli.64 d24,d23,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d23,#39
+ vadd.i64 d28,d9
+ vsli.64 d25,d23,#30
+ veor d30,d23,d16
+ vsli.64 d26,d23,#25
+ veor d22,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d17,d16 @ Maj(a,b,c)
+ veor d22,d26 @ Sigma0(a)
+ vadd.i64 d18,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d22,d30
+ vshr.u64 q12,q4,#19
+ vshr.u64 q13,q4,#61
+ vadd.i64 d22,d30 @ h+=Maj from the past
+ vshr.u64 q15,q4,#6
+ vsli.64 q12,q4,#45
+ vext.8 q14,q5,q6,#8 @ X[i+1]
+ vsli.64 q13,q4,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q5,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q1,q2,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d18,#14 @ from NEON_00_15
+ vadd.i64 q5,q14
+ vshr.u64 d25,d18,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d18,#41 @ from NEON_00_15
+ vadd.i64 q5,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vmov d29,d18
+ vsli.64 d26,d18,#23
+#if 26<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d19,d20 @ Ch(e,f,g)
+ vshr.u64 d24,d22,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d21
+ vshr.u64 d25,d22,#34
+ vsli.64 d24,d22,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d22,#39
+ vadd.i64 d28,d10
+ vsli.64 d25,d22,#30
+ veor d30,d22,d23
+ vsli.64 d26,d22,#25
+ veor d21,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d16,d23 @ Maj(a,b,c)
+ veor d21,d26 @ Sigma0(a)
+ vadd.i64 d17,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 27
+#if 27<16
+ vld1.64 {d11},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+#if 27>0
+ vadd.i64 d21,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vmov d29,d17
+ vsli.64 d26,d17,#23
+#if 27<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d18,d19 @ Ch(e,f,g)
+ vshr.u64 d24,d21,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d20
+ vshr.u64 d25,d21,#34
+ vsli.64 d24,d21,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d21,#39
+ vadd.i64 d28,d11
+ vsli.64 d25,d21,#30
+ veor d30,d21,d22
+ vsli.64 d26,d21,#25
+ veor d20,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d23,d22 @ Maj(a,b,c)
+ veor d20,d26 @ Sigma0(a)
+ vadd.i64 d16,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d20,d30
+ vshr.u64 q12,q5,#19
+ vshr.u64 q13,q5,#61
+ vadd.i64 d20,d30 @ h+=Maj from the past
+ vshr.u64 q15,q5,#6
+ vsli.64 q12,q5,#45
+ vext.8 q14,q6,q7,#8 @ X[i+1]
+ vsli.64 q13,q5,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q6,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q2,q3,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d16,#14 @ from NEON_00_15
+ vadd.i64 q6,q14
+ vshr.u64 d25,d16,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d16,#41 @ from NEON_00_15
+ vadd.i64 q6,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vmov d29,d16
+ vsli.64 d26,d16,#23
+#if 28<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d17,d18 @ Ch(e,f,g)
+ vshr.u64 d24,d20,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d19
+ vshr.u64 d25,d20,#34
+ vsli.64 d24,d20,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d20,#39
+ vadd.i64 d28,d12
+ vsli.64 d25,d20,#30
+ veor d30,d20,d21
+ vsli.64 d26,d20,#25
+ veor d19,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d22,d21 @ Maj(a,b,c)
+ veor d19,d26 @ Sigma0(a)
+ vadd.i64 d23,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 29
+#if 29<16
+ vld1.64 {d13},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+#if 29>0
+ vadd.i64 d19,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vmov d29,d23
+ vsli.64 d26,d23,#23
+#if 29<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d16,d17 @ Ch(e,f,g)
+ vshr.u64 d24,d19,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d18
+ vshr.u64 d25,d19,#34
+ vsli.64 d24,d19,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d19,#39
+ vadd.i64 d28,d13
+ vsli.64 d25,d19,#30
+ veor d30,d19,d20
+ vsli.64 d26,d19,#25
+ veor d18,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d21,d20 @ Maj(a,b,c)
+ veor d18,d26 @ Sigma0(a)
+ vadd.i64 d22,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d18,d30
+ vshr.u64 q12,q6,#19
+ vshr.u64 q13,q6,#61
+ vadd.i64 d18,d30 @ h+=Maj from the past
+ vshr.u64 q15,q6,#6
+ vsli.64 q12,q6,#45
+ vext.8 q14,q7,q0,#8 @ X[i+1]
+ vsli.64 q13,q6,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q7,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q3,q4,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d22,#14 @ from NEON_00_15
+ vadd.i64 q7,q14
+ vshr.u64 d25,d22,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d22,#41 @ from NEON_00_15
+ vadd.i64 q7,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vmov d29,d22
+ vsli.64 d26,d22,#23
+#if 30<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d23,d16 @ Ch(e,f,g)
+ vshr.u64 d24,d18,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d17
+ vshr.u64 d25,d18,#34
+ vsli.64 d24,d18,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d18,#39
+ vadd.i64 d28,d14
+ vsli.64 d25,d18,#30
+ veor d30,d18,d19
+ vsli.64 d26,d18,#25
+ veor d17,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d20,d19 @ Maj(a,b,c)
+ veor d17,d26 @ Sigma0(a)
+ vadd.i64 d21,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 31
+#if 31<16
+ vld1.64 {d15},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+#if 31>0
+ vadd.i64 d17,d30 @ h+=Maj from the past
+#endif
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vmov d29,d21
+ vsli.64 d26,d21,#23
+#if 31<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ veor d25,d24
+ vbsl d29,d22,d23 @ Ch(e,f,g)
+ vshr.u64 d24,d17,#28
+ veor d26,d25 @ Sigma1(e)
+ vadd.i64 d27,d29,d16
+ vshr.u64 d25,d17,#34
+ vsli.64 d24,d17,#36
+ vadd.i64 d27,d26
+ vshr.u64 d26,d17,#39
+ vadd.i64 d28,d15
+ vsli.64 d25,d17,#30
+ veor d30,d17,d18
+ vsli.64 d26,d17,#25
+ veor d16,d24,d25
+ vadd.i64 d27,d28
+ vbsl d30,d19,d18 @ Maj(a,b,c)
+ veor d16,d26 @ Sigma0(a)
+ vadd.i64 d20,d27
+ vadd.i64 d30,d27
+ @ vadd.i64 d16,d30
+ bne .L16_79_neon
+
+ vadd.i64 d16,d30 @ h+=Maj from the past
+ vldmia r0,{d24-d31} @ load context to temp
+ vadd.i64 q8,q12 @ vectorized accumulate
+ vadd.i64 q9,q13
+ vadd.i64 q10,q14
+ vadd.i64 q11,q15
+ vstmia r0,{d16-d23} @ save context
+ teq r1,r2
+ sub r3,#640 @ rewind K512
+ bne .Loop_neon
+
+ VFP_ABI_POP
+ bx lr @ .word 0xe12fff1e
+.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
+#endif
+.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by "
+.align 2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
new file mode 100644
index 000000000000..269a394e4a53
--- /dev/null
+++ b/arch/arm/crypto/sha512-glue.c
@@ -0,0 +1,121 @@
+/*
+ * sha512-glue.c - accelerated SHA-384/512 for ARM
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha512_base.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+
+#include "sha512.h"
+
+MODULE_DESCRIPTION("Accelerated SHA-384/SHA-512 secure hash for ARM");
+MODULE_AUTHOR("Ard Biesheuvel ");
+MODULE_LICENSE("GPL v2");
+
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha384-arm");
+MODULE_ALIAS_CRYPTO("sha512-arm");
+
+asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
+
+int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+}
+
+int sha512_arm_final(struct shash_desc *desc, u8 *out)
+{
+ sha512_base_do_finalize(desc,
+ (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+}
+
+int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+ return sha512_arm_final(desc, out);
+}
+
+static struct shash_alg sha512_arm_algs[] = { {
+ .init = sha384_base_init,
+ .update = sha512_arm_update,
+ .final = sha512_arm_final,
+ .finup = sha512_arm_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-arm",
+ .cra_priority = 250,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+}, {
+ .init = sha512_base_init,
+ .update = sha512_arm_update,
+ .final = sha512_arm_final,
+ .finup = sha512_arm_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-arm",
+ .cra_priority = 250,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
+
+static int __init sha512_arm_mod_init(void)
+{
+ int err;
+
+ err = crypto_register_shashes(sha512_arm_algs,
+ ARRAY_SIZE(sha512_arm_algs));
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
+ err = crypto_register_shashes(sha512_neon_algs,
+ ARRAY_SIZE(sha512_neon_algs));
+ if (err)
+ goto err_unregister;
+ }
+ return 0;
+
+err_unregister:
+ crypto_unregister_shashes(sha512_arm_algs,
+ ARRAY_SIZE(sha512_arm_algs));
+
+ return err;
+}
+
+static void __exit sha512_arm_mod_fini(void)
+{
+ crypto_unregister_shashes(sha512_arm_algs,
+ ARRAY_SIZE(sha512_arm_algs));
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
+ crypto_unregister_shashes(sha512_neon_algs,
+ ARRAY_SIZE(sha512_neon_algs));
+}
+
+module_init(sha512_arm_mod_init);
+module_exit(sha512_arm_mod_fini);
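(For context, a minimal sketch, illustrative and not part of this patch: the transforms registered above are reached through the normal shash API. The function name below is hypothetical and error handling is simplified; it assumes process context on a kernel with this module loaded.)

	#include <crypto/hash.h>
	#include <crypto/sha.h>

	static int sha512_digest_example(const u8 *data, unsigned int len,
					 u8 out[SHA512_DIGEST_SIZE])
	{
		/* "sha512" resolves to the highest-priority registered driver */
		struct crypto_shash *tfm = crypto_alloc_shash("sha512", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			err = crypto_shash_digest(desc, data, len, out);
		}

		crypto_free_shash(tfm);
		return err;
	}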
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
new file mode 100644
index 000000000000..32693684a3ab
--- /dev/null
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -0,0 +1,98 @@
+/*
+ * sha512-neon-glue.c - accelerated SHA-384/512 for ARM NEON
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha512_base.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/simd.h>
+#include <asm/neon.h>
+
+#include "sha512.h"
+
+MODULE_ALIAS_CRYPTO("sha384-neon");
+MODULE_ALIAS_CRYPTO("sha512-neon");
+
+asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src,
+ int blocks);
+
+static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ if (!may_use_simd() ||
+ (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
+ return sha512_arm_update(desc, data, len);
+
+ kernel_neon_begin();
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order_neon);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd())
+ return sha512_arm_finup(desc, data, len, out);
+
+ kernel_neon_begin();
+ if (len)
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order_neon);
+ sha512_base_do_finalize(desc,
+ (sha512_block_fn *)sha512_block_data_order_neon);
+ kernel_neon_end();
+
+ return sha512_base_finish(desc, out);
+}
+
+static int sha512_neon_final(struct shash_desc *desc, u8 *out)
+{
+ return sha512_neon_finup(desc, NULL, 0, out);
+}
+
+struct shash_alg sha512_neon_algs[] = { {
+ .init = sha384_base_init,
+ .update = sha512_neon_update,
+ .final = sha512_neon_final,
+ .finup = sha512_neon_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "sha384-neon",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+
+ }
+}, {
+ .init = sha512_base_init,
+ .update = sha512_neon_update,
+ .final = sha512_neon_final,
+ .finup = sha512_neon_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "sha512-neon",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
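(For context, illustrative and not part of this patch: both glue files register the same generic cra_names, "sha384" and "sha512", so on NEON-capable CPUs the cra_priority values above, 300 against the scalar drivers' 250, make the NEON implementations win algorithm selection. A caller can still pin a particular driver by its cra_driver_name, as in this hypothetical one-liner:)

	/* bypass priority-based selection and request the scalar driver */
	struct crypto_shash *tfm = crypto_alloc_shash("sha512-arm", 0, 0);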
diff --git a/arch/arm/crypto/sha512.h b/arch/arm/crypto/sha512.h
new file mode 100644
index 000000000000..a75d9a82988a
--- /dev/null
+++ b/arch/arm/crypto/sha512.h
@@ -0,0 +1,8 @@
+
+int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
+
+extern struct shash_alg sha512_neon_algs[2];
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
deleted file mode 100644
index b124dce838d6..000000000000
--- a/arch/arm/crypto/sha512_neon_glue.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Glue code for the SHA512 Secure Hash Algorithm assembly implementation
- * using NEON instructions.
- *
- * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
- *
- * This file is based on sha512_ssse3_glue.c:
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <crypto/sha.h>
-#include <asm/byteorder.h>
-#include <asm/simd.h>
-#include <asm/neon.h>
-
-
-static const u64 sha512_k[] = {
- 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
- 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
- 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
- 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
- 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
- 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
- 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
- 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
- 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
- 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
- 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
- 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
- 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
- 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
- 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
- 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
- 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
- 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
- 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
- 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
- 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
- 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
- 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
- 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
- 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
- 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
- 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
- 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
- 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
- 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
- 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
- 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
- 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
- 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
- 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
- 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
- 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
- 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
- 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
- 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
-};
-
-
-asmlinkage void sha512_transform_neon(u64 *digest, const void *data,
- const u64 k[], unsigned int num_blks);
-
-
-static int sha512_neon_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA512_H0;
- sctx->state[1] = SHA512_H1;
- sctx->state[2] = SHA512_H2;
- sctx->state[3] = SHA512_H3;
- sctx->state[4] = SHA512_H4;
- sctx->state[5] = SHA512_H5;
- sctx->state[6] = SHA512_H6;
- sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static int __sha512_neon_update(struct shash_desc *desc, const u8 *data,
- unsigned int len, unsigned int partial)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int done = 0;
-
- sctx->count[0] += len;
- if (sctx->count[0] < len)
- sctx->count[1]++;
-
- if (partial) {
- done = SHA512_BLOCK_SIZE - partial;
- memcpy(sctx->buf + partial, data, done);
- sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1);
- }
-
- if (len - done >= SHA512_BLOCK_SIZE) {
- const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
-
- sha512_transform_neon(sctx->state, data + done, sha512_k,
- rounds);
-
- done += rounds * SHA512_BLOCK_SIZE;
- }
-
- memcpy(sctx->buf, data + done, len - done);
-
- return 0;
-}
-
-static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
- int res;
-
- /* Handle the fast case right here */
- if (partial + len < SHA512_BLOCK_SIZE) {
- sctx->count[0] += len;
- if (sctx->count[0] < len)
- sctx->count[1]++;
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
- }
-
- if (!may_use_simd()) {
- res = crypto_sha512_update(desc, data, len);
- } else {
- kernel_neon_begin();
- res = __sha512_neon_update(desc, data, len, partial);
- kernel_neon_end();
- }
-
- return res;
-}
-
-
-/* Add padding and return the message digest. */
-static int sha512_neon_final(struct shash_desc *desc, u8 *out)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int i, index, padlen;
- __be64 *dst = (__be64 *)out;
- __be64 bits[2];
- static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
-
- /* save number of bits */
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
-
- /* Pad out to 112 mod 128 and append length */
- index = sctx->count[0] & 0x7f;
- padlen = (index < 112) ? (112 - index) : ((128+112) - index);
-
- if (!may_use_simd()) {
- crypto_sha512_update(desc, padding, padlen);
- crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits));
- } else {
- kernel_neon_begin();
- /* We need to fill a whole block for __sha512_neon_update() */
- if (padlen <= 112) {
- sctx->count[0] += padlen;
- if (sctx->count[0] < padlen)
- sctx->count[1]++;
- memcpy(sctx->buf + index, padding, padlen);
- } else {
- __sha512_neon_update(desc, padding, padlen, index);
- }
- __sha512_neon_update(desc, (const u8 *)&bits,
- sizeof(bits), 112);
- kernel_neon_end();
- }
-
- /* Store state in digest */
- for (i = 0; i < 8; i++)
- dst[i] = cpu_to_be64(sctx->state[i]);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_neon_export(struct shash_desc *desc, void *out)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_neon_import(struct shash_desc *desc, const void *in)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha384_neon_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA384_H0;
- sctx->state[1] = SHA384_H1;
- sctx->state[2] = SHA384_H2;
- sctx->state[3] = SHA384_H3;
- sctx->state[4] = SHA384_H4;
- sctx->state[5] = SHA384_H5;
- sctx->state[6] = SHA384_H6;
- sctx->state[7] = SHA384_H7;
-
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
-{
- u8 D[SHA512_DIGEST_SIZE];
-
- sha512_neon_final(desc, D);
-
- memcpy(hash, D, SHA384_DIGEST_SIZE);
- memzero_explicit(D, SHA512_DIGEST_SIZE);
-
- return 0;
-}
-
-static struct shash_alg algs[] = { {
- .digestsize = SHA512_DIGEST_SIZE,
- .init = sha512_neon_init,
- .update = sha512_neon_update,
- .final = sha512_neon_final,
- .export = sha512_neon_export,
- .import = sha512_neon_import,
- .descsize = sizeof(struct sha512_state),
- .statesize = sizeof(struct sha512_state),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "sha512-neon",
- .cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-}, {
- .digestsize = SHA384_DIGEST_SIZE,
- .init = sha384_neon_init,
- .update = sha512_neon_update,
- .final = sha384_neon_final,
- .export = sha512_neon_export,
- .import = sha512_neon_import,
- .descsize = sizeof(struct sha512_state),
- .statesize = sizeof(struct sha512_state),
- .base = {
- .cra_name = "sha384",
- .cra_driver_name = "sha384-neon",
- .cra_priority = 250,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA384_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-} };
-
-static int __init sha512_neon_mod_init(void)
-{
- if (!cpu_has_neon())
- return -ENODEV;
-
- return crypto_register_shashes(algs, ARRAY_SIZE(algs));
-}
-
-static void __exit sha512_neon_mod_fini(void)
-{
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
-}
-
-module_init(sha512_neon_mod_init);
-module_exit(sha512_neon_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
-
-MODULE_ALIAS_CRYPTO("sha512");
-MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 6c348df5bf36..3303e8a7b837 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -13,7 +13,7 @@
#include <asm/neon.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include "aes-ce-setkey.h"
diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c
index 12dccdb38286..af4c712f7afc 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-md5.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c
@@ -69,10 +69,10 @@ static int octeon_md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- mctx->hash[0] = cpu_to_le32(0x67452301);
- mctx->hash[1] = cpu_to_le32(0xefcdab89);
- mctx->hash[2] = cpu_to_le32(0x98badcfe);
- mctx->hash[3] = cpu_to_le32(0x10325476);
+ mctx->hash[0] = cpu_to_le32(MD5_H0);
+ mctx->hash[1] = cpu_to_le32(MD5_H1);
+ mctx->hash[2] = cpu_to_le32(MD5_H2);
+ mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->byte_count = 0;
return 0;
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index 7f4547418ee1..be186a75f622 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -8,6 +8,7 @@
* for more details.
*/
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
@@ -106,6 +107,7 @@ cycles_t get_cycles(void)
{
return nios2_timer_read(&nios2_cs.cs);
}
+EXPORT_SYMBOL(get_cycles);
static void nios2_timer_start(struct nios2_timer *timer)
{
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 452fb4dc575f..92289679b4c4 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -37,10 +37,10 @@ static int ppc_md5_init(struct shash_desc *desc)
{
struct md5_state *sctx = shash_desc_ctx(desc);
- sctx->hash[0] = 0x67452301;
- sctx->hash[1] = 0xefcdab89;
- sctx->hash[2] = 0x98badcfe;
- sctx->hash[3] = 0x10325476;
+ sctx->hash[0] = MD5_H0;
+ sctx->hash[1] = MD5_H1;
+ sctx->hash[2] = MD5_H2;
+ sctx->hash[3] = MD5_H3;
sctx->byte_count = 0;
return 0;
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
new file mode 100644
index 000000000000..9f8402b35115
--- /dev/null
+++ b/arch/powerpc/include/asm/icswx.h
@@ -0,0 +1,184 @@
+/*
+ * ICSWX api
+ *
+ * Copyright (C) 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This provides the Initiate Coprocessor Store Word Indexed (ICSWX)
+ * instruction. This instruction is used to communicate with PowerPC
+ * coprocessors. This also provides definitions of the structures used
+ * to communicate with the coprocessor.
+ *
+ * The RFC02130: Coprocessor Architecture document is the reference for
+ * everything in this file unless otherwise noted.
+ */
+#ifndef _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+#define _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_
+
+#include <asm/ppc-opcode.h> /* for PPC_ICSWX */
+
+/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */
+
+#define CCB_VALUE (0x3fffffffffffffff)
+#define CCB_ADDRESS (0xfffffffffffffff8)
+#define CCB_CM (0x0000000000000007)
+#define CCB_CM0 (0x0000000000000004)
+#define CCB_CM12 (0x0000000000000003)
+
+#define CCB_CM0_ALL_COMPLETIONS (0x0)
+#define CCB_CM0_LAST_IN_CHAIN (0x4)
+#define CCB_CM12_STORE (0x0)
+#define CCB_CM12_INTERRUPT (0x1)
+
+#define CCB_SIZE (0x10)
+#define CCB_ALIGN CCB_SIZE
+
+struct coprocessor_completion_block {
+ __be64 value;
+ __be64 address;
+} __packed __aligned(CCB_ALIGN);
+
+
+/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */
+
+#define CSB_V (0x80)
+#define CSB_F (0x04)
+#define CSB_CH (0x03)
+#define CSB_CE_INCOMPLETE (0x80)
+#define CSB_CE_TERMINATION (0x40)
+#define CSB_CE_TPBC (0x20)
+
+#define CSB_CC_SUCCESS (0)
+#define CSB_CC_INVALID_ALIGN (1)
+#define CSB_CC_OPERAND_OVERLAP (2)
+#define CSB_CC_DATA_LENGTH (3)
+#define CSB_CC_TRANSLATION (5)
+#define CSB_CC_PROTECTION (6)
+#define CSB_CC_RD_EXTERNAL (7)
+#define CSB_CC_INVALID_OPERAND (8)
+#define CSB_CC_PRIVILEGE (9)
+#define CSB_CC_INTERNAL (10)
+#define CSB_CC_WR_EXTERNAL (12)
+#define CSB_CC_NOSPC (13)
+#define CSB_CC_EXCESSIVE_DDE (14)
+#define CSB_CC_WR_TRANSLATION (15)
+#define CSB_CC_WR_PROTECTION (16)
+#define CSB_CC_UNKNOWN_CODE (17)
+#define CSB_CC_ABORT (18)
+#define CSB_CC_TRANSPORT (20)
+#define CSB_CC_SEGMENTED_DDL (31)
+#define CSB_CC_PROGRESS_POINT (32)
+#define CSB_CC_DDE_OVERFLOW (33)
+#define CSB_CC_SESSION (34)
+#define CSB_CC_PROVISION (36)
+#define CSB_CC_CHAIN (37)
+#define CSB_CC_SEQUENCE (38)
+#define CSB_CC_HW (39)
+
+#define CSB_SIZE (0x10)
+#define CSB_ALIGN CSB_SIZE
+
+struct coprocessor_status_block {
+ u8 flags;
+ u8 cs;
+ u8 cc;
+ u8 ce;
+ __be32 count;
+ __be64 address;
+} __packed __aligned(CSB_ALIGN);
+
+
+/* Chapter 6.5.10 Data-Descriptor List (DDL)
+ * each list contains one or more Data-Descriptor Entries (DDE)
+ */
+
+#define DDE_P (0x8000)
+
+#define DDE_SIZE (0x10)
+#define DDE_ALIGN DDE_SIZE
+
+struct data_descriptor_entry {
+ __be16 flags;
+ u8 count;
+ u8 index;
+ __be32 length;
+ __be64 address;
+} __packed __aligned(DDE_ALIGN);
+
+
+/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
+
+#define CRB_SIZE (0x80)
+#define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */
+
+/* Coprocessor Status Block field
+ * ADDRESS address of CSB
+ * C CCB is valid
+ * AT 0 = addrs are virtual, 1 = addrs are phys
+ * M enable perf monitor
+ */
+#define CRB_CSB_ADDRESS (0xfffffffffffffff0)
+#define CRB_CSB_C (0x0000000000000008)
+#define CRB_CSB_AT (0x0000000000000002)
+#define CRB_CSB_M (0x0000000000000001)
+
+struct coprocessor_request_block {
+ __be32 ccw;
+ __be32 flags;
+ __be64 csb_addr;
+
+ struct data_descriptor_entry source;
+ struct data_descriptor_entry target;
+
+ struct coprocessor_completion_block ccb;
+
+ u8 reserved[48];
+
+ struct coprocessor_status_block csb;
+} __packed __aligned(CRB_ALIGN);
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1.1.1 RS
+ * Chapter 8.2.3 Coprocessor Directive
+ * Chapter 8.2.4 Execution
+ *
+ * The CCW must be converted to BE before passing to icswx()
+ */
+
+#define CCW_PS (0xff000000)
+#define CCW_CT (0x00ff0000)
+#define CCW_CD (0x0000ffff)
+#define CCW_CL (0x0000c000)
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1 Initiate Coprocessor Store Word Indexed (ICSWX)
+ * Chapter 8.2.4.1 Condition Register 0
+ */
+
+#define ICSWX_INITIATED (0x8)
+#define ICSWX_BUSY (0x4)
+#define ICSWX_REJECTED (0x2)
+
+static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
+{
+ __be64 ccw_reg = ccw;
+ u32 cr;
+
+ __asm__ __volatile__(
+ PPC_ICSWX(%1,0,%2) "\n"
+ "mfcr %0\n"
+ : "=r" (cr)
+ : "r" (ccw_reg), "r" (crb)
+ : "cr0", "memory");
+
+ return (int)((cr >> 28) & 0xf);
+}
+
+
+#endif /* _ARCH_POWERPC_INCLUDE_ASM_ICSWX_H_ */
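(For context, a hypothetical usage sketch rather than part of this patch: a caller fills in a coprocessor_request_block, points csb_addr at a status block, issues icswx() with a big-endian CCW, and polls CSB_V. The coprocessor-type value below is a made-up placeholder; real CT values come from firmware, and the busy-wait stands in for whatever completion policy a real driver would use.)

	static int icswx_example(struct coprocessor_request_block *crb)
	{
		__be32 ccw = cpu_to_be32(0x42 << 16);	/* placeholder CT field */
		int rc;

		/* virtual addressing (AT bit clear), CSB embedded in the CRB */
		crb->csb_addr = cpu_to_be64((u64)(unsigned long)&crb->csb);

		rc = icswx(ccw, crb);
		if (rc != ICSWX_INITIATED)
			return rc == ICSWX_BUSY ? -EBUSY : -EIO;

		/* busy-wait until the coprocessor marks the status block valid */
		while (!(READ_ONCE(crb->csb.flags) & CSB_V))
			cpu_relax();

		return crb->csb.cc == CSB_CC_SUCCESS ? 0 : -EIO;
	}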
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 5c93f691b495..8452335661a5 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -136,6 +136,8 @@
#define PPC_INST_DCBAL 0x7c2005ec
#define PPC_INST_DCBZL 0x7c2007ec
#define PPC_INST_ICBT 0x7c00002c
+#define PPC_INST_ICSWX 0x7c00032d
+#define PPC_INST_ICSWEPX 0x7c00076d
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
@@ -403,4 +405,15 @@
#define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \
TMRN(tmr) | ___PPC_RT(r))
+/* Coprocessor instructions */
+#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_INST_ICSWX | \
+ ___PPC_RS(s) | \
+ ___PPC_RA(a) | \
+ ___PPC_RB(b))
+#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_INST_ICSWEPX | \
+ ___PPC_RS(s) | \
+ ___PPC_RA(a) | \
+ ___PPC_RB(b))
+
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 308c5e15676b..ea2cea7eaef1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -800,6 +800,7 @@ int of_get_ibm_chip_id(struct device_node *np)
}
return -1;
}
+EXPORT_SYMBOL(of_get_ibm_chip_id);
/**
* cpu_to_chip_id - Return the cpus chip-id
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index b688731d7ede..c9d2b922734b 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -33,10 +33,10 @@ static int md5_sparc64_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- mctx->hash[0] = cpu_to_le32(0x67452301);
- mctx->hash[1] = cpu_to_le32(0xefcdab89);
- mctx->hash[2] = cpu_to_le32(0x98badcfe);
- mctx->hash[3] = cpu_to_le32(0x10325476);
+ mctx->hash[0] = cpu_to_le32(MD5_H0);
+ mctx->hash[1] = cpu_to_le32(MD5_H1);
+ mctx->hash[2] = cpu_to_le32(MD5_H2);
+ mctx->hash[3] = cpu_to_le32(MD5_H3);
mctx->byte_count = 0;
return 0;
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b419f43ce0c5..2bfc8a7c88c1 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -44,15 +44,19 @@
#endif
+#define AESNI_ALIGN 16
+#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
+#define RFC4106_HASH_SUBKEY_SIZE 16
+
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
*/
struct aesni_rfc4106_gcm_ctx {
- u8 hash_subkey[16];
- struct crypto_aes_ctx aes_key_expanded;
+ u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ struct crypto_aes_ctx aes_key_expanded
+ __attribute__ ((__aligned__(AESNI_ALIGN)));
u8 nonce[4];
- struct cryptd_aead *cryptd_tfm;
};
struct aesni_gcm_set_hash_subkey_result {
@@ -66,10 +70,6 @@ struct aesni_hash_subkey_req_data {
struct scatterlist sg;
};
-#define AESNI_ALIGN (16)
-#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
-#define RFC4106_HASH_SUBKEY_SIZE 16
-
struct aesni_lrw_ctx {
struct lrw_table_ctx lrw_table;
u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
@@ -283,10 +283,11 @@ static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
- return
- (struct aesni_rfc4106_gcm_ctx *)
- PTR_ALIGN((u8 *)
- crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
+ unsigned long align = AESNI_ALIGN;
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+ return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif
@@ -790,36 +791,30 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
#endif
#ifdef CONFIG_X86_64
-static int rfc4106_init(struct crypto_tfm *tfm)
+static int rfc4106_init(struct crypto_aead *aead)
{
struct cryptd_aead *cryptd_tfm;
- struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
- PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
- struct crypto_aead *cryptd_child;
- struct aesni_rfc4106_gcm_ctx *child_ctx;
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
- cryptd_child = cryptd_aead_child(cryptd_tfm);
- child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
- memcpy(child_ctx, ctx, sizeof(*ctx));
- ctx->cryptd_tfm = cryptd_tfm;
- tfm->crt_aead.reqsize = sizeof(struct aead_request)
- + crypto_aead_reqsize(&cryptd_tfm->base);
+ *ctx = cryptd_tfm;
+ crypto_aead_set_reqsize(
+ aead,
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(&cryptd_tfm->base));
return 0;
}
-static void rfc4106_exit(struct crypto_tfm *tfm)
+static void rfc4106_exit(struct crypto_aead *aead)
{
- struct aesni_rfc4106_gcm_ctx *ctx =
- (struct aesni_rfc4106_gcm_ctx *)
- PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
- if (!IS_ERR(ctx->cryptd_tfm))
- cryptd_free_aead(ctx->cryptd_tfm);
- return;
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_free_aead(*ctx);
}
static void
@@ -845,8 +840,6 @@ rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
if (IS_ERR(ctr_tfm))
return PTR_ERR(ctr_tfm);
- crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
-
ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
if (ret)
goto out_free_ablkcipher;
@@ -895,73 +888,29 @@ out_free_ablkcipher:
static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
unsigned int key_len)
{
- int ret = 0;
- struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
- u8 *new_key_align, *new_key_mem = NULL;
if (key_len < 4) {
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
/*Account for 4 byte nonce at the end.*/
key_len -= 4;
- if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256) {
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
- /*This must be on a 16 byte boundary!*/
- if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
- return -EINVAL;
- if ((unsigned long)key % AESNI_ALIGN) {
- /*key is not aligned: use an auxuliar aligned pointer*/
- new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
- if (!new_key_mem)
- return -ENOMEM;
-
- new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
- memcpy(new_key_align, key, key_len);
- key = new_key_align;
- }
-
- if (!irq_fpu_usable())
- ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
- key, key_len);
- else {
- kernel_fpu_begin();
- ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
- kernel_fpu_end();
- }
- /*This must be on a 16 byte boundary!*/
- if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
-exit:
- kfree(new_key_mem);
- return ret;
+ return aes_set_key_common(crypto_aead_tfm(aead),
+ &ctx->aes_key_expanded, key, key_len) ?:
+ rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
unsigned int key_len)
{
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
- struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
- struct aesni_rfc4106_gcm_ctx *c_ctx = aesni_rfc4106_gcm_ctx_get(child);
- struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm;
- int ret;
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent);
+ struct cryptd_aead *cryptd_tfm = *ctx;
- ret = crypto_aead_setkey(child, key, key_len);
- if (!ret) {
- memcpy(ctx, c_ctx, sizeof(*ctx));
- ctx->cryptd_tfm = cryptd_tfm;
- }
- return ret;
+ return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
}
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
@@ -975,7 +924,7 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
default:
return -EINVAL;
}
- crypto_aead_crt(aead)->authsize = authsize;
+
return 0;
}
@@ -984,30 +933,23 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
static int rfc4106_set_authsize(struct crypto_aead *parent,
unsigned int authsize)
{
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
- struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
- int ret;
+ struct cryptd_aead **ctx = crypto_aead_ctx(parent);
+ struct cryptd_aead *cryptd_tfm = *ctx;
- ret = crypto_aead_setauthsize(child, authsize);
- if (!ret)
- crypto_aead_crt(parent)->authsize = authsize;
- return ret;
+ return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
-static int __driver_rfc4106_encrypt(struct aead_request *req)
+static int helper_rfc4106_encrypt(struct aead_request *req)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
- u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- u8 iv_tab[16+AESNI_ALIGN];
- u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
+ u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
- struct scatter_walk assoc_sg_walk;
struct scatter_walk dst_sg_walk;
unsigned int i;
@@ -1016,12 +958,6 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
/* to 8 or 12 bytes */
if (unlikely(req->assoclen != 8 && req->assoclen != 12))
return -EINVAL;
- if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
- return -EINVAL;
- if (unlikely(key_len != AES_KEYSIZE_128 &&
- key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256))
- return -EINVAL;
/* IV below built */
for (i = 0; i < 4; i++)
@@ -1030,55 +966,57 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
- if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+ if (sg_is_last(req->src) &&
+ req->src->offset + req->src->length <= PAGE_SIZE &&
+ sg_is_last(req->dst) &&
+ req->dst->offset + req->dst->length <= PAGE_SIZE) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
- scatterwalk_start(&assoc_sg_walk, req->assoc);
- src = scatterwalk_map(&src_sg_walk);
- assoc = scatterwalk_map(&assoc_sg_walk);
+ assoc = scatterwalk_map(&src_sg_walk);
+ src = assoc + req->assoclen;
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk);
+ dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
}
-
} else {
/* Allocate memory for src, dst, assoc */
- src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
+ assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
GFP_ATOMIC);
- if (unlikely(!src))
+ if (unlikely(!assoc))
return -ENOMEM;
- assoc = (src + req->cryptlen + auth_tag_len);
- scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
- scatterwalk_map_and_copy(assoc, req->assoc, 0,
- req->assoclen, 0);
+ scatterwalk_map_and_copy(assoc, req->src, 0,
+ req->assoclen + req->cryptlen, 0);
+ src = assoc + req->assoclen;
dst = src;
}
+ kernel_fpu_begin();
aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+ ((unsigned long)req->cryptlen), auth_tag_len);
+ kernel_fpu_end();
/* The authTag (aka the Integrity Check Value) needs to be written
* back to the packet. */
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst);
- scatterwalk_done(&dst_sg_walk, 0, 0);
+ scatterwalk_unmap(dst - req->assoclen);
+ scatterwalk_advance(&dst_sg_walk, req->dst->length);
+ scatterwalk_done(&dst_sg_walk, 1, 0);
}
- scatterwalk_unmap(src);
scatterwalk_unmap(assoc);
- scatterwalk_done(&src_sg_walk, 0, 0);
- scatterwalk_done(&assoc_sg_walk, 0, 0);
+ scatterwalk_advance(&src_sg_walk, req->src->length);
+ scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
} else {
- scatterwalk_map_and_copy(dst, req->dst, 0,
- req->cryptlen + auth_tag_len, 1);
- kfree(src);
+ scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
+ req->cryptlen + auth_tag_len, 1);
+ kfree(assoc);
}
return 0;
}
-static int __driver_rfc4106_decrypt(struct aead_request *req)
+static int helper_rfc4106_decrypt(struct aead_request *req)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
@@ -1087,26 +1025,16 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
int retval = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
- u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- u8 iv_and_authTag[32+AESNI_ALIGN];
- u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
- u8 *authTag = iv + 16;
+ u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+ u8 authTag[16];
struct scatter_walk src_sg_walk;
- struct scatter_walk assoc_sg_walk;
struct scatter_walk dst_sg_walk;
unsigned int i;
- if (unlikely((req->cryptlen < auth_tag_len) ||
- (req->assoclen != 8 && req->assoclen != 12)))
+ if (unlikely(req->assoclen != 8 && req->assoclen != 12))
return -EINVAL;
- if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
- return -EINVAL;
- if (unlikely(key_len != AES_KEYSIZE_128 &&
- key_len != AES_KEYSIZE_192 &&
- key_len != AES_KEYSIZE_256))
- return -EINVAL;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length */
@@ -1120,33 +1048,36 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
- if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+ if (sg_is_last(req->src) &&
+ req->src->offset + req->src->length <= PAGE_SIZE &&
+ sg_is_last(req->dst) &&
+ req->dst->offset + req->dst->length <= PAGE_SIZE) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
- scatterwalk_start(&assoc_sg_walk, req->assoc);
- src = scatterwalk_map(&src_sg_walk);
- assoc = scatterwalk_map(&assoc_sg_walk);
+ assoc = scatterwalk_map(&src_sg_walk);
+ src = assoc + req->assoclen;
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk);
+ dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
}
} else {
/* Allocate memory for src, dst, assoc */
- src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
- if (!src)
+ assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+ if (!assoc)
return -ENOMEM;
- assoc = (src + req->cryptlen);
- scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
- scatterwalk_map_and_copy(assoc, req->assoc, 0,
- req->assoclen, 0);
+ scatterwalk_map_and_copy(assoc, req->src, 0,
+ req->assoclen + req->cryptlen, 0);
+ src = assoc + req->assoclen;
dst = src;
}
+ kernel_fpu_begin();
aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
authTag, auth_tag_len);
+ kernel_fpu_end();
/* Compare generated tag with passed in tag. */
retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
@@ -1154,90 +1085,59 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst);
- scatterwalk_done(&dst_sg_walk, 0, 0);
+ scatterwalk_unmap(dst - req->assoclen);
+ scatterwalk_advance(&dst_sg_walk, req->dst->length);
+ scatterwalk_done(&dst_sg_walk, 1, 0);
}
- scatterwalk_unmap(src);
scatterwalk_unmap(assoc);
- scatterwalk_done(&src_sg_walk, 0, 0);
- scatterwalk_done(&assoc_sg_walk, 0, 0);
+ scatterwalk_advance(&src_sg_walk, req->src->length);
+ scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
} else {
- scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
- kfree(src);
+ scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
+ tempCipherLen, 1);
+ kfree(assoc);
}
return retval;
}
static int rfc4106_encrypt(struct aead_request *req)
{
- int ret;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
+ struct cryptd_aead *cryptd_tfm = *ctx;
+ struct aead_request *subreq = aead_request_ctx(req);
- if (!irq_fpu_usable()) {
- struct aead_request *cryptd_req =
- (struct aead_request *) aead_request_ctx(req);
+ aead_request_set_tfm(subreq, irq_fpu_usable() ?
+ cryptd_aead_child(cryptd_tfm) :
+ &cryptd_tfm->base);
- memcpy(cryptd_req, req, sizeof(*req));
- aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
- ret = crypto_aead_encrypt(cryptd_req);
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_encrypt(req);
- kernel_fpu_end();
- }
- return ret;
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return crypto_aead_encrypt(subreq);
}
static int rfc4106_decrypt(struct aead_request *req)
{
- int ret;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
+ struct cryptd_aead *cryptd_tfm = *ctx;
+ struct aead_request *subreq = aead_request_ctx(req);
- if (!irq_fpu_usable()) {
- struct aead_request *cryptd_req =
- (struct aead_request *) aead_request_ctx(req);
+ aead_request_set_tfm(subreq, irq_fpu_usable() ?
+ cryptd_aead_child(cryptd_tfm) :
+ &cryptd_tfm->base);
- memcpy(cryptd_req, req, sizeof(*req));
- aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
- ret = crypto_aead_decrypt(cryptd_req);
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_decrypt(req);
- kernel_fpu_end();
- }
- return ret;
-}
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
-static int helper_rfc4106_encrypt(struct aead_request *req)
-{
- int ret;
-
- if (unlikely(!irq_fpu_usable())) {
- WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
- ret = -EINVAL;
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_encrypt(req);
- kernel_fpu_end();
- }
- return ret;
-}
-
-static int helper_rfc4106_decrypt(struct aead_request *req)
-{
- int ret;
-
- if (unlikely(!irq_fpu_usable())) {
- WARN_ONCE(1, "__gcm-aes-aesni alg used in invalid context");
- ret = -EINVAL;
- } else {
- kernel_fpu_begin();
- ret = __driver_rfc4106_decrypt(req);
- kernel_fpu_end();
- }
- return ret;
+ return crypto_aead_decrypt(subreq);
}
#endif
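
With the conversion above, the rfc4106 helpers assume the new AEAD convention: req->src and req->dst carry the associated data immediately followed by the text, and the AD length is announced via aead_request_set_ad(). A generic caller-side sketch, not part of this patch (tfm and the assoc/data/tag/iv buffers and lengths are placeholders):

    struct scatterlist sg[3];
    struct aead_request *req;
    int err;

    sg_init_table(sg, 3);
    sg_set_buf(&sg[0], assoc, assoclen);	/* AD comes first */
    sg_set_buf(&sg[1], data, datalen);	/* then the plaintext */
    sg_set_buf(&sg[2], tag, taglen);	/* room for the auth tag */

    req = aead_request_alloc(tfm, GFP_KERNEL);
    if (!req)
    	return -ENOMEM;
    aead_request_set_callback(req, 0, NULL, NULL);
    aead_request_set_crypt(req, sg, sg, datalen, iv);
    aead_request_set_ad(req, assoclen);
    err = crypto_aead_encrypt(req);	/* tag lands after the text */
    aead_request_free(req);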
@@ -1410,51 +1310,6 @@ static struct crypto_alg aesni_algs[] = { {
.geniv = "chainiv",
},
},
-}, {
- .cra_name = "__gcm-aes-aesni",
- .cra_driver_name = "__driver-gcm-aes-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
- AESNI_ALIGN,
- .cra_alignmask = 0,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .aead = {
- .setkey = common_rfc4106_set_key,
- .setauthsize = common_rfc4106_set_authsize,
- .encrypt = helper_rfc4106_encrypt,
- .decrypt = helper_rfc4106_decrypt,
- .ivsize = 8,
- .maxauthsize = 16,
- },
- },
-}, {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) +
- AESNI_ALIGN,
- .cra_alignmask = 0,
- .cra_type = &crypto_nivaead_type,
- .cra_module = THIS_MODULE,
- .cra_init = rfc4106_init,
- .cra_exit = rfc4106_exit,
- .cra_u = {
- .aead = {
- .setkey = rfc4106_set_key,
- .setauthsize = rfc4106_set_authsize,
- .encrypt = rfc4106_encrypt,
- .decrypt = rfc4106_decrypt,
- .geniv = "seqiv",
- .ivsize = 8,
- .maxauthsize = 16,
- },
- },
#endif
#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
}, {
@@ -1569,6 +1424,46 @@ static struct crypto_alg aesni_algs[] = { {
},
} };
+#ifdef CONFIG_X86_64
+static struct aead_alg aesni_aead_algs[] = { {
+ .setkey = common_rfc4106_set_key,
+ .setauthsize = common_rfc4106_set_authsize,
+ .encrypt = helper_rfc4106_encrypt,
+ .decrypt = helper_rfc4106_decrypt,
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "__gcm-aes-aesni",
+ .cra_driver_name = "__driver-gcm-aes-aesni",
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
+ .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_module = THIS_MODULE,
+ },
+}, {
+ .init = rfc4106_init,
+ .exit = rfc4106_exit,
+ .setkey = rfc4106_set_key,
+ .setauthsize = rfc4106_set_authsize,
+ .encrypt = rfc4106_encrypt,
+ .decrypt = rfc4106_decrypt,
+ .ivsize = 8,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aesni",
+ .cra_priority = 400,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct cryptd_aead *),
+ .cra_module = THIS_MODULE,
+ },
+} };
+#else
+static struct aead_alg aesni_aead_algs[0];
+#endif
+
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AES),
@@ -1616,11 +1511,27 @@ static int __init aesni_init(void)
if (err)
return err;
- return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+ err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+ if (err)
+ goto fpu_exit;
+
+ err = crypto_register_aeads(aesni_aead_algs,
+ ARRAY_SIZE(aesni_aead_algs));
+ if (err)
+ goto unregister_algs;
+
+ return err;
+
+unregister_algs:
+ crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
+fpu_exit:
+ crypto_fpu_exit();
+ return err;
}
static void __exit aesni_exit(void)
{
+ crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
crypto_fpu_exit();
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 5a2f30f9f52d..e7d679e2a018 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -156,7 +156,7 @@ int __init crypto_fpu_init(void)
return crypto_register_template(&crypto_fpu_tmpl);
}
-void __exit crypto_fpu_exit(void)
+void crypto_fpu_exit(void)
{
crypto_unregister_template(&crypto_fpu_tmpl);
}
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index f53ed1dc88ea..a841e9765bd6 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -882,7 +882,8 @@ static int __init sha1_mb_mod_init(void)
INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
cpu_state->cpu = cpu;
cpu_state->alg_state = &sha1_mb_alg_state;
- cpu_state->mgr = (struct sha1_ctx_mgr *) kzalloc(sizeof(struct sha1_ctx_mgr), GFP_KERNEL);
+ cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
+ GFP_KERNEL);
if (!cpu_state->mgr)
goto err2;
sha1_ctx_mgr_init(cpu_state->mgr);
diff --git a/crypto/842.c b/crypto/842.c
index b48f4f108c47..98e387efb8c8 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -1,5 +1,5 @@
/*
- * Cryptographic API for the 842 compression algorithm.
+ * Cryptographic API for the 842 software compression algorithm.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,173 +11,73 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * Copyright (C) IBM Corporation, 2011-2015
*
- * Copyright (C) IBM Corporation, 2011
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ * Seth Jennings <sjenning@linux.vnet.ibm.com>
*
- * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
- * Seth Jennings <sjenning@linux.vnet.ibm.com>
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is the software implementation of compression and decompression using
+ * the 842 format. This uses the software 842 library at lib/842/ which is
+ * only a reference implementation, and is very, very slow as compared to other
+ * software compressors. You probably do not want to use this software
+ * compression. If you have access to the PowerPC 842 compression hardware, you
+ * want to use the 842 hardware compression interface, which is at:
+ * drivers/crypto/nx/nx-842-crypto.c
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/nx842.h>
-#include <linux/lzo.h>
-#include <linux/timer.h>
+#include <linux/sw842.h>
-static int nx842_uselzo;
-
-struct nx842_ctx {
- void *nx842_wmem; /* working memory for 842/lzo */
+struct crypto842_ctx {
+ char wmem[SW842_MEM_COMPRESS]; /* working memory for compress */
};
-enum nx842_crypto_type {
- NX842_CRYPTO_TYPE_842,
- NX842_CRYPTO_TYPE_LZO
-};
-
-#define NX842_SENTINEL 0xdeadbeef
-
-struct nx842_crypto_header {
- unsigned int sentinel; /* debug */
- enum nx842_crypto_type type;
-};
-
-static int nx842_init(struct crypto_tfm *tfm)
+static int crypto842_compress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
- struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
- int wmemsize;
+ struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
- wmemsize = max_t(int, nx842_get_workmem_size(), LZO1X_MEM_COMPRESS);
- ctx->nx842_wmem = kmalloc(wmemsize, GFP_NOFS);
- if (!ctx->nx842_wmem)
- return -ENOMEM;
-
- return 0;
+ return sw842_compress(src, slen, dst, dlen, ctx->wmem);
}
-static void nx842_exit(struct crypto_tfm *tfm)
+static int crypto842_decompress(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
{
- struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
-
- kfree(ctx->nx842_wmem);
-}
-
-static void nx842_reset_uselzo(unsigned long data)
-{
- nx842_uselzo = 0;
-}
-
-static DEFINE_TIMER(failover_timer, nx842_reset_uselzo, 0, 0);
-
-static int nx842_crypto_compress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr;
- unsigned int tmp_len = *dlen;
- size_t lzodlen; /* needed for lzo */
- int err;
-
- *dlen = 0;
- hdr = (struct nx842_crypto_header *)dst;
- hdr->sentinel = NX842_SENTINEL; /* debug */
- dst += sizeof(struct nx842_crypto_header);
- tmp_len -= sizeof(struct nx842_crypto_header);
- lzodlen = tmp_len;
-
- if (likely(!nx842_uselzo)) {
- err = nx842_compress(src, slen, dst, &tmp_len, ctx->nx842_wmem);
-
- if (likely(!err)) {
- hdr->type = NX842_CRYPTO_TYPE_842;
- *dlen = tmp_len + sizeof(struct nx842_crypto_header);
- return 0;
- }
-
- /* hardware failed */
- nx842_uselzo = 1;
-
- /* set timer to check for hardware again in 1 second */
- mod_timer(&failover_timer, jiffies + msecs_to_jiffies(1000));
- }
-
- /* no hardware, use lzo */
- err = lzo1x_1_compress(src, slen, dst, &lzodlen, ctx->nx842_wmem);
- if (err != LZO_E_OK)
- return -EINVAL;
-
- hdr->type = NX842_CRYPTO_TYPE_LZO;
- *dlen = lzodlen + sizeof(struct nx842_crypto_header);
- return 0;
-}
-
-static int nx842_crypto_decompress(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen)
-{
- struct nx842_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_crypto_header *hdr;
- unsigned int tmp_len = *dlen;
- size_t lzodlen; /* needed for lzo */
- int err;
-
- *dlen = 0;
- hdr = (struct nx842_crypto_header *)src;
-
- if (unlikely(hdr->sentinel != NX842_SENTINEL))
- return -EINVAL;
-
- src += sizeof(struct nx842_crypto_header);
- slen -= sizeof(struct nx842_crypto_header);
-
- if (likely(hdr->type == NX842_CRYPTO_TYPE_842)) {
- err = nx842_decompress(src, slen, dst, &tmp_len,
- ctx->nx842_wmem);
- if (err)
- return -EINVAL;
- *dlen = tmp_len;
- } else if (hdr->type == NX842_CRYPTO_TYPE_LZO) {
- lzodlen = tmp_len;
- err = lzo1x_decompress_safe(src, slen, dst, &lzodlen);
- if (err != LZO_E_OK)
- return -EINVAL;
- *dlen = lzodlen;
- } else
- return -EINVAL;
-
- return 0;
+ return sw842_decompress(src, slen, dst, dlen);
}
static struct crypto_alg alg = {
.cra_name = "842",
+ .cra_driver_name = "842-generic",
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_ctx),
+ .cra_ctxsize = sizeof(struct crypto842_ctx),
.cra_module = THIS_MODULE,
- .cra_init = nx842_init,
- .cra_exit = nx842_exit,
.cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
+ .coa_compress = crypto842_compress,
+ .coa_decompress = crypto842_decompress } }
};
-static int __init nx842_mod_init(void)
+static int __init crypto842_mod_init(void)
{
- del_timer(&failover_timer);
return crypto_register_alg(&alg);
}
+module_init(crypto842_mod_init);
-static void __exit nx842_mod_exit(void)
+static void __exit crypto842_mod_exit(void)
{
crypto_unregister_alg(&alg);
}
-
-module_init(nx842_mod_init);
-module_exit(nx842_mod_exit);
+module_exit(crypto842_mod_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("842 Compression Algorithm");
+MODULE_DESCRIPTION("842 Software Compression Algorithm");
MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-generic");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
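
After the rewrite, "842" behaves like any other software compressor under the generic compression interface; a minimal usage sketch (src/dst buffers and sizes are placeholders):

    struct crypto_comp *tfm;
    unsigned int dlen = dst_size;
    int err;

    tfm = crypto_alloc_comp("842", 0, 0);
    if (IS_ERR(tfm))
    	return PTR_ERR(tfm);

    /* dlen is in/out: capacity on entry, compressed size on return */
    err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
    crypto_free_comp(tfm);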
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 362905e7c841..b4cfc5754033 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -78,6 +78,10 @@ config CRYPTO_RNG2
tristate
select CRYPTO_ALGAPI2
+config CRYPTO_RNG_DEFAULT
+ tristate
+ select CRYPTO_DRBG_MENU
+
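CRYPTO_RNG_DEFAULT lets IV generators pull in a DRBG-backed default RNG without naming it in code; consumers go through the default-RNG helpers, roughly as sketched here (salt is a placeholder buffer):

    int err = crypto_get_default_rng();

    if (err)
    	return err;
    err = crypto_rng_get_bytes(crypto_default_rng, salt, sizeof(salt));
    crypto_put_default_rng();
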
config CRYPTO_PCOMP
tristate
select CRYPTO_PCOMP2
@@ -87,6 +91,23 @@ config CRYPTO_PCOMP2
tristate
select CRYPTO_ALGAPI2
+config CRYPTO_AKCIPHER2
+ tristate
+ select CRYPTO_ALGAPI2
+
+config CRYPTO_AKCIPHER
+ tristate
+ select CRYPTO_AKCIPHER2
+ select CRYPTO_ALGAPI
+
+config CRYPTO_RSA
+ tristate "RSA algorithm"
+ select CRYPTO_AKCIPHER
+ select MPILIB
+ select ASN1
+ help
+ Generic implementation of the RSA public key algorithm.
+
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_MANAGER2
@@ -100,6 +121,7 @@ config CRYPTO_MANAGER2
select CRYPTO_HASH2
select CRYPTO_BLKCIPHER2
select CRYPTO_PCOMP2
+ select CRYPTO_AKCIPHER2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
@@ -217,15 +239,39 @@ config CRYPTO_GCM
Support for Galois/Counter Mode (GCM) and Galois Message
Authentication Code (GMAC). Required for IPSec.
+config CRYPTO_CHACHA20POLY1305
+ tristate "ChaCha20-Poly1305 AEAD support"
+ select CRYPTO_CHACHA20
+ select CRYPTO_POLY1305
+ select CRYPTO_AEAD
+ help
+ ChaCha20-Poly1305 AEAD support, RFC7539.
+
+ Support for the AEAD wrapper using the ChaCha20 stream cipher combined
+ with the Poly1305 authenticator. It is defined in RFC7539 for use in
+ IETF protocols.
+
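Once this option and its dependencies are enabled, the composed AEAD is reachable through the usual allocation path; a sketch, assuming the instance name registered by crypto/chacha20poly1305.c:

    struct crypto_aead *tfm;

    tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
    if (IS_ERR(tfm))
    	return PTR_ERR(tfm);
    /* 256-bit key; the 12-byte RFC 7539 nonce is passed as the IV */
    err = crypto_aead_setkey(tfm, key, 32);
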
config CRYPTO_SEQIV
tristate "Sequence Number IV Generator"
select CRYPTO_AEAD
select CRYPTO_BLKCIPHER
- select CRYPTO_RNG
+ select CRYPTO_NULL
+ select CRYPTO_RNG_DEFAULT
help
This IV generator generates an IV based on a sequence number by
xoring it with a salt. This algorithm is mainly useful for CTR
+config CRYPTO_ECHAINIV
+ tristate "Encrypted Chain IV Generator"
+ select CRYPTO_AEAD
+ select CRYPTO_NULL
+ select CRYPTO_RNG_DEFAULT
+ default m
+ help
+ This IV generator generates an IV based on the encryption of
+ a sequence number xored with a salt. This is the default
+ algorithm for CBC.
+
comment "Block modes"
config CRYPTO_CBC
@@ -415,6 +461,15 @@ config CRYPTO_GHASH
help
GHASH is message digest algorithm for GCM (Galois/Counter Mode).
+config CRYPTO_POLY1305
+ tristate "Poly1305 authenticator algorithm"
+ help
+ Poly1305 authenticator algorithm, RFC7539.
+
+ Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
+ It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
+ in IETF protocols. This is the portable C implementation of Poly1305.
+
config CRYPTO_MD4
tristate "MD4 digest algorithm"
select CRYPTO_HASH
@@ -1145,6 +1200,19 @@ config CRYPTO_SALSA20_X86_64
The Salsa20 stream cipher algorithm is designed by Daniel J.
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
+config CRYPTO_CHACHA20
+ tristate "ChaCha20 cipher algorithm"
+ select CRYPTO_BLKCIPHER
+ help
+ ChaCha20 cipher algorithm, RFC7539.
+
+ ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
+ Bernstein and further specified in RFC7539 for use in IETF protocols.
+ This is the portable C implementation of ChaCha20.
+
+ See also:
+ <http://cr.yp.to/chacha/chacha-20080128.pdf>
+
config CRYPTO_SEED
tristate "SEED cipher algorithm"
select CRYPTO_ALGAPI
@@ -1412,10 +1480,9 @@ config CRYPTO_LZO
config CRYPTO_842
tristate "842 compression algorithm"
- depends on CRYPTO_DEV_NX_COMPRESS
- # 842 uses lzo if the hardware becomes unavailable
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ select CRYPTO_ALGAPI
+ select 842_COMPRESS
+ select 842_DECOMPRESS
help
This is the 842 algorithm.
@@ -1439,7 +1506,6 @@ comment "Random Number Generation"
config CRYPTO_ANSI_CPRNG
tristate "Pseudo Random Number Generation for Cryptographic modules"
- default m
select CRYPTO_AES
select CRYPTO_RNG
help
@@ -1457,15 +1523,14 @@ menuconfig CRYPTO_DRBG_MENU
if CRYPTO_DRBG_MENU
config CRYPTO_DRBG_HMAC
- bool "Enable HMAC DRBG"
+ bool
default y
select CRYPTO_HMAC
- help
- Enable the HMAC DRBG variant as defined in NIST SP800-90A.
+ select CRYPTO_SHA256
config CRYPTO_DRBG_HASH
bool "Enable Hash DRBG"
- select CRYPTO_HASH
+ select CRYPTO_SHA256
help
Enable the Hash DRBG variant as defined in NIST SP800-90A.
@@ -1477,11 +1542,21 @@ config CRYPTO_DRBG_CTR
config CRYPTO_DRBG
tristate
- default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR)
+ default CRYPTO_DRBG_MENU
select CRYPTO_RNG
+ select CRYPTO_JITTERENTROPY
endif # if CRYPTO_DRBG_MENU
+config CRYPTO_JITTERENTROPY
+ tristate "Jitterentropy Non-Deterministic Random Number Generator"
+ help
+ The Jitterentropy RNG is a noise source that is intended
+ to provide a seed to another RNG. The RNG does not
+ perform any cryptographic whitening of the generated
+ random numbers. This Jitterentropy RNG registers with
+ the kernel crypto API and can be used by any caller.
+
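A consumer would reach it through the RNG API, roughly as below (the instance name is an assumption based on the driver's registration; seed is a placeholder buffer):

    struct crypto_rng *rng;

    rng = crypto_alloc_rng("jitterentropy_rng", 0, 0); /* name assumed */
    if (IS_ERR(rng))
    	return PTR_ERR(rng);
    err = crypto_rng_get_bytes(rng, seed, sizeof(seed));
    crypto_free_rng(rng);
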
config CRYPTO_USER_API
tristate
@@ -1512,6 +1587,15 @@ config CRYPTO_USER_API_RNG
This option enables the user-space interface for random
number generator algorithms.
+config CRYPTO_USER_API_AEAD
+ tristate "User-space interface for AEAD cipher algorithms"
+ depends on NET
+ select CRYPTO_AEAD
+ select CRYPTO_USER_API
+ help
+ This option enables the user-space interface for AEAD
+ cipher algorithms.
+
config CRYPTO_HASH_INFO
bool
diff --git a/crypto/Makefile b/crypto/Makefile
index 97b7d3ac87e7..0077476f5024 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -21,12 +21,22 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
+obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
crypto_hash-y += ahash.o
crypto_hash-y += shash.o
obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
+obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
+
+$(obj)/rsakey-asn1.o: $(obj)/rsakey-asn1.c $(obj)/rsakey-asn1.h
+clean-files += rsakey-asn1.c rsakey-asn1.h
+
+rsa_generic-y := rsakey-asn1.o
+rsa_generic-y += rsa.o
+rsa_generic-y += rsa_helper.o
+obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
cryptomgr-y := algboss.o testmgr.o
@@ -58,6 +68,7 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
+obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
@@ -79,6 +90,8 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
+obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
@@ -91,9 +104,9 @@ obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
obj-$(CONFIG_CRYPTO_842) += 842.o
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
-obj-$(CONFIG_CRYPTO_RNG2) += krng.o
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
+obj-$(CONFIG_CRYPTO_JITTERENTROPY) += jitterentropy.o
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index db201bca1581..b788f169cc98 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -454,7 +454,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
alg->setkey : setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
- crt->givencrypt = alg->givencrypt;
+ crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
crt->base = __crypto_ablkcipher_cast(tfm);
crt->ivsize = alg->ivsize;
@@ -586,6 +586,13 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
if (!tmpl)
goto kill_larval;
+ if (tmpl->create) {
+ err = tmpl->create(tmpl, tb);
+ if (err)
+ goto put_tmpl;
+ goto ok;
+ }
+
inst = tmpl->alloc(tb);
err = PTR_ERR(inst);
if (IS_ERR(inst))
@@ -597,6 +604,7 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
goto put_tmpl;
}
+ok:
/* Redo the lookup to use the instance we just registered. */
err = -EAGAIN;
@@ -636,7 +644,7 @@ struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_GIVCIPHER) {
- if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+ if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
crypto_mod_put(alg);
alg = ERR_PTR(-ENOENT);
}
diff --git a/crypto/aead.c b/crypto/aead.c
index 222271070b49..07bf99773548 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -12,7 +12,8 @@
*
*/
-#include <crypto/internal/aead.h>
+#include <crypto/internal/geniv.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -26,10 +27,20 @@
#include "internal.h"
+struct compat_request_ctx {
+ struct scatterlist src[2];
+ struct scatterlist dst[2];
+ struct scatterlist ivbuf[2];
+ struct scatterlist *ivsg;
+ struct aead_givcrypt_request subreq;
+};
+
+static int aead_null_givencrypt(struct aead_givcrypt_request *req);
+static int aead_null_givdecrypt(struct aead_givcrypt_request *req);
+
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
- struct aead_alg *aead = crypto_aead_alg(tfm);
unsigned long alignmask = crypto_aead_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
@@ -42,47 +53,95 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
- ret = aead->setkey(tfm, alignbuffer, keylen);
+ ret = tfm->setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
-static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+int crypto_aead_setkey(struct crypto_aead *tfm,
+ const u8 *key, unsigned int keylen)
{
- struct aead_alg *aead = crypto_aead_alg(tfm);
unsigned long alignmask = crypto_aead_alignmask(tfm);
+ tfm = tfm->child;
+
if ((unsigned long)key & alignmask)
return setkey_unaligned(tfm, key, keylen);
- return aead->setkey(tfm, key, keylen);
+ return tfm->setkey(tfm, key, keylen);
}
+EXPORT_SYMBOL_GPL(crypto_aead_setkey);
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
- struct aead_tfm *crt = crypto_aead_crt(tfm);
int err;
- if (authsize > crypto_aead_alg(tfm)->maxauthsize)
+ if (authsize > crypto_aead_maxauthsize(tfm))
return -EINVAL;
- if (crypto_aead_alg(tfm)->setauthsize) {
- err = crypto_aead_alg(tfm)->setauthsize(crt->base, authsize);
+ if (tfm->setauthsize) {
+ err = tfm->setauthsize(tfm->child, authsize);
if (err)
return err;
}
- crypto_aead_crt(crt->base)->authsize = authsize;
- crt->authsize = authsize;
+ tfm->child->authsize = authsize;
+ tfm->authsize = authsize;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
-static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
+struct aead_old_request {
+ struct scatterlist srcbuf[2];
+ struct scatterlist dstbuf[2];
+ struct aead_request subreq;
+};
+
+unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
- return alg->cra_ctxsize;
+ return tfm->reqsize + sizeof(struct aead_old_request);
+}
+EXPORT_SYMBOL_GPL(crypto_aead_reqsize);
+
+static int old_crypt(struct aead_request *req,
+ int (*crypt)(struct aead_request *req))
+{
+ struct aead_old_request *nreq = aead_request_ctx(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct scatterlist *src, *dst;
+
+ if (req->old)
+ return crypt(req);
+
+ src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen);
+ dst = req->src == req->dst ?
+ src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen);
+
+ aead_request_set_tfm(&nreq->subreq, aead);
+ aead_request_set_callback(&nreq->subreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen,
+ req->iv);
+ aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen);
+
+ return crypt(&nreq->subreq);
+}
+
+static int old_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct old_aead_alg *alg = crypto_old_aead_alg(aead);
+
+ return old_crypt(req, alg->encrypt);
+}
+
+static int old_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct old_aead_alg *alg = crypto_old_aead_alg(aead);
+
+ return old_crypt(req, alg->decrypt);
}
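
old_crypt() above leans on scatterwalk_ffwd() to strip the associated data off the front of the combined scatterlist; its contract, roughly (the two-entry buffer is supplied by the caller):

    struct scatterlist fwd[2];
    struct scatterlist *sg;

    /* returns a list starting req->assoclen bytes into req->src,
     * chaining through fwd[] when the skip lands inside an entry */
    sg = scatterwalk_ffwd(fwd, req->src, req->assoclen);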
static int no_givcrypt(struct aead_givcrypt_request *req)
@@ -90,35 +149,129 @@ static int no_givcrypt(struct aead_givcrypt_request *req)
return -ENOSYS;
}
-static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm)
{
- struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
- struct aead_tfm *crt = &tfm->crt_aead;
+ struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead;
+ struct crypto_aead *crt = __crypto_aead_cast(tfm);
if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
return -EINVAL;
- crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
- alg->setkey : setkey;
- crt->encrypt = alg->encrypt;
- crt->decrypt = alg->decrypt;
- crt->givencrypt = alg->givencrypt ?: no_givcrypt;
- crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
- crt->base = __crypto_aead_cast(tfm);
- crt->ivsize = alg->ivsize;
+ crt->setkey = alg->setkey;
+ crt->setauthsize = alg->setauthsize;
+ crt->encrypt = old_encrypt;
+ crt->decrypt = old_decrypt;
+ if (alg->ivsize) {
+ crt->givencrypt = alg->givencrypt ?: no_givcrypt;
+ crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
+ } else {
+ crt->givencrypt = aead_null_givencrypt;
+ crt->givdecrypt = aead_null_givdecrypt;
+ }
+ crt->child = __crypto_aead_cast(tfm);
crt->authsize = alg->maxauthsize;
return 0;
}
+static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *aead = __crypto_aead_cast(tfm);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+
+ alg->exit(aead);
+}
+
+static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *aead = __crypto_aead_cast(tfm);
+ struct aead_alg *alg = crypto_aead_alg(aead);
+
+ if (crypto_old_aead_alg(aead)->encrypt)
+ return crypto_old_aead_init_tfm(tfm);
+
+ aead->setkey = alg->setkey;
+ aead->setauthsize = alg->setauthsize;
+ aead->encrypt = alg->encrypt;
+ aead->decrypt = alg->decrypt;
+ aead->child = __crypto_aead_cast(tfm);
+ aead->authsize = alg->maxauthsize;
+
+ if (alg->exit)
+ aead->base.exit = crypto_aead_exit_tfm;
+
+ if (alg->init)
+ return alg->init(aead);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET
+static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_aead raead;
+ struct old_aead_alg *aead = &alg->cra_aead;
+
+ strncpy(raead.type, "aead", sizeof(raead.type));
+ strncpy(raead.geniv, aead->geniv ?: "", sizeof(raead.geniv));
+
+ raead.blocksize = alg->cra_blocksize;
+ raead.maxauthsize = aead->maxauthsize;
+ raead.ivsize = aead->ivsize;
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
+ sizeof(struct crypto_report_aead), &raead))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ struct old_aead_alg *aead = &alg->cra_aead;
+
+ seq_printf(m, "type : aead\n");
+ seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
+ "yes" : "no");
+ seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
+ seq_printf(m, "ivsize : %u\n", aead->ivsize);
+ seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
+ seq_printf(m, "geniv : %s\n", aead->geniv ?: "");
+}
+
+const struct crypto_type crypto_aead_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_aead_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_old_aead_show,
+#endif
+ .report = crypto_old_aead_report,
+ .lookup = crypto_lookup_aead,
+ .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .tfmsize = offsetof(struct crypto_aead, base),
+};
+EXPORT_SYMBOL_GPL(crypto_aead_type);
+
#ifdef CONFIG_NET
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
- struct aead_alg *aead = &alg->cra_aead;
+ struct aead_alg *aead = container_of(alg, struct aead_alg, base);
strncpy(raead.type, "aead", sizeof(raead.type));
- strncpy(raead.geniv, aead->geniv ?: "", sizeof(raead.geniv));
+ strncpy(raead.geniv, "", sizeof(raead.geniv));
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
@@ -143,7 +296,7 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
{
- struct aead_alg *aead = &alg->cra_aead;
+ struct aead_alg *aead = container_of(alg, struct aead_alg, base);
seq_printf(m, "type : aead\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
@@ -151,18 +304,21 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "ivsize : %u\n", aead->ivsize);
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
- seq_printf(m, "geniv : %s\n", aead->geniv ?: "");
+ seq_printf(m, "geniv : \n");
}
-const struct crypto_type crypto_aead_type = {
- .ctxsize = crypto_aead_ctxsize,
- .init = crypto_init_aead_ops,
+static const struct crypto_type crypto_new_aead_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_aead_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
.report = crypto_aead_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .tfmsize = offsetof(struct crypto_aead, base),
};
-EXPORT_SYMBOL_GPL(crypto_aead_type);
static int aead_null_givencrypt(struct aead_givcrypt_request *req)
{
@@ -174,33 +330,11 @@ static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
return crypto_aead_decrypt(&req->areq);
}
-static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- struct aead_alg *alg = &tfm->__crt_alg->cra_aead;
- struct aead_tfm *crt = &tfm->crt_aead;
-
- if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
- return -EINVAL;
-
- crt->setkey = setkey;
- crt->encrypt = alg->encrypt;
- crt->decrypt = alg->decrypt;
- if (!alg->ivsize) {
- crt->givencrypt = aead_null_givencrypt;
- crt->givdecrypt = aead_null_givdecrypt;
- }
- crt->base = __crypto_aead_cast(tfm);
- crt->ivsize = alg->ivsize;
- crt->authsize = alg->maxauthsize;
-
- return 0;
-}
-
#ifdef CONFIG_NET
static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
- struct aead_alg *aead = &alg->cra_aead;
+ struct old_aead_alg *aead = &alg->cra_aead;
strncpy(raead.type, "nivaead", sizeof(raead.type));
strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
@@ -229,7 +363,7 @@ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
__attribute__ ((unused));
static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
{
- struct aead_alg *aead = &alg->cra_aead;
+ struct old_aead_alg *aead = &alg->cra_aead;
seq_printf(m, "type : nivaead\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
@@ -241,43 +375,215 @@ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
}
const struct crypto_type crypto_nivaead_type = {
- .ctxsize = crypto_aead_ctxsize,
- .init = crypto_init_nivaead_ops,
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_aead_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_nivaead_show,
#endif
.report = crypto_nivaead_report,
+ .maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
+ .maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .tfmsize = offsetof(struct crypto_aead, base),
};
EXPORT_SYMBOL_GPL(crypto_nivaead_type);
static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
const char *name, u32 type, u32 mask)
{
- struct crypto_alg *alg;
+ spawn->base.frontend = &crypto_nivaead_type;
+ return crypto_grab_spawn(&spawn->base, name, type, mask);
+}
+
+static int aead_geniv_setkey(struct crypto_aead *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return crypto_aead_setkey(ctx->child, key, keylen);
+}
+
+static int aead_geniv_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+static void compat_encrypt_complete2(struct aead_request *req, int err)
+{
+ struct compat_request_ctx *rctx = aead_request_ctx(req);
+ struct aead_givcrypt_request *subreq = &rctx->subreq;
+ struct crypto_aead *geniv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (err)
+ goto out;
+
+ geniv = crypto_aead_reqtfm(req);
+ scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
+ crypto_aead_ivsize(geniv), 1);
+
+out:
+ kzfree(subreq->giv);
+}
+
+static void compat_encrypt_complete(struct crypto_async_request *base, int err)
+{
+ struct aead_request *req = base->data;
+
+ compat_encrypt_complete2(req, err);
+ aead_request_complete(req, err);
+}
+
+static int compat_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+ struct compat_request_ctx *rctx = aead_request_ctx(req);
+ struct aead_givcrypt_request *subreq = &rctx->subreq;
+ unsigned int ivsize = crypto_aead_ivsize(geniv);
+ struct scatterlist *src, *dst;
+ crypto_completion_t compl;
+ void *data;
+ u8 *info;
+ __be64 seq;
int err;
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_AEAD;
- mask |= CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV;
+ if (req->cryptlen < ivsize)
+ return -EINVAL;
- alg = crypto_alg_mod_lookup(name, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
+ compl = req->base.complete;
+ data = req->base.data;
- err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
- crypto_mod_put(alg);
+ rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
+ info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);
+
+ if (!info) {
+ info = kmalloc(ivsize, req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
+ GFP_ATOMIC);
+ if (!info)
+ return -ENOMEM;
+
+ compl = compat_encrypt_complete;
+ data = req;
+ }
+
+ memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));
+
+ src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
+ dst = req->src == req->dst ?
+ src : scatterwalk_ffwd(rctx->dst, rctx->ivsg, ivsize);
+
+ aead_givcrypt_set_tfm(subreq, ctx->child);
+ aead_givcrypt_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_givcrypt_set_crypt(subreq, src, dst,
+ req->cryptlen - ivsize, req->iv);
+ aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
+ aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));
+
+ err = crypto_aead_givencrypt(subreq);
+ if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
+ compat_encrypt_complete2(req, err);
return err;
}
-struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type,
- u32 mask)
+static int compat_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+ struct compat_request_ctx *rctx = aead_request_ctx(req);
+ struct aead_request *subreq = &rctx->subreq.areq;
+ unsigned int ivsize = crypto_aead_ivsize(geniv);
+ struct scatterlist *src, *dst;
+ crypto_completion_t compl;
+ void *data;
+
+ if (req->cryptlen < ivsize)
+ return -EINVAL;
+
+ aead_request_set_tfm(subreq, ctx->child);
+
+ compl = req->base.complete;
+ data = req->base.data;
+
+ src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
+ dst = req->src == req->dst ?
+ src : scatterwalk_ffwd(rctx->dst, req->dst,
+ req->assoclen + ivsize);
+
+ aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_crypt(subreq, src, dst,
+ req->cryptlen - ivsize, req->iv);
+ aead_request_set_assoc(subreq, req->src, req->assoclen);
+
+ scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
+
+ return crypto_aead_decrypt(subreq);
+}
+
+static int compat_encrypt_first(struct aead_request *req)
+{
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+ int err = 0;
+
+ spin_lock_bh(&ctx->lock);
+ if (geniv->encrypt != compat_encrypt_first)
+ goto unlock;
+
+ geniv->encrypt = compat_encrypt;
+
+unlock:
+ spin_unlock_bh(&ctx->lock);
+
+ if (err)
+ return err;
+
+ return compat_encrypt(req);
+}
+
+static int aead_geniv_init_compat(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *geniv = __crypto_aead_cast(tfm);
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+ int err;
+
+ spin_lock_init(&ctx->lock);
+
+ crypto_aead_set_reqsize(geniv, sizeof(struct compat_request_ctx));
+
+ err = aead_geniv_init(tfm);
+
+ ctx->child = geniv->child;
+ geniv->child = geniv;
+
+ return err;
+}
+
+static void aead_geniv_exit_compat(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *geniv = __crypto_aead_cast(tfm);
+ struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
+
+ crypto_free_aead(ctx->child);
+}
+
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type, u32 mask)
{
const char *name;
struct crypto_aead_spawn *spawn;
struct crypto_attr_type *algt;
- struct crypto_instance *inst;
- struct crypto_alg *alg;
+ struct aead_instance *inst;
+ struct aead_alg *alg;
+ unsigned int ivsize;
+ unsigned int maxauthsize;
int err;
algt = crypto_get_attr_type(tb);
@@ -296,20 +602,25 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
if (!inst)
return ERR_PTR(-ENOMEM);
- spawn = crypto_instance_ctx(inst);
+ spawn = aead_instance_ctx(inst);
/* Ignore async algorithms if necessary. */
mask |= crypto_requires_sync(algt->type, algt->mask);
- crypto_set_aead_spawn(spawn, inst);
- err = crypto_grab_nivaead(spawn, name, type, mask);
+ crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
+ err = (algt->mask & CRYPTO_ALG_GENIV) ?
+ crypto_grab_nivaead(spawn, name, type, mask) :
+ crypto_grab_aead(spawn, name, type, mask);
if (err)
goto err_free_inst;
- alg = crypto_aead_spawn_alg(spawn);
+ alg = crypto_spawn_aead_alg(spawn);
+
+ ivsize = crypto_aead_alg_ivsize(alg);
+ maxauthsize = crypto_aead_alg_maxauthsize(alg);
err = -EINVAL;
- if (!alg->cra_aead.ivsize)
+ if (ivsize < sizeof(u64))
goto err_drop_alg;
/*
@@ -318,39 +629,64 @@ struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
* template name and double-check the IV generator.
*/
if (algt->mask & CRYPTO_ALG_GENIV) {
- if (strcmp(tmpl->name, alg->cra_aead.geniv))
+ if (!alg->base.cra_aead.encrypt)
+ goto err_drop_alg;
+ if (strcmp(tmpl->name, alg->base.cra_aead.geniv))
goto err_drop_alg;
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
- memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
+ memcpy(inst->alg.base.cra_name, alg->base.cra_name,
CRYPTO_MAX_ALG_NAME);
- } else {
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s)", tmpl->name, alg->cra_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s)", tmpl->name, alg->cra_driver_name) >=
- CRYPTO_MAX_ALG_NAME)
- goto err_drop_alg;
+ memcpy(inst->alg.base.cra_driver_name,
+ alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME);
+
+ inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_GENIV;
+ inst->alg.base.cra_flags |= alg->base.cra_flags &
+ CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
+ inst->alg.base.cra_type = &crypto_aead_type;
+
+ inst->alg.base.cra_aead.ivsize = ivsize;
+ inst->alg.base.cra_aead.maxauthsize = maxauthsize;
+
+ inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey;
+ inst->alg.base.cra_aead.setauthsize =
+ alg->base.cra_aead.setauthsize;
+ inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt;
+ inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt;
+
+ goto out;
}
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV;
- inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_aead_type;
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, alg->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_drop_alg;
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto err_drop_alg;
- inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
- inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
- inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
- inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
- inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
- inst->alg.cra_aead.encrypt = alg->cra_aead.encrypt;
- inst->alg.cra_aead.decrypt = alg->cra_aead.decrypt;
+ inst->alg.setkey = aead_geniv_setkey;
+ inst->alg.setauthsize = aead_geniv_setauthsize;
+
+ inst->alg.ivsize = ivsize;
+ inst->alg.maxauthsize = maxauthsize;
+
+ inst->alg.encrypt = compat_encrypt_first;
+ inst->alg.decrypt = compat_decrypt;
+
+ inst->alg.base.cra_init = aead_geniv_init_compat;
+ inst->alg.base.cra_exit = aead_geniv_exit_compat;
out:
return inst;
@@ -364,9 +700,9 @@ err_free_inst:
}
EXPORT_SYMBOL_GPL(aead_geniv_alloc);
-void aead_geniv_free(struct crypto_instance *inst)
+void aead_geniv_free(struct aead_instance *inst)
{
- crypto_drop_aead(crypto_instance_ctx(inst));
+ crypto_drop_aead(aead_instance_ctx(inst));
kfree(inst);
}
EXPORT_SYMBOL_GPL(aead_geniv_free);
@@ -374,14 +710,17 @@ EXPORT_SYMBOL_GPL(aead_geniv_free);
int aead_geniv_init(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_aead *child;
struct crypto_aead *aead;
- aead = crypto_spawn_aead(crypto_instance_ctx(inst));
- if (IS_ERR(aead))
- return PTR_ERR(aead);
+ aead = __crypto_aead_cast(tfm);
- tfm->crt_aead.base = aead;
- tfm->crt_aead.reqsize += crypto_aead_reqsize(aead);
+ child = crypto_spawn_aead(crypto_instance_ctx(inst));
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ aead->child = child;
+ aead->reqsize += crypto_aead_reqsize(child);
return 0;
}
@@ -389,7 +728,7 @@ EXPORT_SYMBOL_GPL(aead_geniv_init);
void aead_geniv_exit(struct crypto_tfm *tfm)
{
- crypto_free_aead(tfm->crt_aead.base);
+ crypto_free_aead(__crypto_aead_cast(tfm)->child);
}
EXPORT_SYMBOL_GPL(aead_geniv_exit);
@@ -443,6 +782,13 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
if (!tmpl)
goto kill_larval;
+ if (tmpl->create) {
+ err = tmpl->create(tmpl, tb);
+ if (err)
+ goto put_tmpl;
+ goto ok;
+ }
+
inst = tmpl->alloc(tb);
err = PTR_ERR(inst);
if (IS_ERR(inst))
@@ -454,6 +800,7 @@ static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
goto put_tmpl;
}
+ok:
/* Redo the lookup to use the instance we just registered. */
err = -EAGAIN;
@@ -489,7 +836,7 @@ struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
return alg;
if (alg->cra_type == &crypto_aead_type) {
- if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
+ if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
crypto_mod_put(alg);
alg = ERR_PTR(-ENOENT);
}
@@ -505,62 +852,91 @@ EXPORT_SYMBOL_GPL(crypto_lookup_aead);
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask)
{
- struct crypto_alg *alg;
- int err;
-
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_AEAD;
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- alg = crypto_lookup_aead(name, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
-
- err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
- crypto_mod_put(alg);
- return err;
+ spawn->base.frontend = &crypto_aead_type;
+ return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_aead);
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
{
- struct crypto_tfm *tfm;
- int err;
-
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_AEAD;
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- for (;;) {
- struct crypto_alg *alg;
-
- alg = crypto_lookup_aead(alg_name, type, mask);
- if (IS_ERR(alg)) {
- err = PTR_ERR(alg);
- goto err;
- }
-
- tfm = __crypto_alloc_tfm(alg, type, mask);
- if (!IS_ERR(tfm))
- return __crypto_aead_cast(tfm);
-
- crypto_mod_put(alg);
- err = PTR_ERR(tfm);
-
-err:
- if (err != -EAGAIN)
- break;
- if (signal_pending(current)) {
- err = -EINTR;
- break;
- }
- }
-
- return ERR_PTR(err);
+ return crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
+static int aead_prepare_alg(struct aead_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ base->cra_type = &crypto_new_aead_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
+
+ return 0;
+}
+
+int crypto_register_aead(struct aead_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int err;
+
+ err = aead_prepare_alg(alg);
+ if (err)
+ return err;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_aead);
+
+void crypto_unregister_aead(struct aead_alg *alg)
+{
+ crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_aead);
+
+int crypto_register_aeads(struct aead_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_register_aead(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--i; i >= 0; --i)
+ crypto_unregister_aead(&algs[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_aeads);
+
+void crypto_unregister_aeads(struct aead_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_aead(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_aeads);
+
+int aead_register_instance(struct crypto_template *tmpl,
+ struct aead_instance *inst)
+{
+ int err;
+
+ err = aead_prepare_alg(&inst->alg);
+ if (err)
+ return err;
+
+ return crypto_register_instance(tmpl, aead_crypto_instance(inst));
+}
+EXPORT_SYMBOL_GPL(aead_register_instance);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f22cc56fd1b3..2bc180e02115 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -127,6 +127,7 @@ EXPORT_SYMBOL_GPL(af_alg_release);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
+ const u32 forbidden = CRYPTO_ALG_INTERNAL;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -151,7 +152,9 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(type))
return PTR_ERR(type);
- private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
+ private = type->bind(sa->salg_name,
+ sa->salg_feat & ~forbidden,
+ sa->salg_mask & ~forbidden);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);
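
From userspace this bind path is reached through an AF_ALG socket, so the
masking above means CRYPTO_ALG_INTERNAL implementations can no longer be
requested via salg_feat/salg_mask. A minimal userspace sketch (error
handling trimmed):

	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int alg_aead_open(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "aead",
			.salg_name   = "gcm(aes)",
			/* any CRYPTO_ALG_INTERNAL bit in salg_feat or
			 * salg_mask is silently stripped by alg_bind() */
		};
		int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

		if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return -1;
		return fd;
	}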
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
new file mode 100644
index 000000000000..d7986414814e
--- /dev/null
+++ b/crypto/akcipher.c
@@ -0,0 +1,117 @@
+/*
+ * Public Key Encryption
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/akcipher.h>
+#include <crypto/public_key.h>
+#include "internal.h"
+
+#ifdef CONFIG_NET
+static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_akcipher rakcipher;
+
+ strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
+ sizeof(struct crypto_report_akcipher), &rakcipher))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : akcipher\n");
+}
+
+static void crypto_akcipher_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm);
+ struct akcipher_alg *alg = crypto_akcipher_alg(akcipher);
+
+ alg->exit(akcipher);
+}
+
+static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm);
+ struct akcipher_alg *alg = crypto_akcipher_alg(akcipher);
+
+ if (alg->exit)
+ akcipher->base.exit = crypto_akcipher_exit_tfm;
+
+ if (alg->init)
+ return alg->init(akcipher);
+
+ return 0;
+}
+
+static const struct crypto_type crypto_akcipher_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_akcipher_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_akcipher_show,
+#endif
+ .report = crypto_akcipher_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_AKCIPHER,
+ .tfmsize = offsetof(struct crypto_akcipher, base),
+};
+
+struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_akcipher_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
+
+int crypto_register_akcipher(struct akcipher_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ base->cra_type = &crypto_akcipher_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_akcipher);
+
+void crypto_unregister_akcipher(struct akcipher_alg *alg)
+{
+ crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic public key cihper type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d2627a3d4ed8..3c079b7f23f6 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/fips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -43,12 +44,9 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg)
static inline void crypto_check_module_sig(struct module *mod)
{
-#ifdef CONFIG_CRYPTO_FIPS
- if (fips_enabled && mod && !mod->sig_ok)
+ if (fips_enabled && mod && !module_sig_ok(mod))
panic("Module %s signature verification failed in FIPS mode\n",
- mod->name);
-#endif
- return;
+ module_name(mod));
}
static int crypto_check_alg(struct crypto_alg *alg)
@@ -614,6 +612,22 @@ out:
}
EXPORT_SYMBOL_GPL(crypto_init_spawn2);
+int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
+ u32 type, u32 mask)
+{
+ struct crypto_alg *alg;
+ int err;
+
+ alg = crypto_find_alg(name, spawn->frontend, type, mask);
+ if (IS_ERR(alg))
+ return PTR_ERR(alg);
+
+ err = crypto_init_spawn(spawn, alg, spawn->inst, mask);
+ crypto_mod_put(alg);
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_grab_spawn);
+
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
if (!spawn->alg)
@@ -964,6 +978,13 @@ void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
}
EXPORT_SYMBOL_GPL(crypto_xor);
+unsigned int crypto_alg_extsize(struct crypto_alg *alg)
+{
+ return alg->cra_ctxsize +
+ (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+}
+EXPORT_SYMBOL_GPL(crypto_alg_extsize);
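
crypto_alg_extsize() reserves the algorithm's context size plus whatever
alignment slack it needs beyond the default tfm context alignment. As a
worked example with assumed numbers: cra_ctxsize = 104, cra_alignmask = 15
and an 8-byte crypto_tfm_ctx_alignment() yield 104 + (15 & ~7) = 112 bytes
of extra tfm space.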
+
static int __init crypto_algapi_init(void)
{
crypto_init_proc();
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 69abada22373..e0408a480d2f 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -13,6 +13,7 @@
* any later version.
*/
+#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
@@ -71,7 +72,7 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
- return (ctx->used >= (ctx->aead_assoclen + (ctx->enc ? 0 : as)));
+ return ctx->used >= ctx->aead_assoclen + as;
}
static void aead_put_sgl(struct sock *sk)
@@ -352,12 +353,8 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct aead_ctx *ctx = ask->private;
- unsigned bs = crypto_aead_blocksize(crypto_aead_reqtfm(&ctx->aead_req));
unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
struct aead_sg_list *sgl = &ctx->tsgl;
- struct scatterlist *sg = NULL;
- struct scatterlist assoc[ALG_MAX_PAGES];
- size_t assoclen = 0;
unsigned int i = 0;
int err = -EINVAL;
unsigned long used = 0;
@@ -406,23 +403,13 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
if (!aead_sufficient_data(ctx))
goto unlock;
+ outlen = used;
+
/*
* The cipher operation input data is reduced by the associated data
* length as this data is processed separately later on.
*/
- used -= ctx->aead_assoclen;
-
- if (ctx->enc) {
- /* round up output buffer to multiple of block size */
- outlen = ((used + bs - 1) / bs * bs);
- /* add the size needed for the auth tag to be created */
- outlen += as;
- } else {
- /* output data size is input without the authentication tag */
- outlen = used - as;
- /* round up output buffer to multiple of block size */
- outlen = ((outlen + bs - 1) / bs * bs);
- }
+ used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
/* convert iovecs of output buffers into scatterlists */
while (iov_iter_count(&msg->msg_iter)) {
@@ -451,47 +438,11 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
if (usedpages < outlen)
goto unlock;
- sg_init_table(assoc, ALG_MAX_PAGES);
- assoclen = ctx->aead_assoclen;
- /*
- * Split scatterlist into two: first part becomes AD, second part
- * is plaintext / ciphertext. The first part is assigned to assoc
- * scatterlist. When this loop finishes, sg points to the start of the
- * plaintext / ciphertext.
- */
- for (i = 0; i < ctx->tsgl.cur; i++) {
- sg = sgl->sg + i;
- if (sg->length <= assoclen) {
- /* AD is larger than one page */
- sg_set_page(assoc + i, sg_page(sg),
- sg->length, sg->offset);
- assoclen -= sg->length;
- if (i >= ctx->tsgl.cur)
- goto unlock;
- } else if (!assoclen) {
- /* current page is to start of plaintext / ciphertext */
- if (i)
- /* AD terminates at page boundary */
- sg_mark_end(assoc + i - 1);
- else
- /* AD size is zero */
- sg_mark_end(assoc);
- break;
- } else {
- /* AD does not terminate at page boundary */
- sg_set_page(assoc + i, sg_page(sg),
- assoclen, sg->offset);
- sg_mark_end(assoc + i);
- /* plaintext / ciphertext starts after AD */
- sg->length -= assoclen;
- sg->offset += assoclen;
- break;
- }
- }
+ sg_mark_end(sgl->sg + sgl->cur - 1);
- aead_request_set_assoc(&ctx->aead_req, assoc, ctx->aead_assoclen);
- aead_request_set_crypt(&ctx->aead_req, sg, ctx->rsgl[0].sg, used,
- ctx->iv);
+ aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
+ used, ctx->iv);
+ aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
err = af_alg_wait_for_completion(ctx->enc ?
crypto_aead_encrypt(&ctx->aead_req) :
@@ -563,7 +514,8 @@ static struct proto_ops algif_aead_ops = {
static void *aead_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_aead(name, type, mask);
+ return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW,
+ mask | CRYPTO_ALG_AEAD_NEW);
}
static void aead_release(void *private)
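
The simplification above relies on the new AEAD request convention: the
associated data now sits in front of the plaintext/ciphertext in the source
scatterlist and is described by aead_request_set_ad() instead of a separate
assoc scatterlist. A sketch of the pattern (req and the my_* names are
placeholders):

	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], my_ad, my_adlen);		/* AD first ... */
	sg_set_buf(&sg[1], my_text, my_textlen);	/* ... then the text */

	/* cryptlen covers the text only; the AD length is passed separately */
	aead_request_set_crypt(req, sg, sg, my_textlen, my_iv);
	aead_request_set_ad(req, my_adlen);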
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 8109aaad2726..150c2b6480ed 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -164,7 +164,7 @@ static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
* The check whether seedlen is of sufficient size is done in the RNG
* implementations.
*/
- return crypto_rng_reset(private, (u8 *)seed, seedlen);
+ return crypto_rng_reset(private, seed, seedlen);
}
static const struct af_alg_type algif_type_rng = {
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 765fe7609348..eff337ce9003 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -20,8 +20,6 @@
#include <linux/moduleparam.h>
#include <linux/string.h>
-#include "internal.h"
-
#define DEFAULT_PRNG_KEY "0123456789abcdef"
#define DEFAULT_PRNG_KSZ 16
#define DEFAULT_BLK_SZ 16
@@ -281,11 +279,11 @@ static void free_prng_context(struct prng_context *ctx)
}
static int reset_prng_context(struct prng_context *ctx,
- unsigned char *key, size_t klen,
- unsigned char *V, unsigned char *DT)
+ const unsigned char *key, size_t klen,
+ const unsigned char *V, const unsigned char *DT)
{
int ret;
- unsigned char *prng_key;
+ const unsigned char *prng_key;
spin_lock_bh(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
@@ -353,8 +351,9 @@ static void cprng_exit(struct crypto_tfm *tfm)
free_prng_context(crypto_tfm_ctx(tfm));
}
-static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen)
+static int cprng_get_random(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *rdata, unsigned int dlen)
{
struct prng_context *prng = crypto_rng_ctx(tfm);
@@ -367,11 +366,12 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
* V and KEY are required during reset, and DT is optional, detected
* as being present by testing the length of the seed
*/
-static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+static int cprng_reset(struct crypto_rng *tfm,
+ const u8 *seed, unsigned int slen)
{
struct prng_context *prng = crypto_rng_ctx(tfm);
- u8 *key = seed + DEFAULT_BLK_SZ;
- u8 *dt = NULL;
+ const u8 *key = seed + DEFAULT_BLK_SZ;
+ const u8 *dt = NULL;
if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
return -EINVAL;
@@ -387,18 +387,20 @@ static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
}
#ifdef CONFIG_CRYPTO_FIPS
-static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen)
+static int fips_cprng_get_random(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *rdata, unsigned int dlen)
{
struct prng_context *prng = crypto_rng_ctx(tfm);
return get_prng_bytes(rdata, dlen, prng, 1);
}
-static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+static int fips_cprng_reset(struct crypto_rng *tfm,
+ const u8 *seed, unsigned int slen)
{
u8 rdata[DEFAULT_BLK_SZ];
- u8 *key = seed + DEFAULT_BLK_SZ;
+ const u8 *key = seed + DEFAULT_BLK_SZ;
int rc;
struct prng_context *prng = crypto_rng_ctx(tfm);
@@ -424,40 +426,32 @@ out:
}
#endif
-static struct crypto_alg rng_algs[] = { {
- .cra_name = "stdrng",
- .cra_driver_name = "ansi_cprng",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_RNG,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_type = &crypto_rng_type,
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- .cra_u = {
- .rng = {
- .rng_make_random = cprng_get_random,
- .rng_reset = cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
- }
+static struct rng_alg rng_algs[] = { {
+ .generate = cprng_get_random,
+ .seed = cprng_reset,
+ .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "ansi_cprng",
+ .cra_priority = 100,
+ .cra_ctxsize = sizeof(struct prng_context),
+ .cra_module = THIS_MODULE,
+ .cra_init = cprng_init,
+ .cra_exit = cprng_exit,
}
#ifdef CONFIG_CRYPTO_FIPS
}, {
- .cra_name = "fips(ansi_cprng)",
- .cra_driver_name = "fips_ansi_cprng",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_RNG,
- .cra_ctxsize = sizeof(struct prng_context),
- .cra_type = &crypto_rng_type,
- .cra_module = THIS_MODULE,
- .cra_init = cprng_init,
- .cra_exit = cprng_exit,
- .cra_u = {
- .rng = {
- .rng_make_random = fips_cprng_get_random,
- .rng_reset = fips_cprng_reset,
- .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ,
- }
+ .generate = fips_cprng_get_random,
+ .seed = fips_cprng_reset,
+ .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
+ .base = {
+ .cra_name = "fips(ansi_cprng)",
+ .cra_driver_name = "fips_ansi_cprng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct prng_context),
+ .cra_module = THIS_MODULE,
+ .cra_init = cprng_init,
+ .cra_exit = cprng_exit,
}
#endif
} };
@@ -465,12 +459,12 @@ static struct crypto_alg rng_algs[] = { {
/* Module initialization */
static int __init prng_mod_init(void)
{
- return crypto_register_algs(rng_algs, ARRAY_SIZE(rng_algs));
+ return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
}
static void __exit prng_mod_fini(void)
{
- crypto_unregister_algs(rng_algs, ARRAY_SIZE(rng_algs));
+ crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
}
MODULE_LICENSE("GPL");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 78fb16cab13f..3e852299afb4 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -10,7 +10,7 @@
*
*/
-#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
@@ -570,13 +570,14 @@ static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
crypto_ahash_alignmask(auth) + 1) +
crypto_ablkcipher_ivsize(enc);
- tfm->crt_aead.reqsize = sizeof(struct authenc_request_ctx) +
- ctx->reqoff +
- max_t(unsigned int,
- crypto_ahash_reqsize(auth) +
- sizeof(struct ahash_request),
- sizeof(struct skcipher_givcrypt_request) +
- crypto_ablkcipher_reqsize(enc));
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct authenc_request_ctx) +
+ ctx->reqoff +
+ max_t(unsigned int,
+ crypto_ahash_reqsize(auth) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(enc)));
return 0;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 024bff2344fc..a3da6770bc9e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -12,7 +12,7 @@
*
*/
-#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
@@ -662,13 +662,14 @@ static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
crypto_ahash_alignmask(auth) + 1) +
crypto_ablkcipher_ivsize(enc);
- tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) +
- ctx->reqoff +
- max_t(unsigned int,
- crypto_ahash_reqsize(auth) +
- sizeof(struct ahash_request),
- sizeof(struct skcipher_givcrypt_request) +
- crypto_ablkcipher_reqsize(enc));
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct authenc_esn_request_ctx) +
+ ctx->reqoff +
+ max_t(unsigned int,
+ crypto_ahash_reqsize(auth) +
+ sizeof(struct ahash_request),
+ sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(enc)));
return 0;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 0122bec38564..11b981492031 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -14,6 +14,7 @@
*
*/
+#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 003bbbd21a2b..a4d1a5eda18b 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -453,9 +453,9 @@ static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
align = crypto_tfm_alg_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
- tfm->crt_aead.reqsize = align +
- sizeof(struct crypto_ccm_req_priv_ctx) +
- crypto_ablkcipher_reqsize(ctr);
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ align + sizeof(struct crypto_ccm_req_priv_ctx) +
+ crypto_ablkcipher_reqsize(ctr));
return 0;
@@ -729,10 +729,10 @@ static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
- tfm->crt_aead.reqsize = sizeof(struct aead_request) +
- ALIGN(crypto_aead_reqsize(aead),
- crypto_tfm_ctx_alignment()) +
- align + 16;
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct aead_request) +
+ ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
+ align + 16);
return 0;
}
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
new file mode 100644
index 000000000000..fa42e708aa96
--- /dev/null
+++ b/crypto/chacha20_generic.c
@@ -0,0 +1,216 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define CHACHA20_NONCE_SIZE 16
+#define CHACHA20_KEY_SIZE 32
+#define CHACHA20_BLOCK_SIZE 64
+
+struct chacha20_ctx {
+ u32 key[8];
+};
+
+static inline u32 rotl32(u32 v, u8 n)
+{
+ return (v << n) | (v >> (sizeof(v) * 8 - n));
+}
+
+static inline u32 le32_to_cpuvp(const void *p)
+{
+ return le32_to_cpup(p);
+}
+
+static void chacha20_block(u32 *state, void *stream)
+{
+ u32 x[16], *out = stream;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(x); i++)
+ x[i] = state[i];
+
+ for (i = 0; i < 20; i += 2) {
+ x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16);
+ x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16);
+ x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16);
+ x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16);
+
+ x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12);
+ x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12);
+ x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
+ x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);
+
+ x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8);
+ x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8);
+ x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8);
+ x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8);
+
+ x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7);
+ x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7);
+ x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
+ x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);
+
+ x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16);
+ x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16);
+ x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16);
+ x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16);
+
+ x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
+ x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
+ x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12);
+ x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12);
+
+ x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8);
+ x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8);
+ x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8);
+ x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8);
+
+ x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
+ x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
+ x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7);
+ x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(x); i++)
+ out[i] = cpu_to_le32(x[i] + state[i]);
+
+ state[12]++;
+}
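
Each group of four statements above is one ChaCha quarter round: a loop
iteration performs a column round over the tuples (0,4,8,12) through
(3,7,11,15), then a diagonal round over (0,5,10,15) through (3,4,9,14),
for 20 rounds in total. Folded into a helper, an equivalent sketch:

	#define QUARTERROUND(a, b, c, d) do {			\
		x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a], 16);	\
		x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c], 12);	\
		x[a] += x[b]; x[d] = rotl32(x[d] ^ x[a],  8);	\
		x[c] += x[d]; x[b] = rotl32(x[b] ^ x[c],  7);	\
	} while (0)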
+
+static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes)
+{
+ u8 stream[CHACHA20_BLOCK_SIZE];
+
+ if (dst != src)
+ memcpy(dst, src, bytes);
+
+ while (bytes >= CHACHA20_BLOCK_SIZE) {
+ chacha20_block(state, stream);
+ crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
+ bytes -= CHACHA20_BLOCK_SIZE;
+ dst += CHACHA20_BLOCK_SIZE;
+ }
+ if (bytes) {
+ chacha20_block(state, stream);
+ crypto_xor(dst, stream, bytes);
+ }
+}
+
+static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
+{
+ static const char constant[16] = "expand 32-byte k";
+
+ state[0] = le32_to_cpuvp(constant + 0);
+ state[1] = le32_to_cpuvp(constant + 4);
+ state[2] = le32_to_cpuvp(constant + 8);
+ state[3] = le32_to_cpuvp(constant + 12);
+ state[4] = ctx->key[0];
+ state[5] = ctx->key[1];
+ state[6] = ctx->key[2];
+ state[7] = ctx->key[3];
+ state[8] = ctx->key[4];
+ state[9] = ctx->key[5];
+ state[10] = ctx->key[6];
+ state[11] = ctx->key[7];
+ state[12] = le32_to_cpuvp(iv + 0);
+ state[13] = le32_to_cpuvp(iv + 4);
+ state[14] = le32_to_cpuvp(iv + 8);
+ state[15] = le32_to_cpuvp(iv + 12);
+}
+
+static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA20_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32));
+
+ return 0;
+}
+
+static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+
+ chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+
+ while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
+ chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
+ err = blkcipher_walk_done(desc, &walk,
+ walk.nbytes % CHACHA20_BLOCK_SIZE);
+ }
+
+ if (walk.nbytes) {
+ chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes);
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+
+ return err;
+}
+
+static struct crypto_alg alg = {
+ .cra_name = "chacha20",
+ .cra_driver_name = "chacha20-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_blocksize = 1,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_ctxsize = sizeof(struct chacha20_ctx),
+ .cra_alignmask = sizeof(u32) - 1,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = CHACHA20_KEY_SIZE,
+ .max_keysize = CHACHA20_KEY_SIZE,
+ .ivsize = CHACHA20_NONCE_SIZE,
+ .geniv = "seqiv",
+ .setkey = chacha20_setkey,
+ .encrypt = chacha20_crypt,
+ .decrypt = chacha20_crypt,
+ },
+ },
+};
+
+static int __init chacha20_generic_mod_init(void)
+{
+ return crypto_register_alg(&alg);
+}
+
+static void __exit chacha20_generic_mod_fini(void)
+{
+ crypto_unregister_alg(&alg);
+}
+
+module_init(chacha20_generic_mod_init);
+module_exit(chacha20_generic_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi ");
+MODULE_DESCRIPTION("chacha20 cipher algorithm");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-generic");
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
new file mode 100644
index 000000000000..7b46ed799a64
--- /dev/null
+++ b/crypto/chacha20poly1305.c
@@ -0,0 +1,695 @@
+/*
+ * ChaCha20-Poly1305 AEAD, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+#define POLY1305_BLOCK_SIZE 16
+#define POLY1305_DIGEST_SIZE 16
+#define POLY1305_KEY_SIZE 32
+#define CHACHA20_KEY_SIZE 32
+#define CHACHA20_IV_SIZE 16
+#define CHACHAPOLY_IV_SIZE 12
+
+struct chachapoly_instance_ctx {
+ struct crypto_skcipher_spawn chacha;
+ struct crypto_ahash_spawn poly;
+ unsigned int saltlen;
+};
+
+struct chachapoly_ctx {
+ struct crypto_ablkcipher *chacha;
+ struct crypto_ahash *poly;
+ /* key bytes we use for the ChaCha20 IV */
+ unsigned int saltlen;
+ u8 salt[];
+};
+
+struct poly_req {
+ /* zero byte padding for AD/ciphertext, as needed */
+ u8 pad[POLY1305_BLOCK_SIZE];
+ /* tail data with AD/ciphertext lengths */
+ struct {
+ __le64 assoclen;
+ __le64 cryptlen;
+ } tail;
+ struct scatterlist src[1];
+ struct ahash_request req; /* must be last member */
+};
+
+struct chacha_req {
+ u8 iv[CHACHA20_IV_SIZE];
+ struct scatterlist src[1];
+ struct ablkcipher_request req; /* must be last member */
+};
+
+struct chachapoly_req_ctx {
+ /* the key we generate for Poly1305 using Chacha20 */
+ u8 key[POLY1305_KEY_SIZE];
+ /* calculated Poly1305 tag */
+ u8 tag[POLY1305_DIGEST_SIZE];
+ /* length of data to en/decrypt, without ICV */
+ unsigned int cryptlen;
+ union {
+ struct poly_req poly;
+ struct chacha_req chacha;
+ } u;
+};
+
+static inline void async_done_continue(struct aead_request *req, int err,
+ int (*cont)(struct aead_request *))
+{
+ if (!err)
+ err = cont(req);
+
+ if (err != -EINPROGRESS && err != -EBUSY)
+ aead_request_complete(req, err);
+}
+
+static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ __le32 leicb = cpu_to_le32(icb);
+
+ memcpy(iv, &leicb, sizeof(leicb));
+ memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen);
+ memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
+ CHACHA20_IV_SIZE - sizeof(leicb) - ctx->saltlen);
+}
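
The resulting 16-byte ChaCha20 IV is therefore laid out as follows, with
icb = 0 used for the Poly1305 key-generation block and icb = 1 for the
payload:

	rfc7539    (ivsize 12, saltlen 0): le32(icb) || req->iv[0..11]
	rfc7539esp (ivsize  8, saltlen 4): le32(icb) || salt[0..3] || req->iv[0..7]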
+
+static int poly_verify_tag(struct aead_request *req)
+{
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ u8 tag[sizeof(rctx->tag)];
+
+ scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0);
+ if (crypto_memneq(tag, rctx->tag, sizeof(tag)))
+ return -EBADMSG;
+ return 0;
+}
+
+static int poly_copy_tag(struct aead_request *req)
+{
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen,
+ sizeof(rctx->tag), 1);
+ return 0;
+}
+
+static void chacha_decrypt_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_verify_tag);
+}
+
+static int chacha_decrypt(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct chacha_req *creq = &rctx->u.chacha;
+ int err;
+
+ chacha_iv(creq->iv, req, 1);
+
+ ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ chacha_decrypt_done, req);
+ ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
+ ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+ rctx->cryptlen, creq->iv);
+ err = crypto_ablkcipher_decrypt(&creq->req);
+ if (err)
+ return err;
+
+ return poly_verify_tag(req);
+}
+
+static int poly_tail_continue(struct aead_request *req)
+{
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ if (rctx->cryptlen == req->cryptlen) /* encrypting */
+ return poly_copy_tag(req);
+
+ return chacha_decrypt(req);
+}
+
+static void poly_tail_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_tail_continue);
+}
+
+static int poly_tail(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct poly_req *preq = &rctx->u.poly;
+ __le64 len;
+ int err;
+
+ sg_init_table(preq->src, 1);
+ len = cpu_to_le64(req->assoclen);
+ memcpy(&preq->tail.assoclen, &len, sizeof(len));
+ len = cpu_to_le64(rctx->cryptlen);
+ memcpy(&preq->tail.cryptlen, &len, sizeof(len));
+ sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
+
+ ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ poly_tail_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src,
+ rctx->tag, sizeof(preq->tail));
+
+ err = crypto_ahash_finup(&preq->req);
+ if (err)
+ return err;
+
+ return poly_tail_continue(req);
+}
+
+static void poly_cipherpad_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_tail);
+}
+
+static int poly_cipherpad(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct poly_req *preq = &rctx->u.poly;
+ unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
+ int err;
+
+ padlen = (bs - (rctx->cryptlen % bs)) % bs;
+ memset(preq->pad, 0, sizeof(preq->pad));
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, &preq->pad, padlen);
+
+ ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ poly_cipherpad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+
+ err = crypto_ahash_update(&preq->req);
+ if (err)
+ return err;
+
+ return poly_tail(req);
+}
+
+static void poly_cipher_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_cipherpad);
+}
+
+static int poly_cipher(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct poly_req *preq = &rctx->u.poly;
+ struct scatterlist *crypt = req->src;
+ int err;
+
+ if (rctx->cryptlen == req->cryptlen) /* encrypting */
+ crypt = req->dst;
+
+ ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ poly_cipher_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
+
+ err = crypto_ahash_update(&preq->req);
+ if (err)
+ return err;
+
+ return poly_cipherpad(req);
+}
+
+static void poly_adpad_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_cipher);
+}
+
+static int poly_adpad(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct poly_req *preq = &rctx->u.poly;
+ unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
+ int err;
+
+ padlen = (bs - (req->assoclen % bs)) % bs;
+ memset(preq->pad, 0, sizeof(preq->pad));
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, preq->pad, padlen);
+
+ ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ poly_adpad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+
+ err = crypto_ahash_update(&preq->req);
+ if (err)
+ return err;
+
+ return poly_cipher(req);
+}
+
+static void poly_ad_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_adpad);
+}
+
+static int poly_ad(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct poly_req *preq = &rctx->u.poly;
+ int err;
+
+ ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ poly_ad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen);
+
+ err = crypto_ahash_update(&preq->req);
+ if (err)
+ return err;
+
+ return poly_adpad(req);
+}
+
+static void poly_setkey_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_ad);
+}
+
+static int poly_setkey(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct poly_req *preq = &rctx->u.poly;
+ int err;
+
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
+
+ ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ poly_setkey_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
+
+ err = crypto_ahash_update(&preq->req);
+ if (err)
+ return err;
+
+ return poly_ad(req);
+}
+
+static void poly_init_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_setkey);
+}
+
+static int poly_init(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct poly_req *preq = &rctx->u.poly;
+ int err;
+
+ ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ poly_init_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+
+ err = crypto_ahash_init(&preq->req);
+ if (err)
+ return err;
+
+ return poly_setkey(req);
+}
+
+static void poly_genkey_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_init);
+}
+
+static int poly_genkey(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct chacha_req *creq = &rctx->u.chacha;
+ int err;
+
+ sg_init_table(creq->src, 1);
+ memset(rctx->key, 0, sizeof(rctx->key));
+ sg_set_buf(creq->src, rctx->key, sizeof(rctx->key));
+
+ chacha_iv(creq->iv, req, 0);
+
+ ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ poly_genkey_done, req);
+ ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
+ ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src,
+ POLY1305_KEY_SIZE, creq->iv);
+
+ err = crypto_ablkcipher_decrypt(&creq->req);
+ if (err)
+ return err;
+
+ return poly_init(req);
+}
+
+static void chacha_encrypt_done(struct crypto_async_request *areq, int err)
+{
+ async_done_continue(areq->data, err, poly_genkey);
+}
+
+static int chacha_encrypt(struct aead_request *req)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+ struct chacha_req *creq = &rctx->u.chacha;
+ int err;
+
+ chacha_iv(creq->iv, req, 1);
+
+ ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ chacha_encrypt_done, req);
+ ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
+ ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+ req->cryptlen, creq->iv);
+ err = crypto_ablkcipher_encrypt(&creq->req);
+ if (err)
+ return err;
+
+ return poly_genkey(req);
+}
+
+static int chachapoly_encrypt(struct aead_request *req)
+{
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->cryptlen = req->cryptlen;
+
+ /* encrypt call chain:
+ * - chacha_encrypt/done()
+ * - poly_genkey/done()
+ * - poly_init/done()
+ * - poly_setkey/done()
+ * - poly_ad/done()
+ * - poly_adpad/done()
+ * - poly_cipher/done()
+ * - poly_cipherpad/done()
+ * - poly_tail/done/continue()
+ * - poly_copy_tag()
+ */
+ return chacha_encrypt(req);
+}
+
+static int chachapoly_decrypt(struct aead_request *req)
+{
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ if (req->cryptlen < POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+ rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
+
+ /* decrypt call chain:
+ * - poly_genkey/done()
+ * - poly_init/done()
+ * - poly_setkey/done()
+ * - poly_ad/done()
+ * - poly_adpad/done()
+ * - poly_cipher/done()
+ * - poly_cipherpad/done()
+ * - poly_tail/done/continue()
+ * - chacha_decrypt/done()
+ * - poly_verify_tag()
+ */
+ return poly_genkey(req);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chachapoly_ctx *ctx = crypto_aead_ctx(aead);
+ int err;
+
+ if (keylen != ctx->saltlen + CHACHA20_KEY_SIZE)
+ return -EINVAL;
+
+ keylen -= ctx->saltlen;
+ memcpy(ctx->salt, key + keylen, ctx->saltlen);
+
+ crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
+ crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
+ CRYPTO_TFM_REQ_MASK);
+
+ err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen);
+ crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int chachapoly_init(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_ablkcipher *chacha;
+ struct crypto_ahash *poly;
+ unsigned long align;
+
+ poly = crypto_spawn_ahash(&ictx->poly);
+ if (IS_ERR(poly))
+ return PTR_ERR(poly);
+
+ chacha = crypto_spawn_skcipher(&ictx->chacha);
+ if (IS_ERR(chacha)) {
+ crypto_free_ahash(poly);
+ return PTR_ERR(chacha);
+ }
+
+ ctx->chacha = chacha;
+ ctx->poly = poly;
+ ctx->saltlen = ictx->saltlen;
+
+ align = crypto_tfm_alg_alignmask(tfm);
+ align &= ~(crypto_tfm_ctx_alignment() - 1);
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ align + offsetof(struct chachapoly_req_ctx, u) +
+ max(offsetof(struct chacha_req, req) +
+ sizeof(struct ablkcipher_request) +
+ crypto_ablkcipher_reqsize(chacha),
+ offsetof(struct poly_req, req) +
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(poly)));
+
+ return 0;
+}
+
+static void chachapoly_exit(struct crypto_tfm *tfm)
+{
+ struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_ahash(ctx->poly);
+ crypto_free_ablkcipher(ctx->chacha);
+}
+
+static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
+ const char *name,
+ unsigned int ivsize)
+{
+ struct crypto_attr_type *algt;
+ struct crypto_instance *inst;
+ struct crypto_alg *chacha;
+ struct crypto_alg *poly;
+ struct ahash_alg *poly_ahash;
+ struct chachapoly_instance_ctx *ctx;
+ const char *chacha_name, *poly_name;
+ int err;
+
+ if (ivsize > CHACHAPOLY_IV_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return ERR_CAST(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
+ return ERR_PTR(-EINVAL);
+
+ chacha_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(chacha_name))
+ return ERR_CAST(chacha_name);
+ poly_name = crypto_attr_alg_name(tb[2]);
+ if (IS_ERR(poly_name))
+ return ERR_CAST(poly_name);
+
+ poly = crypto_find_alg(poly_name, &crypto_ahash_type,
+ CRYPTO_ALG_TYPE_HASH,
+ CRYPTO_ALG_TYPE_AHASH_MASK);
+ if (IS_ERR(poly))
+ return ERR_CAST(poly);
+
+ err = -ENOMEM;
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ goto out_put_poly;
+
+ ctx = crypto_instance_ctx(inst);
+ ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+ poly_ahash = container_of(poly, struct ahash_alg, halg.base);
+ err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst);
+ if (err)
+ goto err_free_inst;
+
+ crypto_set_skcipher_spawn(&ctx->chacha, inst);
+ err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
+ crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err)
+ goto err_drop_poly;
+
+ chacha = crypto_skcipher_spawn_alg(&ctx->chacha);
+
+ err = -EINVAL;
+ /* Need 16-byte IV size, including Initial Block Counter value */
+ if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE)
+ goto out_drop_chacha;
+ /* Not a stream cipher? */
+ if (chacha->cra_blocksize != 1)
+ goto out_drop_chacha;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s,%s)", name, chacha_name,
+ poly_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_drop_chacha;
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s(%s,%s)", name, chacha->cra_driver_name,
+ poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_drop_chacha;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
+ inst->alg.cra_flags |= (chacha->cra_flags |
+ poly->cra_flags) & CRYPTO_ALG_ASYNC;
+ inst->alg.cra_priority = (chacha->cra_priority +
+ poly->cra_priority) / 2;
+ inst->alg.cra_blocksize = 1;
+ inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask;
+ inst->alg.cra_type = &crypto_nivaead_type;
+ inst->alg.cra_aead.ivsize = ivsize;
+ inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE;
+ inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen;
+ inst->alg.cra_init = chachapoly_init;
+ inst->alg.cra_exit = chachapoly_exit;
+ inst->alg.cra_aead.encrypt = chachapoly_encrypt;
+ inst->alg.cra_aead.decrypt = chachapoly_decrypt;
+ inst->alg.cra_aead.setkey = chachapoly_setkey;
+ inst->alg.cra_aead.setauthsize = chachapoly_setauthsize;
+ inst->alg.cra_aead.geniv = "seqiv";
+
+out:
+ crypto_mod_put(poly);
+ return inst;
+
+out_drop_chacha:
+ crypto_drop_skcipher(&ctx->chacha);
+err_drop_poly:
+ crypto_drop_ahash(&ctx->poly);
+err_free_inst:
+ kfree(inst);
+out_put_poly:
+ inst = ERR_PTR(err);
+ goto out;
+}
+
+static struct crypto_instance *rfc7539_alloc(struct rtattr **tb)
+{
+ return chachapoly_alloc(tb, "rfc7539", 12);
+}
+
+static struct crypto_instance *rfc7539esp_alloc(struct rtattr **tb)
+{
+ return chachapoly_alloc(tb, "rfc7539esp", 8);
+}
+
+static void chachapoly_free(struct crypto_instance *inst)
+{
+ struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ctx->chacha);
+ crypto_drop_ahash(&ctx->poly);
+ kfree(inst);
+}
+
+static struct crypto_template rfc7539_tmpl = {
+ .name = "rfc7539",
+ .alloc = rfc7539_alloc,
+ .free = chachapoly_free,
+ .module = THIS_MODULE,
+};
+
+static struct crypto_template rfc7539esp_tmpl = {
+ .name = "rfc7539esp",
+ .alloc = rfc7539esp_alloc,
+ .free = chachapoly_free,
+ .module = THIS_MODULE,
+};
+
+static int __init chacha20poly1305_module_init(void)
+{
+ int err;
+
+ err = crypto_register_template(&rfc7539_tmpl);
+ if (err)
+ return err;
+
+ err = crypto_register_template(&rfc7539esp_tmpl);
+ if (err)
+ crypto_unregister_template(&rfc7539_tmpl);
+
+ return err;
+}
+
+static void __exit chacha20poly1305_module_exit(void)
+{
+ crypto_unregister_template(&rfc7539esp_tmpl);
+ crypto_unregister_template(&rfc7539_tmpl);
+}
+
+module_init(chacha20poly1305_module_init);
+module_exit(chacha20poly1305_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi ");
+MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD");
+MODULE_ALIAS_CRYPTO("chacha20poly1305");
+MODULE_ALIAS_CRYPTO("rfc7539");
+MODULE_ALIAS_CRYPTO("rfc7539esp");
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 63c17d5992f7..b4340018c8d4 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -80,44 +80,37 @@ unlock:
return err;
}
-static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
+static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+ struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
int err = 0;
- spin_lock_bh(&ctx->lock);
- if (crypto_ablkcipher_crt(geniv)->givencrypt !=
- chainiv_givencrypt_first)
- goto unlock;
-
- crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
- crypto_ablkcipher_ivsize(geniv));
-
-unlock:
- spin_unlock_bh(&ctx->lock);
-
- if (err)
- return err;
-
- return chainiv_givencrypt(req);
-}
-
-static int chainiv_init_common(struct crypto_tfm *tfm)
-{
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
- return skcipher_geniv_init(tfm);
+ if (iv) {
+ err = crypto_rng_get_bytes(crypto_default_rng, iv,
+ crypto_ablkcipher_ivsize(geniv));
+ crypto_put_default_rng();
+ }
+
+ return err ?: skcipher_geniv_init(tfm);
}
static int chainiv_init(struct crypto_tfm *tfm)
{
+ struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
+ char *iv;
spin_lock_init(&ctx->lock);
- return chainiv_init_common(tfm);
+ iv = NULL;
+ if (!crypto_get_default_rng()) {
+ crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
+ iv = ctx->iv;
+ }
+
+ return chainiv_init_common(tfm, iv);
}
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
@@ -205,33 +198,6 @@ postpone:
return async_chainiv_postpone_request(req);
}
-static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- int err = 0;
-
- if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
- goto out;
-
- if (crypto_ablkcipher_crt(geniv)->givencrypt !=
- async_chainiv_givencrypt_first)
- goto unlock;
-
- crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
- crypto_ablkcipher_ivsize(geniv));
-
-unlock:
- clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-
- if (err)
- return err;
-
-out:
- return async_chainiv_givencrypt(req);
-}
-
static void async_chainiv_do_postponed(struct work_struct *work)
{
struct async_chainiv_ctx *ctx = container_of(work,
@@ -263,14 +229,23 @@ static void async_chainiv_do_postponed(struct work_struct *work)
static int async_chainiv_init(struct crypto_tfm *tfm)
{
+ struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
+ char *iv;
spin_lock_init(&ctx->lock);
crypto_init_queue(&ctx->queue, 100);
INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
- return chainiv_init_common(tfm);
+ iv = NULL;
+ if (!crypto_get_default_rng()) {
+ crypto_ablkcipher_crt(geniv)->givencrypt =
+ async_chainiv_givencrypt;
+ iv = ctx->iv;
+ }
+
+ return chainiv_init_common(tfm, iv);
}
static void async_chainiv_exit(struct crypto_tfm *tfm)
@@ -288,21 +263,14 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
- int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return ERR_CAST(algt);
- err = crypto_get_default_rng();
- if (err)
- return ERR_PTR(err);
-
inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
if (IS_ERR(inst))
- goto put_rng;
-
- inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;
+ goto out;
inst->alg.cra_init = chainiv_init;
inst->alg.cra_exit = skcipher_geniv_exit;
@@ -312,9 +280,6 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
if (!crypto_requires_sync(algt->type, algt->mask)) {
inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
- inst->alg.cra_ablkcipher.givencrypt =
- async_chainiv_givencrypt_first;
-
inst->alg.cra_init = async_chainiv_init;
inst->alg.cra_exit = async_chainiv_exit;
@@ -325,22 +290,12 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
out:
return inst;
-
-put_rng:
- crypto_put_default_rng();
- goto out;
-}
-
-static void chainiv_free(struct crypto_instance *inst)
-{
- skcipher_geniv_free(inst);
- crypto_put_default_rng();
}
static struct crypto_template chainiv_tmpl = {
.name = "chainiv",
.alloc = chainiv_alloc,
- .free = chainiv_free,
+ .free = skcipher_geniv_free,
.module = THIS_MODULE,
};
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index b0602ba03111..22ba81f76764 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -295,6 +295,23 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
crypto_free_blkcipher(ctx->child);
}
+static int cryptd_init_instance(struct crypto_instance *inst,
+ struct crypto_alg *alg)
+{
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "cryptd(%s)",
+ alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ return -ENAMETOOLONG;
+
+ memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+
+ inst->alg.cra_priority = alg->cra_priority + 50;
+ inst->alg.cra_blocksize = alg->cra_blocksize;
+ inst->alg.cra_alignmask = alg->cra_alignmask;
+
+ return 0;
+}
+
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
unsigned int tail)
{
@@ -308,17 +325,10 @@ static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
inst = (void *)(p + head);
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ err = cryptd_init_instance(inst, alg);
+ if (err)
goto out_free_inst;
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-
- inst->alg.cra_priority = alg->cra_priority + 50;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
-
out:
return p;
@@ -654,6 +664,24 @@ out_put_alg:
return err;
}
+static int cryptd_aead_setkey(struct crypto_aead *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
+ struct crypto_aead *child = ctx->child;
+
+ return crypto_aead_setkey(child, key, keylen);
+}
+
+static int cryptd_aead_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
+ struct crypto_aead *child = ctx->child;
+
+ return crypto_aead_setauthsize(child, authsize);
+}
+
static void cryptd_aead_crypt(struct aead_request *req,
struct crypto_aead *child,
int err,
@@ -715,27 +743,26 @@ static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}
-static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
+static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *cipher;
cipher = crypto_spawn_aead(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
ctx->child = cipher;
- tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
+ crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx));
return 0;
}
-static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
+static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
}
@@ -744,57 +771,57 @@ static int cryptd_create_aead(struct crypto_template *tmpl,
struct cryptd_queue *queue)
{
struct aead_instance_ctx *ctx;
- struct crypto_instance *inst;
- struct crypto_alg *alg;
- u32 type = CRYPTO_ALG_TYPE_AEAD;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
+ struct aead_instance *inst;
+ struct aead_alg *alg;
+ const char *name;
+ u32 type = 0;
+ u32 mask = 0;
int err;
cryptd_check_internal(tb, &type, &mask);
- alg = crypto_get_attr_alg(tb, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
+ name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
- inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
- ctx = crypto_instance_ctx(inst);
+ ctx = aead_instance_ctx(inst);
ctx->queue = queue;
- err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
+ err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.cra_flags = type;
- inst->alg.cra_type = alg->cra_type;
- inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
- inst->alg.cra_init = cryptd_aead_init_tfm;
- inst->alg.cra_exit = cryptd_aead_exit_tfm;
- inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
- inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
- inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
- inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
- inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
- inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
- inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
- inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
- inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;
+ alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
+ err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
+ if (err)
+ goto out_drop_aead;
- err = crypto_register_instance(tmpl, inst);
+ inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+ inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
+
+ inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
+ inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
+
+ inst->alg.init = cryptd_aead_init_tfm;
+ inst->alg.exit = cryptd_aead_exit_tfm;
+ inst->alg.setkey = cryptd_aead_setkey;
+ inst->alg.setauthsize = cryptd_aead_setauthsize;
+ inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
+ inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
+
+ err = aead_register_instance(tmpl, inst);
if (err) {
- crypto_drop_spawn(&ctx->aead_spawn.base);
+out_drop_aead:
+ crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
kfree(inst);
}
-out_put_alg:
- crypto_mod_put(alg);
return err;
}
@@ -832,8 +859,8 @@ static void cryptd_free(struct crypto_instance *inst)
kfree(ahash_instance(inst));
return;
case CRYPTO_ALG_TYPE_AEAD:
- crypto_drop_spawn(&aead_ctx->aead_spawn.base);
- kfree(inst);
+ crypto_drop_aead(&aead_ctx->aead_spawn);
+ kfree(aead_instance(inst));
return;
default:
crypto_drop_spawn(&ctx->spawn);
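Callers normally reach the instances built by cryptd_create_aead() through the helper API in include/crypto/cryptd.h rather than by spelling out the "cryptd(...)" template name by hand. A minimal sketch; the algorithm name and flags here are illustrative only and not taken from this patch:

    struct cryptd_aead *ctfm;

    ctfm = cryptd_alloc_aead("gcm(aes)", CRYPTO_ALG_ASYNC, CRYPTO_ALG_ASYNC);
    if (IS_ERR(ctfm))
            return PTR_ERR(ctfm);
    /* &ctfm->base is an ordinary struct crypto_aead; the wrapped
     * synchronous tfm stays reachable via cryptd_aead_child(ctfm). */
    cryptd_free_aead(ctfm);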
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index a20319132e33..941c9a434d50 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -25,6 +25,10 @@
#include <linux/mm.h>
#include <linux/string.h>
+static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
+static struct crypto_blkcipher *crypto_default_null_skcipher;
+static int crypto_default_null_skcipher_refcnt;
+
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
@@ -149,6 +153,41 @@ MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
+struct crypto_blkcipher *crypto_get_default_null_skcipher(void)
+{
+ struct crypto_blkcipher *tfm;
+
+ mutex_lock(&crypto_default_null_skcipher_lock);
+ tfm = crypto_default_null_skcipher;
+
+ if (!tfm) {
+ tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0);
+ if (IS_ERR(tfm))
+ goto unlock;
+
+ crypto_default_null_skcipher = tfm;
+ }
+
+ crypto_default_null_skcipher_refcnt++;
+
+unlock:
+ mutex_unlock(&crypto_default_null_skcipher_lock);
+
+ return tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
+
+void crypto_put_default_null_skcipher(void)
+{
+ mutex_lock(&crypto_default_null_skcipher_lock);
+ if (!--crypto_default_null_skcipher_refcnt) {
+ crypto_free_blkcipher(crypto_default_null_skcipher);
+ crypto_default_null_skcipher = NULL;
+ }
+ mutex_unlock(&crypto_default_null_skcipher_lock);
+}
+EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
+
static int __init crypto_null_mod_init(void)
{
int ret = 0;
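The get/put pair above implements a refcounted singleton: the first caller allocates "ecb(cipher_null)" under the mutex, subsequent callers only bump the refcount, and the tfm is freed once the count drops to zero. A minimal consumer sketch (hypothetical helper names; this mirrors what gcm and echainiv do further down in this patch):

    static struct crypto_blkcipher *null_tfm;

    static int example_get(void)
    {
            null_tfm = crypto_get_default_null_skcipher();
            if (IS_ERR(null_tfm))
                    return PTR_ERR(null_tfm);
            /* ecb(cipher_null) copies src to dst unchanged, so it acts
             * as a scatterlist-to-scatterlist memcpy helper. */
            return 0;
    }

    static void example_put(void)
    {
            crypto_put_default_null_skcipher();
    }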
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 41dfe762b7fb..08ea2867fc8a 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -27,6 +27,8 @@
#include <linux/security.h>
#include <net/net_namespace.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/rng.h>
+#include <crypto/akcipher.h>
#include "internal.h"
@@ -110,6 +112,21 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_akcipher rakcipher;
+
+ strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
+ sizeof(struct crypto_report_akcipher), &rakcipher))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
@@ -154,6 +171,12 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
+
+ case CRYPTO_ALG_TYPE_AKCIPHER:
+ if (crypto_report_akcipher(skb, alg))
+ goto nla_put_failure;
+
+ break;
}
out:
@@ -450,13 +473,21 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
}
+static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr **attrs)
+{
+ if (!netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+ return crypto_del_default_rng();
+}
+
#define MSGSIZE(type) sizeof(struct type)
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
[CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+ [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
};
static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
@@ -476,6 +507,7 @@ static const struct crypto_link {
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report,
.dump = crypto_dump_report,
.done = crypto_dump_report_done},
+ [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
};
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
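CRYPTO_MSG_DELRNG carries no payload (hence the zero entry in crypto_msg_min above) and is gated on CAP_NET_ADMIN. A hedged userspace sketch of issuing it over NETLINK_CRYPTO, assuming <sys/socket.h>, <linux/netlink.h>, <linux/cryptouser.h>, <string.h> and <unistd.h>:

    int del_default_rng(void)
    {
            int sk = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
            struct nlmsghdr nlh;

            if (sk < 0)
                    return -1;
            memset(&nlh, 0, sizeof(nlh));
            nlh.nlmsg_len = NLMSG_LENGTH(0);        /* no payload */
            nlh.nlmsg_type = CRYPTO_MSG_DELRNG;
            nlh.nlmsg_flags = NLM_F_REQUEST;
            if (send(sk, &nlh, nlh.nlmsg_len, 0) < 0) {
                    close(sk);
                    return -1;
            }
            close(sk);
            return 0;
    }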
diff --git a/crypto/drbg.c b/crypto/drbg.c
index b69409cb7e6a..a7c23146b87f 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,6 +98,7 @@
*/
#include <crypto/drbg.h>
+#include <crypto/internal/rng.h>
/***************************************************************
* Backend cipher definitions available to DRBG
@@ -190,6 +191,8 @@ static const struct drbg_core drbg_cores[] = {
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
};
+static int drbg_uninstantiate(struct drbg_state *drbg);
+
/******************************************************************
* Generic helper functions
******************************************************************/
@@ -235,7 +238,7 @@ static bool drbg_fips_continuous_test(struct drbg_state *drbg,
#ifdef CONFIG_CRYPTO_FIPS
int ret = 0;
/* skip test if we test the overall system */
- if (drbg->test_data)
+ if (list_empty(&drbg->test_data.list))
return true;
/* only perform test in FIPS mode */
if (0 == fips_enabled)
@@ -487,7 +490,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
out:
memset(iv, 0, drbg_blocklen(drbg));
- memset(temp, 0, drbg_statelen(drbg));
+ memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
memset(pad, 0, drbg_blocklen(drbg));
return ret;
}
@@ -1041,6 +1044,58 @@ static struct drbg_state_ops drbg_hash_ops = {
* Functions common for DRBG implementations
******************************************************************/
+static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
+ int reseed)
+{
+ int ret = drbg->d_ops->update(drbg, seed, reseed);
+
+ if (ret)
+ return ret;
+
+ drbg->seeded = true;
+ /* 10.1.1.2 / 10.1.1.3 step 5 */
+ drbg->reseed_ctr = 1;
+
+ return ret;
+}
+
+static void drbg_async_seed(struct work_struct *work)
+{
+ struct drbg_string data;
+ LIST_HEAD(seedlist);
+ struct drbg_state *drbg = container_of(work, struct drbg_state,
+ seed_work);
+ unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
+ unsigned char entropy[32];
+
+ BUG_ON(!entropylen);
+ BUG_ON(entropylen > sizeof(entropy));
+ get_random_bytes(entropy, entropylen);
+
+ drbg_string_fill(&data, entropy, entropylen);
+ list_add_tail(&data.list, &seedlist);
+
+ mutex_lock(&drbg->drbg_mutex);
+
+ /* If nonblocking pool is initialized, deactivate Jitter RNG */
+ crypto_free_rng(drbg->jent);
+ drbg->jent = NULL;
+
+ /* Set seeded to false so that if __drbg_seed fails the
+ * next generate call will trigger a reseed.
+ */
+ drbg->seeded = false;
+
+ __drbg_seed(drbg, &seedlist, true);
+
+ if (drbg->seeded)
+ drbg->reseed_threshold = drbg_max_requests(drbg);
+
+ mutex_unlock(&drbg->drbg_mutex);
+
+ memzero_explicit(entropy, entropylen);
+}
+
/*
* Seeding or reseeding of the DRBG
*
@@ -1055,9 +1110,9 @@ static struct drbg_state_ops drbg_hash_ops = {
static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
bool reseed)
{
- int ret = 0;
- unsigned char *entropy = NULL;
- size_t entropylen = 0;
+ int ret;
+ unsigned char entropy[((32 + 16) * 2)];
+ unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
@@ -1068,31 +1123,45 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
return -EINVAL;
}
- if (drbg->test_data && drbg->test_data->testentropy) {
- drbg_string_fill(&data1, drbg->test_data->testentropy->buf,
- drbg->test_data->testentropy->len);
+ if (list_empty(&drbg->test_data.list)) {
+ drbg_string_fill(&data1, drbg->test_data.buf,
+ drbg->test_data.len);
pr_devel("DRBG: using test entropy\n");
} else {
/*
* Gather entropy equal to the security strength of the DRBG.
* With a derivation function, a nonce is required in addition
* to the entropy. A nonce must be at least 1/2 of the security
- * strength of the DRBG in size. Thus, entropy * nonce is 3/2
+ * strength of the DRBG in size. Thus, entropy + nonce is 3/2
* of the strength. The consideration of a nonce is only
* applicable during initial seeding.
*/
- entropylen = drbg_sec_strength(drbg->core->flags);
- if (!entropylen)
- return -EFAULT;
+ BUG_ON(!entropylen);
if (!reseed)
entropylen = ((entropylen + 1) / 2) * 3;
- pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n",
- entropylen);
- entropy = kzalloc(entropylen, GFP_KERNEL);
- if (!entropy)
- return -ENOMEM;
+ BUG_ON((entropylen * 2) > sizeof(entropy));
+
+ /* Get seed from in-kernel /dev/urandom */
get_random_bytes(entropy, entropylen);
- drbg_string_fill(&data1, entropy, entropylen);
+
+ if (!drbg->jent) {
+ drbg_string_fill(&data1, entropy, entropylen);
+ pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
+ entropylen);
+ } else {
+ /* Get seed from Jitter RNG */
+ ret = crypto_rng_get_bytes(drbg->jent,
+ entropy + entropylen,
+ entropylen);
+ if (ret) {
+ pr_devel("DRBG: jent failed with %d\n", ret);
+ return ret;
+ }
+
+ drbg_string_fill(&data1, entropy, entropylen * 2);
+ pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
+ entropylen * 2);
+ }
}
list_add_tail(&data1.list, &seedlist);
@@ -1111,16 +1180,10 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
memset(drbg->C, 0, drbg_statelen(drbg));
}
- ret = drbg->d_ops->update(drbg, &seedlist, reseed);
- if (ret)
- goto out;
+ ret = __drbg_seed(drbg, &seedlist, reseed);
- drbg->seeded = true;
- /* 10.1.1.2 / 10.1.1.3 step 5 */
- drbg->reseed_ctr = 1;
+ memzero_explicit(entropy, entropylen * 2);
-out:
- kzfree(entropy);
return ret;
}
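The on-stack entropy[] buffer above replaces the old kzalloc and is sized for the worst case. A worked example for a SHA-256 based core, where drbg_sec_strength() yields 32 bytes:

    entropylen = 32;                    /* reseed: strength only */
    entropylen = ((32 + 1) / 2) * 3;    /* first seed adds the nonce: 48 */
    /* With the Jitter RNG contributing a second half of equal size, at
     * most 2 * 48 = 96 = ((32 + 16) * 2) bytes are needed, which is
     * exactly the dimension of entropy[] and what the BUG_ON verifies. */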
@@ -1136,6 +1199,8 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
kzfree(drbg->scratchpad);
drbg->scratchpad = NULL;
drbg->reseed_ctr = 0;
+ drbg->d_ops = NULL;
+ drbg->core = NULL;
#ifdef CONFIG_CRYPTO_FIPS
kzfree(drbg->prev);
drbg->prev = NULL;
@@ -1152,6 +1217,27 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
int ret = -ENOMEM;
unsigned int sb_size = 0;
+ switch (drbg->core->flags & DRBG_TYPE_MASK) {
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+ case DRBG_HMAC:
+ drbg->d_ops = &drbg_hmac_ops;
+ break;
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+ case DRBG_HASH:
+ drbg->d_ops = &drbg_hash_ops;
+ break;
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+ case DRBG_CTR:
+ drbg->d_ops = &drbg_ctr_ops;
+ break;
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+ default:
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
if (!drbg->V)
goto err;
@@ -1181,7 +1267,7 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
if (!drbg->scratchpad)
goto err;
}
- spin_lock_init(&drbg->drbg_lock);
+
return 0;
err:
@@ -1189,79 +1275,6 @@ err:
return ret;
}
-/*
- * Strategy to avoid holding long term locks: generate a shadow copy of DRBG
- * and perform all operations on this shadow copy. After finishing, restore
- * the updated state of the shadow copy into original drbg state. This way,
- * only the read and write operations of the original drbg state must be
- * locked
- */
-static inline void drbg_copy_drbg(struct drbg_state *src,
- struct drbg_state *dst)
-{
- if (!src || !dst)
- return;
- memcpy(dst->V, src->V, drbg_statelen(src));
- memcpy(dst->C, src->C, drbg_statelen(src));
- dst->reseed_ctr = src->reseed_ctr;
- dst->seeded = src->seeded;
- dst->pr = src->pr;
-#ifdef CONFIG_CRYPTO_FIPS
- dst->fips_primed = src->fips_primed;
- memcpy(dst->prev, src->prev, drbg_blocklen(src));
-#endif
- /*
- * Not copied:
- * scratchpad is initialized drbg_alloc_state;
- * priv_data is initialized with call to crypto_init;
- * d_ops and core are set outside, as these parameters are const;
- * test_data is set outside to prevent it being copied back.
- */
-}
-
-static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow)
-{
- int ret = -ENOMEM;
- struct drbg_state *tmp = NULL;
-
- tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- /* read-only data as they are defined as const, no lock needed */
- tmp->core = drbg->core;
- tmp->d_ops = drbg->d_ops;
-
- ret = drbg_alloc_state(tmp);
- if (ret)
- goto err;
-
- spin_lock_bh(&drbg->drbg_lock);
- drbg_copy_drbg(drbg, tmp);
- /* only make a link to the test buffer, as we only read that data */
- tmp->test_data = drbg->test_data;
- spin_unlock_bh(&drbg->drbg_lock);
- *shadow = tmp;
- return 0;
-
-err:
- kzfree(tmp);
- return ret;
-}
-
-static void drbg_restore_shadow(struct drbg_state *drbg,
- struct drbg_state **shadow)
-{
- struct drbg_state *tmp = *shadow;
-
- spin_lock_bh(&drbg->drbg_lock);
- drbg_copy_drbg(tmp, drbg);
- spin_unlock_bh(&drbg->drbg_lock);
- drbg_dealloc_state(tmp);
- kzfree(tmp);
- *shadow = NULL;
-}
-
/*************************************************************************
* DRBG interface functions
*************************************************************************/
@@ -1287,14 +1300,12 @@ static int drbg_generate(struct drbg_state *drbg,
struct drbg_string *addtl)
{
int len = 0;
- struct drbg_state *shadow = NULL;
LIST_HEAD(addtllist);
- struct drbg_string timestamp;
- union {
- cycles_t cycles;
- unsigned char char_cycles[sizeof(cycles_t)];
- } now;
+ if (!drbg->core) {
+ pr_devel("DRBG: not yet seeded\n");
+ return -EINVAL;
+ }
if (0 == buflen || !buf) {
pr_devel("DRBG: no output buffer provided\n");
return -EINVAL;
@@ -1304,15 +1315,9 @@ static int drbg_generate(struct drbg_state *drbg,
return -EINVAL;
}
- len = drbg_make_shadow(drbg, &shadow);
- if (len) {
- pr_devel("DRBG: shadow copy cannot be generated\n");
- return len;
- }
-
/* 9.3.1 step 2 */
len = -EINVAL;
- if (buflen > (drbg_max_request_bytes(shadow))) {
+ if (buflen > (drbg_max_request_bytes(drbg))) {
pr_devel("DRBG: requested random numbers too large %u\n",
buflen);
goto err;
@@ -1321,7 +1326,7 @@ static int drbg_generate(struct drbg_state *drbg,
/* 9.3.1 step 3 is implicit with the chosen DRBG */
/* 9.3.1 step 4 */
- if (addtl && addtl->len > (drbg_max_addtl(shadow))) {
+ if (addtl && addtl->len > (drbg_max_addtl(drbg))) {
pr_devel("DRBG: additional information string too long %zu\n",
addtl->len);
goto err;
@@ -1332,46 +1337,29 @@ static int drbg_generate(struct drbg_state *drbg,
* 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented
* here. The spec is a bit convoluted here, we make it simpler.
*/
- if ((drbg_max_requests(shadow)) < shadow->reseed_ctr)
- shadow->seeded = false;
+ if (drbg->reseed_threshold < drbg->reseed_ctr)
+ drbg->seeded = false;
- /* allocate cipher handle */
- len = shadow->d_ops->crypto_init(shadow);
- if (len)
- goto err;
-
- if (shadow->pr || !shadow->seeded) {
+ if (drbg->pr || !drbg->seeded) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
drbg->pr ? "true" : "false",
drbg->seeded ? "seeded" : "unseeded");
/* 9.3.1 steps 7.1 through 7.3 */
- len = drbg_seed(shadow, addtl, true);
+ len = drbg_seed(drbg, addtl, true);
if (len)
goto err;
/* 9.3.1 step 7.4 */
addtl = NULL;
}
- /*
- * Mix the time stamp into the DRBG state if the DRBG is not in
- * test mode. If there are two callers invoking the DRBG at the same
- * time, i.e. before the first caller merges its shadow state back,
- * both callers would obtain the same random number stream without
- * changing the state here.
- */
- if (!drbg->test_data) {
- now.cycles = random_get_entropy();
- drbg_string_fill(&timestamp, now.char_cycles, sizeof(cycles_t));
- list_add_tail(&timestamp.list, &addtllist);
- }
if (addtl && 0 < addtl->len)
list_add_tail(&addtl->list, &addtllist);
/* 9.3.1 step 8 and 10 */
- len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist);
+ len = drbg->d_ops->generate(drbg, buf, buflen, &addtllist);
/* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */
- shadow->reseed_ctr++;
+ drbg->reseed_ctr++;
if (0 >= len)
goto err;
@@ -1391,7 +1379,7 @@ static int drbg_generate(struct drbg_state *drbg,
* case somebody has a need to implement the test of 11.3.3.
*/
#if 0
- if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) {
+ if (drbg->reseed_ctr && !(drbg->reseed_ctr % 4096)) {
int err = 0;
pr_devel("DRBG: start to perform self test\n");
if (drbg->core->flags & DRBG_HMAC)
@@ -1410,8 +1398,6 @@ static int drbg_generate(struct drbg_state *drbg,
* are returned when reusing this DRBG cipher handle
*/
drbg_uninstantiate(drbg);
- drbg_dealloc_state(shadow);
- kzfree(shadow);
return 0;
} else {
pr_devel("DRBG: self test successful\n");
@@ -1425,8 +1411,6 @@ static int drbg_generate(struct drbg_state *drbg,
*/
len = 0;
err:
- shadow->d_ops->crypto_fini(shadow);
- drbg_restore_shadow(drbg, &shadow);
return len;
}
@@ -1442,19 +1426,68 @@ static int drbg_generate_long(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct drbg_string *addtl)
{
- int len = 0;
+ unsigned int len = 0;
unsigned int slice = 0;
do {
- int tmplen = 0;
+ int err = 0;
unsigned int chunk = 0;
slice = ((buflen - len) / drbg_max_request_bytes(drbg));
chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len);
- tmplen = drbg_generate(drbg, buf + len, chunk, addtl);
- if (0 >= tmplen)
- return tmplen;
- len += tmplen;
+ mutex_lock(&drbg->drbg_mutex);
+ err = drbg_generate(drbg, buf + len, chunk, addtl);
+ mutex_unlock(&drbg->drbg_mutex);
+ if (0 > err)
+ return err;
+ len += chunk;
} while (slice > 0 && (len < buflen));
- return len;
+ return 0;
+}
+
+static void drbg_schedule_async_seed(struct random_ready_callback *rdy)
+{
+ struct drbg_state *drbg = container_of(rdy, struct drbg_state,
+ random_ready);
+
+ schedule_work(&drbg->seed_work);
+}
+
+static int drbg_prepare_hrng(struct drbg_state *drbg)
+{
+ int err;
+
+ /* We do not need an HRNG in test mode. */
+ if (list_empty(&drbg->test_data.list))
+ return 0;
+
+ INIT_WORK(&drbg->seed_work, drbg_async_seed);
+
+ drbg->random_ready.owner = THIS_MODULE;
+ drbg->random_ready.func = drbg_schedule_async_seed;
+
+ err = add_random_ready_callback(&drbg->random_ready);
+
+ switch (err) {
+ case 0:
+ break;
+
+ case -EALREADY:
+ err = 0;
+ /* fall through */
+
+ default:
+ drbg->random_ready.func = NULL;
+ return err;
+ }
+
+ drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
+
+ /*
+ * Require frequent reseeds until the seed source is fully
+ * initialized.
+ */
+ drbg->reseed_threshold = 50;
+
+ return err;
}
/*
@@ -1477,32 +1510,12 @@ static int drbg_generate_long(struct drbg_state *drbg,
static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
int coreref, bool pr)
{
- int ret = -ENOMEM;
+ int ret;
+ bool reseed = true;
pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
"%s\n", coreref, pr ? "enabled" : "disabled");
- drbg->core = &drbg_cores[coreref];
- drbg->pr = pr;
- drbg->seeded = false;
- switch (drbg->core->flags & DRBG_TYPE_MASK) {
-#ifdef CONFIG_CRYPTO_DRBG_HMAC
- case DRBG_HMAC:
- drbg->d_ops = &drbg_hmac_ops;
- break;
-#endif /* CONFIG_CRYPTO_DRBG_HMAC */
-#ifdef CONFIG_CRYPTO_DRBG_HASH
- case DRBG_HASH:
- drbg->d_ops = &drbg_hash_ops;
- break;
-#endif /* CONFIG_CRYPTO_DRBG_HASH */
-#ifdef CONFIG_CRYPTO_DRBG_CTR
- case DRBG_CTR:
- drbg->d_ops = &drbg_ctr_ops;
- break;
-#endif /* CONFIG_CRYPTO_DRBG_CTR */
- default:
- return -EOPNOTSUPP;
- }
+ mutex_lock(&drbg->drbg_mutex);
/* 9.1 step 1 is implicit with the selected DRBG type */
@@ -1514,22 +1527,52 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
/* 9.1 step 4 is implicit in drbg_sec_strength */
- ret = drbg_alloc_state(drbg);
- if (ret)
- return ret;
+ if (!drbg->core) {
+ drbg->core = &drbg_cores[coreref];
+ drbg->pr = pr;
+ drbg->seeded = false;
+ drbg->reseed_threshold = drbg_max_requests(drbg);
- ret = -EFAULT;
- if (drbg->d_ops->crypto_init(drbg))
- goto err;
- ret = drbg_seed(drbg, pers, false);
- drbg->d_ops->crypto_fini(drbg);
- if (ret)
- goto err;
+ ret = drbg_alloc_state(drbg);
+ if (ret)
+ goto unlock;
- return 0;
+ ret = -EFAULT;
+ if (drbg->d_ops->crypto_init(drbg))
+ goto err;
+
+ ret = drbg_prepare_hrng(drbg);
+ if (ret)
+ goto free_everything;
+
+ if (IS_ERR(drbg->jent)) {
+ ret = PTR_ERR(drbg->jent);
+ drbg->jent = NULL;
+ if (fips_enabled || ret != -ENOENT)
+ goto free_everything;
+ pr_info("DRBG: Continuing without Jitter RNG\n");
+ }
+
+ reseed = false;
+ }
+
+ ret = drbg_seed(drbg, pers, reseed);
+
+ if (ret && !reseed)
+ goto free_everything;
+
+ mutex_unlock(&drbg->drbg_mutex);
+ return ret;
err:
drbg_dealloc_state(drbg);
+unlock:
+ mutex_unlock(&drbg->drbg_mutex);
+ return ret;
+
+free_everything:
+ mutex_unlock(&drbg->drbg_mutex);
+ drbg_uninstantiate(drbg);
return ret;
}
@@ -1544,10 +1587,17 @@ err:
*/
static int drbg_uninstantiate(struct drbg_state *drbg)
{
- spin_lock_bh(&drbg->drbg_lock);
+ if (drbg->random_ready.func) {
+ del_random_ready_callback(&drbg->random_ready);
+ cancel_work_sync(&drbg->seed_work);
+ crypto_free_rng(drbg->jent);
+ drbg->jent = NULL;
+ }
+
+ if (drbg->d_ops)
+ drbg->d_ops->crypto_fini(drbg);
drbg_dealloc_state(drbg);
/* no scrubbing of test_data -- this shall survive an uninstantiate */
- spin_unlock_bh(&drbg->drbg_lock);
return 0;
}
@@ -1555,16 +1605,17 @@ static int drbg_uninstantiate(struct drbg_state *drbg)
* Helper function for setting the test data in the DRBG
*
* @drbg DRBG state handle
- * @test_data test data to sets
+ * @data test data
+ * @len test data length
*/
-static inline void drbg_set_testdata(struct drbg_state *drbg,
- struct drbg_test_data *test_data)
+static void drbg_kcapi_set_entropy(struct crypto_rng *tfm,
+ const u8 *data, unsigned int len)
{
- if (!test_data || !test_data->testentropy)
- return;
- spin_lock_bh(&drbg->drbg_lock);
- drbg->test_data = test_data;
- spin_unlock_bh(&drbg->drbg_lock);
+ struct drbg_state *drbg = crypto_rng_ctx(tfm);
+
+ mutex_lock(&drbg->drbg_mutex);
+ drbg_string_fill(&drbg->test_data, data, len);
+ mutex_unlock(&drbg->drbg_mutex);
}
/***************************************************************
@@ -1584,7 +1635,8 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
- pr_info("DRBG: could not allocate digest TFM handle\n");
+ pr_info("DRBG: could not allocate digest TFM handle: %s\n",
+ drbg->core->backend_cra_name);
return PTR_ERR(tfm);
}
BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm));
@@ -1635,7 +1687,8 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
- pr_info("DRBG: could not allocate cipher TFM handle\n");
+ pr_info("DRBG: could not allocate cipher TFM handle: %s\n",
+ drbg->core->backend_cra_name);
return PTR_ERR(tfm);
}
BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
@@ -1714,15 +1767,10 @@ static inline void drbg_convert_tfm_core(const char *cra_driver_name,
static int drbg_kcapi_init(struct crypto_tfm *tfm)
{
struct drbg_state *drbg = crypto_tfm_ctx(tfm);
- bool pr = false;
- int coreref = 0;
- drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr);
- /*
- * when personalization string is needed, the caller must call reset
- * and provide the personalization string as seed information
- */
- return drbg_instantiate(drbg, NULL, coreref, pr);
+ mutex_init(&drbg->drbg_mutex);
+
+ return 0;
}
static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
@@ -1734,65 +1782,49 @@ static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
* Generate random numbers invoked by the kernel crypto API:
* The API of the kernel crypto API is extended as follows:
*
- * If dlen is larger than zero, rdata is interpreted as the output buffer
- * where random data is to be stored.
- *
- * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen
- * which holds the additional information string that is used for the
- * DRBG generation process. The output buffer that is to be used to store
- * data is also pointed to by struct drbg_gen.
+ * src is additional input supplied to the RNG.
+ * slen is the length of src.
+ * dst is the output buffer where random data is to be stored.
+ * dlen is the length of dst.
*/
-static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata,
- unsigned int dlen)
+static int drbg_kcapi_random(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
- if (0 < dlen) {
- return drbg_generate_long(drbg, rdata, dlen, NULL);
- } else {
- struct drbg_gen *data = (struct drbg_gen *)rdata;
- struct drbg_string addtl;
- /* catch NULL pointer */
- if (!data)
- return 0;
- drbg_set_testdata(drbg, data->test_data);
+ struct drbg_string *addtl = NULL;
+ struct drbg_string string;
+
+ if (slen) {
/* linked list variable is now local to allow modification */
- drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len);
- return drbg_generate_long(drbg, data->outbuf, data->outlen,
- &addtl);
+ drbg_string_fill(&string, src, slen);
+ addtl = &string;
}
+
+ return drbg_generate_long(drbg, dst, dlen, addtl);
}
/*
- * Reset the DRBG invoked by the kernel crypto API
- * The reset implies a full re-initialization of the DRBG. Similar to the
- * generate function of drbg_kcapi_random, this function extends the
- * kernel crypto API interface with struct drbg_gen
+ * Seed the DRBG invoked by the kernel crypto API
*/
-static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+static int drbg_kcapi_seed(struct crypto_rng *tfm,
+ const u8 *seed, unsigned int slen)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm);
bool pr = false;
- struct drbg_string seed_string;
+ struct drbg_string string;
+ struct drbg_string *seed_string = NULL;
int coreref = 0;
- drbg_uninstantiate(drbg);
drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref,
&pr);
if (0 < slen) {
- drbg_string_fill(&seed_string, seed, slen);
- return drbg_instantiate(drbg, &seed_string, coreref, pr);
- } else {
- struct drbg_gen *data = (struct drbg_gen *)seed;
- /* allow invocation of API call with NULL, 0 */
- if (!data)
- return drbg_instantiate(drbg, NULL, coreref, pr);
- drbg_set_testdata(drbg, data->test_data);
- /* linked list variable is now local to allow modification */
- drbg_string_fill(&seed_string, data->addtl->buf,
- data->addtl->len);
- return drbg_instantiate(drbg, &seed_string, coreref, pr);
+ drbg_string_fill(&string, seed, slen);
+ seed_string = &string;
}
+
+ return drbg_instantiate(drbg, seed_string, coreref, pr);
}
/***************************************************************
@@ -1811,7 +1843,6 @@ static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
*/
static inline int __init drbg_healthcheck_sanity(void)
{
-#ifdef CONFIG_CRYPTO_FIPS
int len = 0;
#define OUTBUFLEN 16
unsigned char buf[OUTBUFLEN];
@@ -1839,6 +1870,8 @@ static inline int __init drbg_healthcheck_sanity(void)
if (!drbg)
return -ENOMEM;
+ mutex_init(&drbg->drbg_mutex);
+
/*
* if the following tests fail, it is likely that there is a buffer
* overflow as buf is much smaller than the requested or provided
@@ -1877,37 +1910,33 @@ static inline int __init drbg_healthcheck_sanity(void)
outbuf:
kzfree(drbg);
return rc;
-#else /* CONFIG_CRYPTO_FIPS */
- return 0;
-#endif /* CONFIG_CRYPTO_FIPS */
}
-static struct crypto_alg drbg_algs[22];
+static struct rng_alg drbg_algs[22];
/*
* Fill the array drbg_algs used to register the different DRBGs
* with the kernel crypto API. To fill the array, the information
* from drbg_cores[] is used.
*/
-static inline void __init drbg_fill_array(struct crypto_alg *alg,
+static inline void __init drbg_fill_array(struct rng_alg *alg,
const struct drbg_core *core, int pr)
{
int pos = 0;
- static int priority = 100;
+ static int priority = 200;
- memset(alg, 0, sizeof(struct crypto_alg));
- memcpy(alg->cra_name, "stdrng", 6);
+ memcpy(alg->base.cra_name, "stdrng", 6);
if (pr) {
- memcpy(alg->cra_driver_name, "drbg_pr_", 8);
+ memcpy(alg->base.cra_driver_name, "drbg_pr_", 8);
pos = 8;
} else {
- memcpy(alg->cra_driver_name, "drbg_nopr_", 10);
+ memcpy(alg->base.cra_driver_name, "drbg_nopr_", 10);
pos = 10;
}
- memcpy(alg->cra_driver_name + pos, core->cra_name,
+ memcpy(alg->base.cra_driver_name + pos, core->cra_name,
strlen(core->cra_name));
- alg->cra_priority = priority;
+ alg->base.cra_priority = priority;
priority++;
/*
* If FIPS mode enabled, the selected DRBG shall have the
@@ -1915,17 +1944,16 @@ static inline void __init drbg_fill_array(struct crypto_alg *alg,
* it is selected.
*/
if (fips_enabled)
- alg->cra_priority += 200;
+ alg->base.cra_priority += 200;
- alg->cra_flags = CRYPTO_ALG_TYPE_RNG;
- alg->cra_ctxsize = sizeof(struct drbg_state);
- alg->cra_type = &crypto_rng_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = drbg_kcapi_init;
- alg->cra_exit = drbg_kcapi_cleanup;
- alg->cra_u.rng.rng_make_random = drbg_kcapi_random;
- alg->cra_u.rng.rng_reset = drbg_kcapi_reset;
- alg->cra_u.rng.seedsize = 0;
+ alg->base.cra_ctxsize = sizeof(struct drbg_state);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_init = drbg_kcapi_init;
+ alg->base.cra_exit = drbg_kcapi_cleanup;
+ alg->generate = drbg_kcapi_random;
+ alg->seed = drbg_kcapi_seed;
+ alg->set_ent = drbg_kcapi_set_entropy;
+ alg->seedsize = 0;
}
static int __init drbg_init(void)
@@ -1958,12 +1986,12 @@ static int __init drbg_init(void)
drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
- return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+ return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
static void __exit drbg_exit(void)
{
- crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+ crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
module_init(drbg_init);
@@ -1984,3 +2012,4 @@ MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
CRYPTO_DRBG_HASH_STRING
CRYPTO_DRBG_HMAC_STRING
CRYPTO_DRBG_CTR_STRING);
+MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
new file mode 100644
index 000000000000..b6e43dc61356
--- /dev/null
+++ b/crypto/echainiv.c
@@ -0,0 +1,312 @@
+/*
+ * echainiv: Encrypted Chain IV Generator
+ *
+ * This generator generates an IV based on a sequence number by xoring it
+ * with a salt and then encrypting it with the same key as used to encrypt
+ * the plain text. This algorithm requires that the block size be equal
+ * to the IV size. It is mainly useful for CBC.
+ *
+ * This generator can only be used by algorithms where authentication
+ * is performed after encryption (i.e., authenc).
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/geniv.h>
+#include <crypto/null.h>
+#include <crypto/rng.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#define MAX_IV_SIZE 16
+
+struct echainiv_ctx {
+ /* aead_geniv_ctx must be the first element */
+ struct aead_geniv_ctx geniv;
+ struct crypto_blkcipher *null;
+ u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
+};
+
+static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
+
+/* We don't care if we get preempted and read/write IVs from the next CPU. */
+static void echainiv_read_iv(u8 *dst, unsigned size)
+{
+ u32 *a = (u32 *)dst;
+ u32 __percpu *b = echainiv_iv;
+
+ for (; size >= 4; size -= 4) {
+ *a++ = this_cpu_read(*b);
+ b++;
+ }
+}
+
+static void echainiv_write_iv(const u8 *src, unsigned size)
+{
+ const u32 *a = (const u32 *)src;
+ u32 __percpu *b = echainiv_iv;
+
+ for (; size >= 4; size -= 4) {
+ this_cpu_write(*b, *a);
+ a++;
+ b++;
+ }
+}
+
+static void echainiv_encrypt_complete2(struct aead_request *req, int err)
+{
+ struct aead_request *subreq = aead_request_ctx(req);
+ struct crypto_aead *geniv;
+ unsigned int ivsize;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (err)
+ goto out;
+
+ geniv = crypto_aead_reqtfm(req);
+ ivsize = crypto_aead_ivsize(geniv);
+
+ echainiv_write_iv(subreq->iv, ivsize);
+
+ if (req->iv != subreq->iv)
+ memcpy(req->iv, subreq->iv, ivsize);
+
+out:
+ if (req->iv != subreq->iv)
+ kzfree(subreq->iv);
+}
+
+static void echainiv_encrypt_complete(struct crypto_async_request *base,
+ int err)
+{
+ struct aead_request *req = base->data;
+
+ echainiv_encrypt_complete2(req, err);
+ aead_request_complete(req, err);
+}
+
+static int echainiv_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+ struct aead_request *subreq = aead_request_ctx(req);
+ crypto_completion_t compl;
+ void *data;
+ u8 *info;
+ unsigned int ivsize = crypto_aead_ivsize(geniv);
+ int err;
+
+ if (req->cryptlen < ivsize)
+ return -EINVAL;
+
+ aead_request_set_tfm(subreq, ctx->geniv.child);
+
+ compl = echainiv_encrypt_complete;
+ data = req;
+ info = req->iv;
+
+ if (req->src != req->dst) {
+ struct blkcipher_desc desc = {
+ .tfm = ctx->null,
+ };
+
+ err = crypto_blkcipher_encrypt(
+ &desc, req->dst, req->src,
+ req->assoclen + req->cryptlen);
+ if (err)
+ return err;
+ }
+
+ if (unlikely(!IS_ALIGNED((unsigned long)info,
+ crypto_aead_alignmask(geniv) + 1))) {
+ info = kmalloc(ivsize, req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
+ GFP_ATOMIC);
+ if (!info)
+ return -ENOMEM;
+
+ memcpy(info, req->iv, ivsize);
+ }
+
+ aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_crypt(subreq, req->dst, req->dst,
+ req->cryptlen - ivsize, info);
+ aead_request_set_ad(subreq, req->assoclen + ivsize);
+
+ crypto_xor(info, ctx->salt, ivsize);
+ scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
+ echainiv_read_iv(info, ivsize);
+
+ err = crypto_aead_encrypt(subreq);
+ echainiv_encrypt_complete2(req, err);
+ return err;
+}
+
+static int echainiv_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+ struct aead_request *subreq = aead_request_ctx(req);
+ crypto_completion_t compl;
+ void *data;
+ unsigned int ivsize = crypto_aead_ivsize(geniv);
+
+ if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
+ return -EINVAL;
+
+ aead_request_set_tfm(subreq, ctx->geniv.child);
+
+ compl = req->base.complete;
+ data = req->base.data;
+
+ aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen - ivsize, req->iv);
+ aead_request_set_ad(subreq, req->assoclen + ivsize);
+
+ scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
+ if (req->src != req->dst)
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->assoclen, ivsize, 1);
+
+ return crypto_aead_decrypt(subreq);
+}
+
+static int echainiv_init(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *geniv = __crypto_aead_cast(tfm);
+ struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+ int err;
+
+ spin_lock_init(&ctx->geniv.lock);
+
+ crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
+
+ err = crypto_get_default_rng();
+ if (err)
+ goto out;
+
+ err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+ crypto_aead_ivsize(geniv));
+ crypto_put_default_rng();
+ if (err)
+ goto out;
+
+ ctx->null = crypto_get_default_null_skcipher();
+ err = PTR_ERR(ctx->null);
+ if (IS_ERR(ctx->null))
+ goto out;
+
+ err = aead_geniv_init(tfm);
+ if (err)
+ goto drop_null;
+
+ ctx->geniv.child = geniv->child;
+ geniv->child = geniv;
+
+out:
+ return err;
+
+drop_null:
+ crypto_put_default_null_skcipher();
+ goto out;
+}
+
+static void echainiv_exit(struct crypto_tfm *tfm)
+{
+ struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->geniv.child);
+ crypto_put_default_null_skcipher();
+}
+
+static int echainiv_aead_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
+{
+ struct aead_instance *inst;
+ struct crypto_aead_spawn *spawn;
+ struct aead_alg *alg;
+ int err;
+
+ inst = aead_geniv_alloc(tmpl, tb, 0, 0);
+
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
+
+ spawn = aead_instance_ctx(inst);
+ alg = crypto_spawn_aead_alg(spawn);
+
+ if (alg->base.cra_aead.encrypt)
+ goto done;
+
+ err = -EINVAL;
+ if (inst->alg.ivsize & (sizeof(u32) - 1) ||
+ inst->alg.ivsize > MAX_IV_SIZE)
+ goto free_inst;
+
+ inst->alg.encrypt = echainiv_encrypt;
+ inst->alg.decrypt = echainiv_decrypt;
+
+ inst->alg.base.cra_init = echainiv_init;
+ inst->alg.base.cra_exit = echainiv_exit;
+
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
+ inst->alg.base.cra_ctxsize += inst->alg.ivsize;
+
+done:
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto free_inst;
+
+out:
+ return err;
+
+free_inst:
+ aead_geniv_free(inst);
+ goto out;
+}
+
+static void echainiv_free(struct crypto_instance *inst)
+{
+ aead_geniv_free(aead_instance(inst));
+}
+
+static struct crypto_template echainiv_tmpl = {
+ .name = "echainiv",
+ .create = echainiv_aead_create,
+ .free = echainiv_free,
+ .module = THIS_MODULE,
+};
+
+static int __init echainiv_module_init(void)
+{
+ return crypto_register_template(&echainiv_tmpl);
+}
+
+static void __exit echainiv_module_exit(void)
+{
+ crypto_unregister_template(&echainiv_tmpl);
+}
+
+module_init(echainiv_module_init);
+module_exit(echainiv_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Encrypted Chain IV Generator");
+MODULE_ALIAS_CRYPTO("echainiv");
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index f116fae766f8..16dda72fc4f8 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -146,35 +146,13 @@ out:
return err;
}
-static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
-{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- int err = 0;
-
- spin_lock_bh(&ctx->lock);
- if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first)
- goto unlock;
-
- crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
- crypto_ablkcipher_ivsize(geniv));
-
-unlock:
- spin_unlock_bh(&ctx->lock);
-
- if (err)
- return err;
-
- return eseqiv_givencrypt(req);
-}
-
static int eseqiv_init(struct crypto_tfm *tfm)
{
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
unsigned long alignmask;
unsigned int reqsize;
+ int err;
spin_lock_init(&ctx->lock);
@@ -198,7 +176,15 @@ static int eseqiv_init(struct crypto_tfm *tfm)
tfm->crt_ablkcipher.reqsize = reqsize +
sizeof(struct ablkcipher_request);
- return skcipher_geniv_init(tfm);
+ err = 0;
+ if (!crypto_get_default_rng()) {
+ crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
+ err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+ crypto_ablkcipher_ivsize(geniv));
+ crypto_put_default_rng();
+ }
+
+ return err ?: skcipher_geniv_init(tfm);
}
static struct crypto_template eseqiv_tmpl;
@@ -208,20 +194,14 @@ static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
struct crypto_instance *inst;
int err;
- err = crypto_get_default_rng();
- if (err)
- return ERR_PTR(err);
-
inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
if (IS_ERR(inst))
- goto put_rng;
+ goto out;
err = -EINVAL;
if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
goto free_inst;
- inst->alg.cra_ablkcipher.givencrypt = eseqiv_givencrypt_first;
-
inst->alg.cra_init = eseqiv_init;
inst->alg.cra_exit = skcipher_geniv_exit;
@@ -234,21 +214,13 @@ out:
free_inst:
skcipher_geniv_free(inst);
inst = ERR_PTR(err);
-put_rng:
- crypto_put_default_rng();
goto out;
}
-static void eseqiv_free(struct crypto_instance *inst)
-{
- skcipher_geniv_free(inst);
- crypto_put_default_rng();
-}
-
static struct crypto_template eseqiv_tmpl = {
.name = "eseqiv",
.alloc = eseqiv_alloc,
- .free = eseqiv_free,
+ .free = skcipher_geniv_free,
.module = THIS_MODULE,
};
diff --git a/crypto/fips.c b/crypto/fips.c
index 553970081c62..9d627c1cf8bc 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -10,7 +10,12 @@
*
*/
-#include "internal.h"
+#include <linux/export.h>
+#include <linux/fips.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sysctl.h>
int fips_enabled;
EXPORT_SYMBOL_GPL(fips_enabled);
@@ -25,3 +30,49 @@ static int fips_enable(char *str)
}
__setup("fips=", fips_enable);
+
+static struct ctl_table crypto_sysctl_table[] = {
+ {
+ .procname = "fips_enabled",
+ .data = &fips_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec
+ },
+ {}
+};
+
+static struct ctl_table crypto_dir_table[] = {
+ {
+ .procname = "crypto",
+ .mode = 0555,
+ .child = crypto_sysctl_table
+ },
+ {}
+};
+
+static struct ctl_table_header *crypto_sysctls;
+
+static void crypto_proc_fips_init(void)
+{
+ crypto_sysctls = register_sysctl_table(crypto_dir_table);
+}
+
+static void crypto_proc_fips_exit(void)
+{
+ unregister_sysctl_table(crypto_sysctls);
+}
+
+static int __init fips_init(void)
+{
+ crypto_proc_fips_init();
+ return 0;
+}
+
+static void __exit fips_exit(void)
+{
+ crypto_proc_fips_exit();
+}
+
+module_init(fips_init);
+module_exit(fips_exit);
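The tables above surface the flag as a read-only file, /proc/sys/crypto/fips_enabled. A hedged userspace sketch for probing it:

    #include <stdio.h>

    int fips_mode(void)
    {
            int on = 0;
            FILE *f = fopen("/proc/sys/crypto/fips_enabled", "r");

            if (f) {
                    if (fscanf(f, "%d", &on) != 1)
                            on = 0;
                    fclose(f);
            }
            return on;
    }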
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 2e403f6138c1..7d32d4720564 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -12,6 +12,7 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <crypto/hash.h>
#include "internal.h"
@@ -39,7 +40,6 @@ struct crypto_rfc4106_ctx {
struct crypto_rfc4543_instance_ctx {
struct crypto_aead_spawn aead;
- struct crypto_skcipher_spawn null;
};
struct crypto_rfc4543_ctx {
@@ -49,25 +49,22 @@ struct crypto_rfc4543_ctx {
};
struct crypto_rfc4543_req_ctx {
- u8 auth_tag[16];
- u8 assocbuf[32];
- struct scatterlist cipher[1];
- struct scatterlist payload[2];
- struct scatterlist assoc[2];
struct aead_request subreq;
};
struct crypto_gcm_ghash_ctx {
unsigned int cryptlen;
struct scatterlist *src;
- void (*complete)(struct aead_request *req, int err);
+ int (*complete)(struct aead_request *req, u32 flags);
};
struct crypto_gcm_req_priv_ctx {
+ u8 iv[16];
u8 auth_tag[16];
u8 iauth_tag[16];
- struct scatterlist src[2];
- struct scatterlist dst[2];
+ struct scatterlist src[3];
+ struct scatterlist dst[3];
+ struct scatterlist sg;
struct crypto_gcm_ghash_ctx ghash_ctx;
union {
struct ahash_request ahreq;
@@ -80,7 +77,12 @@ struct crypto_gcm_setkey_result {
struct completion completion;
};
-static void *gcm_zeroes;
+static struct {
+ u8 buf[16];
+ struct scatterlist sg;
+} *gcm_zeroes;
+
+static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
@@ -120,15 +122,13 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
- CRYPTO_TFM_REQ_MASK);
-
+ CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(ctr, key, keylen);
+ crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
+ CRYPTO_TFM_RES_MASK);
if (err)
return err;
- crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
- CRYPTO_TFM_RES_MASK);
-
data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr),
GFP_KERNEL);
if (!data)
@@ -163,7 +163,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
CRYPTO_TFM_RES_MASK);
out:
- kfree(data);
+ kzfree(data);
return err;
}
@@ -186,35 +186,46 @@ static int crypto_gcm_setauthsize(struct crypto_aead *tfm,
return 0;
}
-static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
- struct aead_request *req,
+static void crypto_gcm_init_common(struct aead_request *req)
+{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ __be32 counter = cpu_to_be32(1);
+ struct scatterlist *sg;
+
+ memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
+ memcpy(pctx->iv, req->iv, 12);
+ memcpy(pctx->iv + 12, &counter, 4);
+
+ sg_init_table(pctx->src, 3);
+ sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
+ sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
+ if (sg != pctx->src + 1)
+ scatterwalk_sg_chain(pctx->src, 2, sg);
+
+ if (req->src != req->dst) {
+ sg_init_table(pctx->dst, 3);
+ sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
+ sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
+ if (sg != pctx->dst + 1)
+ scatterwalk_sg_chain(pctx->dst, 2, sg);
+ }
+}
+
+static void crypto_gcm_init_crypt(struct aead_request *req,
unsigned int cryptlen)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct ablkcipher_request *ablk_req = &pctx->u.abreq;
struct scatterlist *dst;
- __be32 counter = cpu_to_be32(1);
- memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
- memcpy(req->iv + 12, &counter, 4);
-
- sg_init_table(pctx->src, 2);
- sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
- scatterwalk_sg_chain(pctx->src, 2, req->src);
-
- dst = pctx->src;
- if (req->src != req->dst) {
- sg_init_table(pctx->dst, 2);
- sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
- scatterwalk_sg_chain(pctx->dst, 2, req->dst);
- dst = pctx->dst;
- }
+ dst = req->src == req->dst ? pctx->src : pctx->dst;
ablkcipher_request_set_tfm(ablk_req, ctx->ctr);
ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
cryptlen + sizeof(pctx->auth_tag),
- req->iv);
+ pctx->iv);
}
static inline unsigned int gcm_remain(unsigned int len)
@@ -224,41 +235,31 @@ static inline unsigned int gcm_remain(unsigned int len)
}
static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
-static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
static int gcm_hash_update(struct aead_request *req,
- struct crypto_gcm_req_priv_ctx *pctx,
crypto_completion_t compl,
struct scatterlist *src,
- unsigned int len)
+ unsigned int len, u32 flags)
{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
- ahash_request_set_callback(ahreq, aead_request_flags(req),
- compl, req);
+ ahash_request_set_callback(ahreq, flags, compl, req);
ahash_request_set_crypt(ahreq, src, NULL, len);
return crypto_ahash_update(ahreq);
}
static int gcm_hash_remain(struct aead_request *req,
- struct crypto_gcm_req_priv_ctx *pctx,
unsigned int remain,
- crypto_completion_t compl)
+ crypto_completion_t compl, u32 flags)
{
- struct ahash_request *ahreq = &pctx->u.ahreq;
-
- ahash_request_set_callback(ahreq, aead_request_flags(req),
- compl, req);
- sg_init_one(pctx->src, gcm_zeroes, remain);
- ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
-
- return crypto_ahash_update(ahreq);
+ return gcm_hash_update(req, compl, &gcm_zeroes->sg, remain, flags);
}
-static int gcm_hash_len(struct aead_request *req,
- struct crypto_gcm_req_priv_ctx *pctx)
+static int gcm_hash_len(struct aead_request *req, u32 flags)
{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
u128 lengths;
@@ -266,76 +267,41 @@ static int gcm_hash_len(struct aead_request *req,
lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(gctx->cryptlen * 8);
memcpy(pctx->iauth_tag, &lengths, 16);
- sg_init_one(pctx->src, pctx->iauth_tag, 16);
- ahash_request_set_callback(ahreq, aead_request_flags(req),
- gcm_hash_len_done, req);
- ahash_request_set_crypt(ahreq, pctx->src,
- NULL, sizeof(lengths));
+ sg_init_one(&pctx->sg, pctx->iauth_tag, 16);
+ ahash_request_set_callback(ahreq, flags, gcm_hash_len_done, req);
+ ahash_request_set_crypt(ahreq, &pctx->sg,
+ pctx->iauth_tag, sizeof(lengths));
- return crypto_ahash_update(ahreq);
+ return crypto_ahash_finup(ahreq);
}
-static int gcm_hash_final(struct aead_request *req,
- struct crypto_gcm_req_priv_ctx *pctx)
-{
- struct ahash_request *ahreq = &pctx->u.ahreq;
-
- ahash_request_set_callback(ahreq, aead_request_flags(req),
- gcm_hash_final_done, req);
- ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);
-
- return crypto_ahash_final(ahreq);
-}
-
-static void __gcm_hash_final_done(struct aead_request *req, int err)
+static int gcm_hash_len_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
- if (!err)
- crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
-
- gctx->complete(req, err);
-}
-
-static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
-{
- struct aead_request *req = areq->data;
-
- __gcm_hash_final_done(req, err);
-}
-
-static void __gcm_hash_len_done(struct aead_request *req, int err)
-{
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-
- if (!err) {
- err = gcm_hash_final(req, pctx);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- }
-
- __gcm_hash_final_done(req, err);
+ return gctx->complete(req, flags);
}
static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
- __gcm_hash_len_done(req, err);
+ if (err)
+ goto out;
+
+ err = gcm_hash_len_continue(req, 0);
+ if (err == -EINPROGRESS)
+ return;
+
+out:
+ aead_request_complete(req, err);
}
-static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
+static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags)
{
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-
- if (!err) {
- err = gcm_hash_len(req, pctx);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- }
-
- __gcm_hash_len_done(req, err);
+ return gcm_hash_len(req, flags) ?:
+ gcm_hash_len_continue(req, flags);
}
static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
@@ -343,55 +309,58 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
{
struct aead_request *req = areq->data;
- __gcm_hash_crypt_remain_done(req, err);
+ if (err)
+ goto out;
+
+ err = gcm_hash_crypt_remain_continue(req, 0);
+ if (err == -EINPROGRESS)
+ return;
+
+out:
+ aead_request_complete(req, err);
}
-static void __gcm_hash_crypt_done(struct aead_request *req, int err)
+static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int remain;
- if (!err) {
- remain = gcm_remain(gctx->cryptlen);
- BUG_ON(!remain);
- err = gcm_hash_remain(req, pctx, remain,
- gcm_hash_crypt_remain_done);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- }
+ remain = gcm_remain(gctx->cryptlen);
+ if (remain)
+ return gcm_hash_remain(req, remain,
+ gcm_hash_crypt_remain_done, flags) ?:
+ gcm_hash_crypt_remain_continue(req, flags);
- __gcm_hash_crypt_remain_done(req, err);
+ return gcm_hash_crypt_remain_continue(req, flags);
}
static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
- __gcm_hash_crypt_done(req, err);
+ if (err)
+ goto out;
+
+ err = gcm_hash_crypt_continue(req, 0);
+ if (err == -EINPROGRESS)
+ return;
+
+out:
+ aead_request_complete(req, err);
}
-static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
+static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
- crypto_completion_t compl;
- unsigned int remain = 0;
- if (!err && gctx->cryptlen) {
- remain = gcm_remain(gctx->cryptlen);
- compl = remain ? gcm_hash_crypt_done :
- gcm_hash_crypt_remain_done;
- err = gcm_hash_update(req, pctx, compl,
- gctx->src, gctx->cryptlen);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- }
+ if (gctx->cryptlen)
+ return gcm_hash_update(req, gcm_hash_crypt_done,
+ gctx->src, gctx->cryptlen, flags) ?:
+ gcm_hash_crypt_continue(req, flags);
- if (remain)
- __gcm_hash_crypt_done(req, err);
- else
- __gcm_hash_crypt_remain_done(req, err);
+ return gcm_hash_crypt_remain_continue(req, flags);
}
static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
@@ -399,146 +368,120 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
{
struct aead_request *req = areq->data;
- __gcm_hash_assoc_remain_done(req, err);
+ if (err)
+ goto out;
+
+ err = gcm_hash_assoc_remain_continue(req, 0);
+ if (err == -EINPROGRESS)
+ return;
+
+out:
+ aead_request_complete(req, err);
}
-static void __gcm_hash_assoc_done(struct aead_request *req, int err)
+static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags)
{
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
unsigned int remain;
- if (!err) {
- remain = gcm_remain(req->assoclen);
- BUG_ON(!remain);
- err = gcm_hash_remain(req, pctx, remain,
- gcm_hash_assoc_remain_done);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- }
+ remain = gcm_remain(req->assoclen);
+ if (remain)
+ return gcm_hash_remain(req, remain,
+ gcm_hash_assoc_remain_done, flags) ?:
+ gcm_hash_assoc_remain_continue(req, flags);
- __gcm_hash_assoc_remain_done(req, err);
+ return gcm_hash_assoc_remain_continue(req, flags);
}
static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
- __gcm_hash_assoc_done(req, err);
+ if (err)
+ goto out;
+
+ err = gcm_hash_assoc_continue(req, 0);
+ if (err == -EINPROGRESS)
+ return;
+
+out:
+ aead_request_complete(req, err);
}
-static void __gcm_hash_init_done(struct aead_request *req, int err)
+static int gcm_hash_init_continue(struct aead_request *req, u32 flags)
{
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- crypto_completion_t compl;
- unsigned int remain = 0;
+ if (req->assoclen)
+ return gcm_hash_update(req, gcm_hash_assoc_done,
+ req->src, req->assoclen, flags) ?:
+ gcm_hash_assoc_continue(req, flags);
- if (!err && req->assoclen) {
- remain = gcm_remain(req->assoclen);
- compl = remain ? gcm_hash_assoc_done :
- gcm_hash_assoc_remain_done;
- err = gcm_hash_update(req, pctx, compl,
- req->assoc, req->assoclen);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- }
-
- if (remain)
- __gcm_hash_assoc_done(req, err);
- else
- __gcm_hash_assoc_remain_done(req, err);
+ return gcm_hash_assoc_remain_continue(req, flags);
}
static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
- __gcm_hash_init_done(req, err);
+ if (err)
+ goto out;
+
+ err = gcm_hash_init_continue(req, 0);
+ if (err == -EINPROGRESS)
+ return;
+
+out:
+ aead_request_complete(req, err);
}
-static int gcm_hash(struct aead_request *req,
- struct crypto_gcm_req_priv_ctx *pctx)
+static int gcm_hash(struct aead_request *req, u32 flags)
{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
- struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
- struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- unsigned int remain;
- crypto_completion_t compl;
- int err;
+ struct crypto_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
ahash_request_set_tfm(ahreq, ctx->ghash);
- ahash_request_set_callback(ahreq, aead_request_flags(req),
- gcm_hash_init_done, req);
- err = crypto_ahash_init(ahreq);
- if (err)
- return err;
- remain = gcm_remain(req->assoclen);
- compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
- err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen);
- if (err)
- return err;
- if (remain) {
- err = gcm_hash_remain(req, pctx, remain,
- gcm_hash_assoc_remain_done);
- if (err)
- return err;
- }
- remain = gcm_remain(gctx->cryptlen);
- compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
- err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen);
- if (err)
- return err;
- if (remain) {
- err = gcm_hash_remain(req, pctx, remain,
- gcm_hash_crypt_remain_done);
- if (err)
- return err;
- }
- err = gcm_hash_len(req, pctx);
- if (err)
- return err;
- err = gcm_hash_final(req, pctx);
- if (err)
- return err;
-
- return 0;
+ ahash_request_set_callback(ahreq, flags, gcm_hash_init_done, req);
+ return crypto_ahash_init(ahreq) ?:
+ gcm_hash_init_continue(req, flags);
}
-static void gcm_enc_copy_hash(struct aead_request *req,
- struct crypto_gcm_req_priv_ctx *pctx)
+static int gcm_enc_copy_hash(struct aead_request *req, u32 flags)
{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
u8 *auth_tag = pctx->auth_tag;
- scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
+ crypto_xor(auth_tag, pctx->iauth_tag, 16);
+ scatterwalk_map_and_copy(auth_tag, req->dst,
+ req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
+ return 0;
}
-static void gcm_enc_hash_done(struct aead_request *req, int err)
+static int gcm_encrypt_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+ struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
- if (!err)
- gcm_enc_copy_hash(req, pctx);
+ gctx->src = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
+ gctx->cryptlen = req->cryptlen;
+ gctx->complete = gcm_enc_copy_hash;
- aead_request_complete(req, err);
+ return gcm_hash(req, flags);
}
static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- if (!err) {
- err = gcm_hash(req, pctx);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- else if (!err) {
- crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
- gcm_enc_copy_hash(req, pctx);
- }
- }
+ if (err)
+ goto out;
+ err = gcm_encrypt_continue(req, 0);
+ if (err == -EINPROGRESS)
+ return;
+
+out:
aead_request_complete(req, err);
}
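
Every *_done() callback in this patch follows the same completion discipline: -EINPROGRESS from the continuation means a later callback will finish the request, so the current callback must not complete it a second time. A sketch under that assumption; continue_fn()/complete_fn() are hypothetical stand-ins, not kernel API:

#include <errno.h>

static void demo_done(int err, int (*continue_fn)(void),
		      void (*complete_fn)(int))
{
	if (err)
		goto out;

	err = continue_fn();
	if (err == -EINPROGRESS)
		return;	/* a later callback completes the request */

out:
	complete_fn(err);
}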
@@ -546,34 +489,19 @@ static int crypto_gcm_encrypt(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->u.abreq;
- struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
- int err;
+ u32 flags = aead_request_flags(req);
- crypto_gcm_init_crypt(abreq, req, req->cryptlen);
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- gcm_encrypt_done, req);
+ crypto_gcm_init_common(req);
+ crypto_gcm_init_crypt(req, req->cryptlen);
+ ablkcipher_request_set_callback(abreq, flags, gcm_encrypt_done, req);
- gctx->src = req->dst;
- gctx->cryptlen = req->cryptlen;
- gctx->complete = gcm_enc_hash_done;
-
- err = crypto_ablkcipher_encrypt(abreq);
- if (err)
- return err;
-
- err = gcm_hash(req, pctx);
- if (err)
- return err;
-
- crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
- gcm_enc_copy_hash(req, pctx);
-
- return 0;
+ return crypto_ablkcipher_encrypt(abreq) ?:
+ gcm_encrypt_continue(req, flags);
}
-static int crypto_gcm_verify(struct aead_request *req,
- struct crypto_gcm_req_priv_ctx *pctx)
+static int crypto_gcm_verify(struct aead_request *req)
{
+ struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
u8 *auth_tag = pctx->auth_tag;
u8 *iauth_tag = pctx->iauth_tag;
@@ -581,78 +509,57 @@ static int crypto_gcm_verify(struct aead_request *req,
unsigned int cryptlen = req->cryptlen - authsize;
crypto_xor(auth_tag, iauth_tag, 16);
- scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
+ scatterwalk_map_and_copy(iauth_tag, req->src,
+ req->assoclen + cryptlen, authsize, 0);
return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
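
The tag comparison above deliberately uses crypto_memneq() instead of memcmp(): its runtime must not depend on how many leading tag bytes match, or an attacker could recover the tag byte by byte from timing. A user-space sketch of such a data-independent comparison (the kernel's implementation differs in detail):

#include <stdio.h>
#include <stddef.h>

static int memneq_sketch(const void *a, const void *b, size_t size)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char neq = 0;

	while (size--)
		neq |= *pa++ ^ *pb++;	/* accumulate, never exit early */
	return neq != 0;
}

int main(void)
{
	printf("%d %d\n", memneq_sketch("abcd", "abcd", 4),
	       memneq_sketch("abcd", "abce", 4));	/* prints "0 1" */
	return 0;
}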
static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
- struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
if (!err)
- err = crypto_gcm_verify(req, pctx);
+ err = crypto_gcm_verify(req);
aead_request_complete(req, err);
}
-static void gcm_dec_hash_done(struct aead_request *req, int err)
+static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ablkcipher_request *abreq = &pctx->u.abreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
- if (!err) {
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- gcm_decrypt_done, req);
- crypto_gcm_init_crypt(abreq, req, gctx->cryptlen);
- err = crypto_ablkcipher_decrypt(abreq);
- if (err == -EINPROGRESS || err == -EBUSY)
- return;
- else if (!err)
- err = crypto_gcm_verify(req, pctx);
- }
-
- aead_request_complete(req, err);
+ crypto_gcm_init_crypt(req, gctx->cryptlen);
+ ablkcipher_request_set_callback(abreq, flags, gcm_decrypt_done, req);
+ return crypto_ablkcipher_decrypt(abreq) ?: crypto_gcm_verify(req);
}
static int crypto_gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
- struct ablkcipher_request *abreq = &pctx->u.abreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen;
- int err;
+ u32 flags = aead_request_flags(req);
- if (cryptlen < authsize)
- return -EINVAL;
cryptlen -= authsize;
- gctx->src = req->src;
+ crypto_gcm_init_common(req);
+
+ gctx->src = sg_next(pctx->src);
gctx->cryptlen = cryptlen;
- gctx->complete = gcm_dec_hash_done;
+ gctx->complete = gcm_dec_hash_continue;
- err = gcm_hash(req, pctx);
- if (err)
- return err;
-
- ablkcipher_request_set_callback(abreq, aead_request_flags(req),
- gcm_decrypt_done, req);
- crypto_gcm_init_crypt(abreq, req, cryptlen);
- err = crypto_ablkcipher_decrypt(abreq);
- if (err)
- return err;
-
- return crypto_gcm_verify(req, pctx);
+ return gcm_hash(req, flags);
}
-static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
+static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct gcm_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ablkcipher *ctr;
struct crypto_ahash *ghash;
unsigned long align;
@@ -670,14 +577,14 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
ctx->ctr = ctr;
ctx->ghash = ghash;
- align = crypto_tfm_alg_alignmask(tfm);
+ align = crypto_aead_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
- tfm->crt_aead.reqsize = align +
- offsetof(struct crypto_gcm_req_priv_ctx, u) +
+ crypto_aead_set_reqsize(tfm,
+ align + offsetof(struct crypto_gcm_req_priv_ctx, u) +
max(sizeof(struct ablkcipher_request) +
crypto_ablkcipher_reqsize(ctr),
sizeof(struct ahash_request) +
- crypto_ahash_reqsize(ghash));
+ crypto_ahash_reqsize(ghash)));
return 0;
@@ -686,53 +593,59 @@ err_free_hash:
return err;
}
-static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
{
- struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->ghash);
crypto_free_ablkcipher(ctx->ctr);
}
-static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
- const char *full_name,
- const char *ctr_name,
- const char *ghash_name)
+static int crypto_gcm_create_common(struct crypto_template *tmpl,
+ struct rtattr **tb,
+ const char *full_name,
+ const char *ctr_name,
+ const char *ghash_name)
{
struct crypto_attr_type *algt;
- struct crypto_instance *inst;
+ struct aead_instance *inst;
struct crypto_alg *ctr;
struct crypto_alg *ghash_alg;
- struct ahash_alg *ghash_ahash_alg;
+ struct hash_alg_common *ghash;
struct gcm_instance_ctx *ctx;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(ghash_alg))
- return ERR_CAST(ghash_alg);
+ return PTR_ERR(ghash_alg);
+
+ ghash = __crypto_hash_alg_common(ghash_alg);
err = -ENOMEM;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
goto out_put_ghash;
- ctx = crypto_instance_ctx(inst);
- ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base);
- err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg,
- inst);
+ ctx = aead_instance_ctx(inst);
+ err = crypto_init_ahash_spawn(&ctx->ghash, ghash,
+ aead_crypto_instance(inst));
if (err)
goto err_free_inst;
- crypto_set_skcipher_spawn(&ctx->ctr, inst);
+ err = -EINVAL;
+ if (ghash->digestsize != 16)
+ goto err_drop_ghash;
+
+ crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
@@ -751,33 +664,38 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
goto out_put_ctr;
err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->cra_driver_name,
ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
- memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+ memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
- inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = ctr->cra_priority;
- inst->alg.cra_blocksize = 1;
- inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1);
- inst->alg.cra_type = &crypto_aead_type;
- inst->alg.cra_aead.ivsize = 16;
- inst->alg.cra_aead.maxauthsize = 16;
- inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
- inst->alg.cra_init = crypto_gcm_init_tfm;
- inst->alg.cra_exit = crypto_gcm_exit_tfm;
- inst->alg.cra_aead.setkey = crypto_gcm_setkey;
- inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize;
- inst->alg.cra_aead.encrypt = crypto_gcm_encrypt;
- inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
+ inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) &
+ CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = (ghash->base.cra_priority +
+ ctr->cra_priority) / 2;
+ inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
+ ctr->cra_alignmask;
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
+ inst->alg.ivsize = 12;
+ inst->alg.maxauthsize = 16;
+ inst->alg.init = crypto_gcm_init_tfm;
+ inst->alg.exit = crypto_gcm_exit_tfm;
+ inst->alg.setkey = crypto_gcm_setkey;
+ inst->alg.setauthsize = crypto_gcm_setauthsize;
+ inst->alg.encrypt = crypto_gcm_encrypt;
+ inst->alg.decrypt = crypto_gcm_decrypt;
-out:
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto out_put_ctr;
+
+out_put_ghash:
crypto_mod_put(ghash_alg);
- return inst;
+ return err;
out_put_ctr:
crypto_drop_skcipher(&ctx->ctr);
@@ -785,12 +703,10 @@ err_drop_ghash:
crypto_drop_ahash(&ctx->ghash);
err_free_inst:
kfree(inst);
-out_put_ghash:
- inst = ERR_PTR(err);
- goto out;
+ goto out_put_ghash;
}
-static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
+static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
@@ -798,17 +714,18 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
- return ERR_CAST(cipher_name);
+ return PTR_ERR(cipher_name);
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >=
CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-ENAMETOOLONG);
+ return -ENAMETOOLONG;
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-ENAMETOOLONG);
+ return -ENAMETOOLONG;
- return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash");
+ return crypto_gcm_create_common(tmpl, tb, full_name,
+ ctr_name, "ghash");
}
static void crypto_gcm_free(struct crypto_instance *inst)
@@ -817,17 +734,18 @@ static void crypto_gcm_free(struct crypto_instance *inst)
crypto_drop_skcipher(&ctx->ctr);
crypto_drop_ahash(&ctx->ghash);
- kfree(inst);
+ kfree(aead_instance(inst));
}
static struct crypto_template crypto_gcm_tmpl = {
.name = "gcm",
- .alloc = crypto_gcm_alloc,
+ .create = crypto_gcm_create,
.free = crypto_gcm_free,
.module = THIS_MODULE,
};
-static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
+static int crypto_gcm_base_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
{
const char *ctr_name;
const char *ghash_name;
@@ -835,22 +753,23 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
- return ERR_CAST(ctr_name);
+ return PTR_ERR(ctr_name);
ghash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(ghash_name))
- return ERR_CAST(ghash_name);
+ return PTR_ERR(ghash_name);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-ENAMETOOLONG);
+ return -ENAMETOOLONG;
- return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name);
+ return crypto_gcm_create_common(tmpl, tb, full_name,
+ ctr_name, ghash_name);
}
static struct crypto_template crypto_gcm_base_tmpl = {
.name = "gcm_base",
- .alloc = crypto_gcm_base_alloc,
+ .create = crypto_gcm_base_create,
.free = crypto_gcm_free,
.module = THIS_MODULE,
};
@@ -911,7 +830,7 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
- aead_request_set_assoc(subreq, req->assoc, req->assoclen);
+ aead_request_set_ad(subreq, req->assoclen);
return subreq;
}
@@ -930,11 +849,11 @@ static int crypto_rfc4106_decrypt(struct aead_request *req)
return crypto_aead_decrypt(req);
}
-static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm)
+static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
- struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
+ struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
unsigned long align;
@@ -946,126 +865,120 @@ static int crypto_rfc4106_init_tfm(struct crypto_tfm *tfm)
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
- tfm->crt_aead.reqsize = sizeof(struct aead_request) +
- ALIGN(crypto_aead_reqsize(aead),
- crypto_tfm_ctx_alignment()) +
- align + 16;
+ crypto_aead_set_reqsize(
+ tfm,
+ sizeof(struct aead_request) +
+ ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
+ align + 12);
return 0;
}
-static void crypto_rfc4106_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm)
{
- struct crypto_rfc4106_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
}
-static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb)
+static int crypto_rfc4106_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
{
struct crypto_attr_type *algt;
- struct crypto_instance *inst;
+ struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
- struct crypto_alg *alg;
+ struct aead_alg *alg;
const char *ccm_name;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ccm_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ccm_name))
- return ERR_CAST(ccm_name);
+ return PTR_ERR(ccm_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- spawn = crypto_instance_ctx(inst);
- crypto_set_aead_spawn(spawn, inst);
+ spawn = aead_instance_ctx(inst);
+ crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
err = crypto_grab_aead(spawn, ccm_name, 0,
crypto_requires_sync(algt->type, algt->mask));
if (err)
goto out_free_inst;
- alg = crypto_aead_spawn_alg(spawn);
+ alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
- /* We only support 16-byte blocks. */
- if (alg->cra_aead.ivsize != 16)
+ /* Underlying IV size must be 12. */
+ if (crypto_aead_alg_ivsize(alg) != 12)
goto out_drop_alg;
/* Not a stream cipher? */
- if (alg->cra_blocksize != 1)
+ if (alg->base.cra_blocksize != 1)
goto out_drop_alg;
err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
- "rfc4106(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
- snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "rfc4106(%s)", alg->cra_driver_name) >=
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "rfc4106(%s)", alg->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "rfc4106(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_drop_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
- inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = 1;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_nivaead_type;
+ inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
- inst->alg.cra_aead.ivsize = 8;
- inst->alg.cra_aead.maxauthsize = 16;
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
- inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
+ inst->alg.ivsize = 8;
+ inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
- inst->alg.cra_init = crypto_rfc4106_init_tfm;
- inst->alg.cra_exit = crypto_rfc4106_exit_tfm;
+ inst->alg.init = crypto_rfc4106_init_tfm;
+ inst->alg.exit = crypto_rfc4106_exit_tfm;
- inst->alg.cra_aead.setkey = crypto_rfc4106_setkey;
- inst->alg.cra_aead.setauthsize = crypto_rfc4106_setauthsize;
- inst->alg.cra_aead.encrypt = crypto_rfc4106_encrypt;
- inst->alg.cra_aead.decrypt = crypto_rfc4106_decrypt;
+ inst->alg.setkey = crypto_rfc4106_setkey;
+ inst->alg.setauthsize = crypto_rfc4106_setauthsize;
+ inst->alg.encrypt = crypto_rfc4106_encrypt;
+ inst->alg.decrypt = crypto_rfc4106_decrypt;
- inst->alg.cra_aead.geniv = "seqiv";
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_alg;
out:
- return inst;
+ return err;
out_drop_alg:
crypto_drop_aead(spawn);
out_free_inst:
kfree(inst);
- inst = ERR_PTR(err);
goto out;
}
static void crypto_rfc4106_free(struct crypto_instance *inst)
{
- crypto_drop_spawn(crypto_instance_ctx(inst));
- kfree(inst);
+ crypto_drop_aead(crypto_instance_ctx(inst));
+ kfree(aead_instance(inst));
}
static struct crypto_template crypto_rfc4106_tmpl = {
.name = "rfc4106",
- .alloc = crypto_rfc4106_alloc,
+ .create = crypto_rfc4106_create,
.free = crypto_rfc4106_free,
.module = THIS_MODULE,
};
-static inline struct crypto_rfc4543_req_ctx *crypto_rfc4543_reqctx(
- struct aead_request *req)
-{
- unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
-
- return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
-}
-
static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
@@ -1100,83 +1013,35 @@ static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
return crypto_aead_setauthsize(ctx->child, authsize);
}
-static void crypto_rfc4543_done(struct crypto_async_request *areq, int err)
-{
- struct aead_request *req = areq->data;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
-
- if (!err) {
- scatterwalk_map_and_copy(rctx->auth_tag, req->dst,
- req->cryptlen,
- crypto_aead_authsize(aead), 1);
- }
-
- aead_request_complete(req, err);
-}
-
-static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
- bool enc)
+static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
- struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
+ struct crypto_rfc4543_req_ctx *rctx = aead_request_ctx(req);
struct aead_request *subreq = &rctx->subreq;
- struct scatterlist *src = req->src;
- struct scatterlist *cipher = rctx->cipher;
- struct scatterlist *payload = rctx->payload;
- struct scatterlist *assoc = rctx->assoc;
unsigned int authsize = crypto_aead_authsize(aead);
- unsigned int assoclen = req->assoclen;
- struct page *srcp;
- u8 *vsrc;
u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
crypto_aead_alignmask(ctx->child) + 1);
+ int err;
+
+ if (req->src != req->dst) {
+ err = crypto_rfc4543_copy_src_to_dst(req, enc);
+ if (err)
+ return err;
+ }
memcpy(iv, ctx->nonce, 4);
memcpy(iv + 4, req->iv, 8);
- /* construct cipher/plaintext */
- if (enc)
- memset(rctx->auth_tag, 0, authsize);
- else
- scatterwalk_map_and_copy(rctx->auth_tag, src,
- req->cryptlen - authsize,
- authsize, 0);
-
- sg_init_one(cipher, rctx->auth_tag, authsize);
-
- /* construct the aad */
- srcp = sg_page(src);
- vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
-
- sg_init_table(payload, 2);
- sg_set_buf(payload, req->iv, 8);
- scatterwalk_crypto_chain(payload, src, vsrc == req->iv + 8, 2);
- assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
-
- if (req->assoc->length == req->assoclen) {
- sg_init_table(assoc, 2);
- sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
- req->assoc->offset);
- } else {
- BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
-
- scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
- req->assoclen, 0);
-
- sg_init_table(assoc, 2);
- sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
- }
- scatterwalk_crypto_chain(assoc, payload, 0, 2);
-
aead_request_set_tfm(subreq, ctx->child);
- aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done,
- req);
- aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
- aead_request_set_assoc(subreq, assoc, assoclen);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ enc ? 0 : authsize, iv);
+ aead_request_set_ad(subreq, req->assoclen + req->cryptlen -
+ subreq->cryptlen);
- return subreq;
+ return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
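
A worked example of the aead_request_set_ad() arithmetic above, with hypothetical sizes (assoclen = 16, authsize = 16, 100 bytes of payload). rfc4543 (GMAC) authenticates everything and encrypts nothing, so the inner request carries only the tag region as "ciphertext" and the rest rides as AAD:

	encrypt: req->cryptlen = 100, subreq->cryptlen = 0
	         AD = 16 + 100 - 0  = 116 bytes (assoc + payload)
	decrypt: req->cryptlen = 116 (payload + tag),
	         subreq->cryptlen = authsize = 16
	         AD = 16 + 116 - 16 = 116 bytes (assoc + payload)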
static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
@@ -1184,7 +1049,8 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
- unsigned int nbytes = req->cryptlen - (enc ? 0 : authsize);
+ unsigned int nbytes = req->assoclen + req->cryptlen -
+ (enc ? 0 : authsize);
struct blkcipher_desc desc = {
.tfm = ctx->null,
};
@@ -1194,49 +1060,20 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
- struct aead_request *subreq;
- int err;
-
- if (req->src != req->dst) {
- err = crypto_rfc4543_copy_src_to_dst(req, true);
- if (err)
- return err;
- }
-
- subreq = crypto_rfc4543_crypt(req, true);
- err = crypto_aead_encrypt(subreq);
- if (err)
- return err;
-
- scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen,
- crypto_aead_authsize(aead), 1);
-
- return 0;
+ return crypto_rfc4543_crypt(req, true);
}
static int crypto_rfc4543_decrypt(struct aead_request *req)
{
- int err;
-
- if (req->src != req->dst) {
- err = crypto_rfc4543_copy_src_to_dst(req, false);
- if (err)
- return err;
- }
-
- req = crypto_rfc4543_crypt(req, false);
-
- return crypto_aead_decrypt(req);
+ return crypto_rfc4543_crypt(req, false);
}
-static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
+static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
{
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
- struct crypto_rfc4543_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct crypto_rfc4543_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_aead_spawn *spawn = &ictx->aead;
- struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
struct crypto_blkcipher *null;
unsigned long align;
@@ -1246,7 +1083,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_spawn_blkcipher(&ictx->null.base);
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_aead;
@@ -1256,10 +1093,11 @@ static int crypto_rfc4543_init_tfm(struct crypto_tfm *tfm)
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
- tfm->crt_aead.reqsize = sizeof(struct crypto_rfc4543_req_ctx) +
- ALIGN(crypto_aead_reqsize(aead),
- crypto_tfm_ctx_alignment()) +
- align + 16;
+ crypto_aead_set_reqsize(
+ tfm,
+ sizeof(struct crypto_rfc4543_req_ctx) +
+ ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
+ align + 12);
return 0;
@@ -1268,107 +1106,98 @@ err_free_aead:
return err;
}
-static void crypto_rfc4543_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
{
- struct crypto_rfc4543_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_free_blkcipher(ctx->null);
+ crypto_put_default_null_skcipher();
}
-static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb)
+static int crypto_rfc4543_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
{
struct crypto_attr_type *algt;
- struct crypto_instance *inst;
+ struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
- struct crypto_alg *alg;
+ struct aead_alg *alg;
struct crypto_rfc4543_instance_ctx *ctx;
const char *ccm_name;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ccm_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ccm_name))
- return ERR_CAST(ccm_name);
+ return PTR_ERR(ccm_name);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- ctx = crypto_instance_ctx(inst);
+ ctx = aead_instance_ctx(inst);
spawn = &ctx->aead;
- crypto_set_aead_spawn(spawn, inst);
+ crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
err = crypto_grab_aead(spawn, ccm_name, 0,
crypto_requires_sync(algt->type, algt->mask));
if (err)
goto out_free_inst;
- alg = crypto_aead_spawn_alg(spawn);
-
- crypto_set_skcipher_spawn(&ctx->null, inst);
- err = crypto_grab_skcipher(&ctx->null, "ecb(cipher_null)", 0,
- CRYPTO_ALG_ASYNC);
- if (err)
- goto out_drop_alg;
-
- crypto_skcipher_spawn_alg(&ctx->null);
+ alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
- /* We only support 16-byte blocks. */
- if (alg->cra_aead.ivsize != 16)
- goto out_drop_ecbnull;
+ /* Underlying IV size must be 12. */
+ if (crypto_aead_alg_ivsize(alg) != 12)
+ goto out_drop_alg;
/* Not a stream cipher? */
- if (alg->cra_blocksize != 1)
- goto out_drop_ecbnull;
+ if (alg->base.cra_blocksize != 1)
+ goto out_drop_alg;
err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
- "rfc4543(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
- snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "rfc4543(%s)", alg->cra_driver_name) >=
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "rfc4543(%s)", alg->base.cra_name) >=
+ CRYPTO_MAX_ALG_NAME ||
+ snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "rfc4543(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
- goto out_drop_ecbnull;
+ goto out_drop_alg;
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
- inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
- inst->alg.cra_priority = alg->cra_priority;
- inst->alg.cra_blocksize = 1;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- inst->alg.cra_type = &crypto_nivaead_type;
+ inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = 1;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
- inst->alg.cra_aead.ivsize = 8;
- inst->alg.cra_aead.maxauthsize = 16;
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
- inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
+ inst->alg.ivsize = 8;
+ inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
- inst->alg.cra_init = crypto_rfc4543_init_tfm;
- inst->alg.cra_exit = crypto_rfc4543_exit_tfm;
+ inst->alg.init = crypto_rfc4543_init_tfm;
+ inst->alg.exit = crypto_rfc4543_exit_tfm;
- inst->alg.cra_aead.setkey = crypto_rfc4543_setkey;
- inst->alg.cra_aead.setauthsize = crypto_rfc4543_setauthsize;
- inst->alg.cra_aead.encrypt = crypto_rfc4543_encrypt;
- inst->alg.cra_aead.decrypt = crypto_rfc4543_decrypt;
+ inst->alg.setkey = crypto_rfc4543_setkey;
+ inst->alg.setauthsize = crypto_rfc4543_setauthsize;
+ inst->alg.encrypt = crypto_rfc4543_encrypt;
+ inst->alg.decrypt = crypto_rfc4543_decrypt;
- inst->alg.cra_aead.geniv = "seqiv";
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_alg;
out:
- return inst;
+ return err;
-out_drop_ecbnull:
- crypto_drop_skcipher(&ctx->null);
out_drop_alg:
crypto_drop_aead(spawn);
out_free_inst:
kfree(inst);
- inst = ERR_PTR(err);
goto out;
}
@@ -1377,14 +1206,13 @@ static void crypto_rfc4543_free(struct crypto_instance *inst)
struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_aead(&ctx->aead);
- crypto_drop_skcipher(&ctx->null);
- kfree(inst);
+ kfree(aead_instance(inst));
}
static struct crypto_template crypto_rfc4543_tmpl = {
.name = "rfc4543",
- .alloc = crypto_rfc4543_alloc,
+ .create = crypto_rfc4543_create,
.free = crypto_rfc4543_free,
.module = THIS_MODULE,
};
@@ -1393,10 +1221,12 @@ static int __init crypto_gcm_module_init(void)
{
int err;
- gcm_zeroes = kzalloc(16, GFP_KERNEL);
+ gcm_zeroes = kzalloc(sizeof(*gcm_zeroes), GFP_KERNEL);
if (!gcm_zeroes)
return -ENOMEM;
+ sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf));
+
err = crypto_register_template(&crypto_gcm_base_tmpl);
if (err)
goto out;
diff --git a/crypto/internal.h b/crypto/internal.h
index bd39bfc92eab..00e42a3ed814 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -25,7 +25,6 @@
#include
#include
#include
-#include
/* Crypto notification events. */
enum {
@@ -103,6 +102,8 @@ int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
+unsigned int crypto_alg_extsize(struct crypto_alg *alg);
+
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
atomic_inc(&alg->cra_refcnt);
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
new file mode 100644
index 000000000000..d3c30452edee
--- /dev/null
+++ b/crypto/jitterentropy.c
@@ -0,0 +1,928 @@
+/*
+ * Non-physical true random number generator based on timing jitter.
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Design
+ * ======
+ *
+ * See http://www.chronox.de/jent.html
+ *
+ * License
+ * =======
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL2 are
+ * required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * This Jitterentropy RNG is based on the jitterentropy library
+ * version 1.1.0 provided at http://www.chronox.de/jent.html
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fips.h>
+#include <linux/time.h>
+#include <linux/bitops.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+
+/* The entropy pool */
+struct rand_data {
+ /* all data values that are vital to maintain the security
+ * of the RNG are marked as SENSITIVE. A user must not
+ * access that information while the RNG executes its loops to
+ * calculate the next random value. */
+ __u64 data; /* SENSITIVE Actual random number */
+ __u64 old_data; /* SENSITIVE Previous random number */
+ __u64 prev_time; /* SENSITIVE Previous time stamp */
+#define DATA_SIZE_BITS ((sizeof(__u64)) * 8)
+ __u64 last_delta; /* SENSITIVE stuck test */
+ __s64 last_delta2; /* SENSITIVE stuck test */
+ unsigned int stuck:1; /* Time measurement stuck */
+ unsigned int osr; /* Oversample rate */
+ unsigned int stir:1; /* Post-processing stirring */
+ unsigned int disable_unbias:1; /* Deactivate Von Neumann unbias */
+#define JENT_MEMORY_BLOCKS 64
+#define JENT_MEMORY_BLOCKSIZE 32
+#define JENT_MEMORY_ACCESSLOOPS 128
+#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE)
+ unsigned char *mem; /* Memory access location with size of
+ * memblocks * memblocksize */
+ unsigned int memlocation; /* Pointer to byte in *mem */
+ unsigned int memblocks; /* Number of memory blocks in *mem */
+ unsigned int memblocksize; /* Size of one memory block in bytes */
+ unsigned int memaccessloops; /* Number of memory accesses per random
+ * bit generation */
+};
+
+/* Flags that can be used to initialize the RNG */
+#define JENT_DISABLE_STIR (1<<0) /* Disable stirring the entropy pool */
+#define JENT_DISABLE_UNBIAS (1<<1) /* Disable the Von Neumann unbiaser */
+#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more
+ * entropy, saves MEMORY_SIZE RAM for
+ * entropy collector */
+
+#define DRIVER_NAME "jitterentropy"
+
+/* -- error codes for init function -- */
+#define JENT_ENOTIME 1 /* Timer service not available */
+#define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */
+#define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */
+#define JENT_EMINVARIATION 4 /* Timer variations too small for RNG */
+#define JENT_EVARVAR 5 /* Timer does not produce variations of
+ * variations (2nd derivative of time is
+ * zero). */
+#define JENT_EMINVARVAR 6 /* Timer variations of variations is too
+ * small. */
+
+/***************************************************************************
+ * Helper functions
+ ***************************************************************************/
+
+static inline void jent_get_nstime(__u64 *out)
+{
+ struct timespec ts;
+ __u64 tmp = 0;
+
+ tmp = random_get_entropy();
+
+ /*
+ * If random_get_entropy does not return a value (which is possible on,
+ * for example, MIPS), invoke __getnstimeofday
+ * hoping that there are timers we can work with.
+ *
+ * The list of available timers can be obtained from
+ * /sys/devices/system/clocksource/clocksource0/available_clocksource
+ * and are registered with clocksource_register()
+ */
+ if ((0 == tmp) &&
+ (0 == __getnstimeofday(&ts))) {
+ tmp = ts.tv_sec;
+ tmp = tmp << 32;
+ tmp = tmp | ts.tv_nsec;
+ }
+
+ *out = tmp;
+}
+
+
+/**
+ * Update of the loop count used for the next round of
+ * an entropy collection.
+ *
+ * Input:
+ * @ec entropy collector struct -- may be NULL
+ * @bits is the number of low bits of the timer to consider
+ * @min is the number of bits we shift the timer value to the right at
+ * the end to make sure we have a guaranteed minimum value
+ *
+ * @return Newly calculated loop counter
+ */
+static __u64 jent_loop_shuffle(struct rand_data *ec,
+ unsigned int bits, unsigned int min)
+{
+ __u64 time = 0;
+ __u64 shuffle = 0;
+ unsigned int i = 0;
+ unsigned int mask = (1<<bits) - 1;
+
+ jent_get_nstime(&time);
+ /*
+ * mix the current state of the random number into the shuffle
+ * calculation to balance that shuffle a bit more
+ */
+ if (ec)
+ time ^= ec->data;
+ /*
+ * we fold the time value as much as possible to ensure that as many
+ * bits of the time stamp are included as possible
+ */
+ for (i = 0; (DATA_SIZE_BITS / bits) > i; i++) {
+ shuffle ^= time & mask;
+ time = time >> bits;
+ }
+
+ /*
+ * We add a lower boundary value to ensure we have a minimum
+ * RNG loop count.
+ */
+ return (shuffle + (1<<min));
+}
+
+/**
+ * CPU Jitter noise source -- this is the noise source based on the CPU
+ * execution time jitter
+ *
+ * This function folds the time into one bit units by iterating
+ * through the DATA_SIZE_BITS bit time value as follows: assume our time value
+ * is 0xabcd
+ * 1st loop, 1st shift generates 0xd000
+ * 1st loop, 2nd shift generates 0x000d
+ * 2nd loop, 1st shift generates 0xcd00
+ * 2nd loop, 2nd shift generates 0x000c
+ * 3rd loop, 1st shift generates 0xbcd0
+ * 3rd loop, 2nd shift generates 0x000b
+ * 4th loop, 1st shift generates 0xabcd
+ * 4th loop, 2nd shift generates 0x000a
+ * Now, the values at the end of the 2nd shifts are XORed together.
+ *
+ * The code is deliberately inefficient and shall stay that way. This function
+ * is the root cause why the code shall be compiled without optimization. This
+ * function not only acts as folding operation, but this function's execution
+ * is used to measure the CPU execution time jitter. Any change to the loop in
+ * this function implies that careful retesting must be done.
+ *
+ * Input:
+ * @ec entropy collector struct -- may be NULL
+ * @time time stamp to be folded
+ * @loop_cnt if a value not equal to 0 is set, use the given value as number of
+ * loops to perform the folding
+ *
+ * Output:
+ * @folded result of folding operation
+ *
+ * @return Number of loops the folding operation is performed
+ */
+#pragma GCC push_options
+#pragma GCC optimize ("-O0")
+static __u64 jent_fold_time(struct rand_data *ec, __u64 time,
+ __u64 *folded, __u64 loop_cnt)
+{
+ unsigned int i;
+ __u64 j = 0;
+ __u64 new = 0;
+#define MAX_FOLD_LOOP_BIT 4
+#define MIN_FOLD_LOOP_BIT 0
+ __u64 fold_loop_cnt =
+ jent_loop_shuffle(ec, MAX_FOLD_LOOP_BIT, MIN_FOLD_LOOP_BIT);
+
+ /*
+ * testing purposes -- allow test app to set the counter, not
+ * needed during runtime
+ */
+ if (loop_cnt)
+ fold_loop_cnt = loop_cnt;
+ for (j = 0; j < fold_loop_cnt; j++) {
+ new = 0;
+ for (i = 1; (DATA_SIZE_BITS) >= i; i++) {
+ __u64 tmp = time << (DATA_SIZE_BITS - i);
+
+ tmp = tmp >> (DATA_SIZE_BITS - 1);
+ new ^= tmp;
+ }
+ }
+ *folded = new;
+ return fold_loop_cnt;
+}
+#pragma GCC pop_options
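
With DATA_SIZE_BITS = 64, one pass of the fold loop above reduces the time delta to its XOR parity bit; the outer loop resets "new" each pass, so the repetitions only burn a jitter-dependent amount of CPU time without changing the result. A user-space sketch, not part of the patch:

#include <stdio.h>
#include <stdint.h>

static uint64_t fold_once(uint64_t time)
{
	uint64_t folded = 0;
	unsigned int i;

	for (i = 1; i <= 64; i++) {
		uint64_t tmp = time << (64 - i);

		tmp >>= 63;	/* isolate bit i - 1 */
		folded ^= tmp;	/* XOR of all bits: parity */
	}
	return folded;
}

int main(void)
{
	/* 0xabcd has ten set bits, so the fold yields 0 */
	printf("%llu\n", (unsigned long long)fold_once(0xabcdULL));
	return 0;
}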
+
+/**
+ * Memory Access noise source -- this is a noise source based on variations in
+ * memory access times
+ *
+ * This function performs memory accesses which will add to the timing
+ * variations due to an unknown amount of CPU wait states that need to be
+ * added when accessing memory. The memory size should be larger than the L1
+ * caches as outlined in the documentation and the associated testing.
+ *
+ * The L1 cache has a very high bandwidth, albeit its access rate is usually
+ * slower than accessing CPU registers. Therefore, L1 accesses only add minimal
+ * variations as the CPU hardly has to wait. Starting with L2, significant
+ * variations are added because L2 typically does not belong to the CPU any more
+ * and therefore a wider range of CPU wait states is necessary for accesses.
+ * L3 and real memory accesses have an even wider range of wait states. However,
+ * to reliably access either L3 or memory, the ec->mem memory must be quite
+ * large which is usually not desirable.
+ *
+ * Input:
+ * @ec Reference to the entropy collector with the memory access data -- if
+ * the reference to the memory block to be accessed is NULL, this noise
+ * source is disabled
+ * @loop_cnt if a value not equal to 0 is set, use the given value as number of
+ * loops to perform the folding
+ *
+ * @return Number of memory access operations
+ */
+#pragma GCC push_options
+#pragma GCC optimize ("-O0")
+static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
+{
+ unsigned char *tmpval = NULL;
+ unsigned int wrap = 0;
+ __u64 i = 0;
+#define MAX_ACC_LOOP_BIT 7
+#define MIN_ACC_LOOP_BIT 0
+ __u64 acc_loop_cnt =
+ jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
+
+ if (NULL == ec || NULL == ec->mem)
+ return 0;
+ wrap = ec->memblocksize * ec->memblocks;
+
+ /*
+ * testing purposes -- allow test app to set the counter, not
+ * needed during runtime
+ */
+ if (loop_cnt)
+ acc_loop_cnt = loop_cnt;
+
+ for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) {
+ tmpval = ec->mem + ec->memlocation;
+ /*
+ * memory access: just add 1 to one byte,
+ * wrap at 255 -- memory access implies read
+ * from and write to memory location
+ */
+ *tmpval = (*tmpval + 1) & 0xff;
+ /*
+ * Addition of memblocksize - 1 to pointer
+ * with wrap around logic to ensure that every
+ * memory location is hit evenly
+ */
+ ec->memlocation = ec->memlocation + ec->memblocksize - 1;
+ ec->memlocation = ec->memlocation % wrap;
+ }
+ return i;
+}
+#pragma GCC pop_options
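
The wrap-around walk above covers the pool evenly because the stride memblocksize - 1 = 31 is coprime to JENT_MEMORY_SIZE = 64 * 32 = 2048. A quick user-space check, not part of the patch:

#include <stdio.h>

int main(void)
{
	static unsigned char hit[2048];
	unsigned int loc = 0, i, misses = 0;

	for (i = 0; i < 2048; i++) {
		hit[loc]++;
		loc = (loc + 31) % 2048;	/* memblocksize - 1, mod wrap */
	}
	for (i = 0; i < 2048; i++)
		if (hit[i] != 1)
			misses++;
	printf("bytes not hit exactly once: %u\n", misses);	/* prints 0 */
	return 0;
}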
+
+/***************************************************************************
+ * Start of entropy processing logic
+ ***************************************************************************/
+
+/**
+ * Stuck test by checking the:
+ * 1st derivative of the jitter measurement (time delta)
+ * 2nd derivative of the jitter measurement (delta of time deltas)
+ * 3rd derivative of the jitter measurement (delta of delta of time deltas)
+ *
+ * All values must always be non-zero.
+ *
+ * Input:
+ * @ec Reference to entropy collector
+ * @current_delta Jitter time delta
+ *
+ * The result is stored in ec->stuck:
+ * 0 jitter measurement not stuck (good bit)
+ * 1 jitter measurement stuck (reject bit)
+ */
+static void jent_stuck(struct rand_data *ec, __u64 current_delta)
+{
+ __s64 delta2 = ec->last_delta - current_delta;
+ __s64 delta3 = delta2 - ec->last_delta2;
+
+ ec->last_delta = current_delta;
+ ec->last_delta2 = delta2;
+
+ if (!current_delta || !delta2 || !delta3)
+ ec->stuck = 1;
+}
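
The stuck test rejects samples whose first, second or third time-delta derivative is zero: a timer advancing along a straight ramp has a constant second difference, so its third difference is zero and the samples carry no usable jitter. A user-space sketch on a toy sequence, not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t deltas[] = { 5, 7, 9, 11 };	/* linear ramp of deltas */
	int64_t last_delta = 0, last_delta2 = 0;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		int64_t delta2 = last_delta - deltas[i];
		int64_t delta3 = delta2 - last_delta2;
		int stuck = !deltas[i] || !delta2 || !delta3;

		last_delta = deltas[i];
		last_delta2 = delta2;
		/* the last two samples print "stuck" */
		printf("delta %2lld: %s\n", (long long)deltas[i],
		       stuck ? "stuck" : "ok");
	}
	return 0;
}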
+
+/**
+ * This is the heart of the entropy generation: calculate time deltas and
+ * use the CPU jitter in the time deltas. The jitter is folded into one
+ * bit. You can call this function the "random bit generator" as it
+ * produces one random bit per invocation.
+ *
+ * WARNING: ensure that ->prev_time is primed before using the output
+ * of this function! This can be done by calling this function
+ * and not using its result.
+ *
+ * Input:
+ * @entropy_collector Reference to entropy collector
+ *
+ * @return One random bit
+ */
+#pragma GCC push_options
+#pragma GCC optimize ("-O0")
+static __u64 jent_measure_jitter(struct rand_data *ec)
+{
+ __u64 time = 0;
+ __u64 data = 0;
+ __u64 current_delta = 0;
+
+ /* Invoke one noise source before time measurement to add variations */
+ jent_memaccess(ec, 0);
+
+ /*
+ * Get time stamp and calculate time delta to previous
+ * invocation to measure the timing variations
+ */
+ jent_get_nstime(&time);
+ current_delta = time - ec->prev_time;
+ ec->prev_time = time;
+
+ /* Now call the next noise sources which also folds the data */
+ jent_fold_time(ec, current_delta, &data, 0);
+
+ /*
+ * Check whether we have a stuck measurement. The enforcement
+ * is performed after the stuck value has been mixed into the
+ * entropy pool.
+ */
+ jent_stuck(ec, current_delta);
+
+ return data;
+}
+#pragma GCC pop_options
+
+/**
+ * Von Neumann unbias as explained in RFC 4086 section 4.2. As shown in the
+ * documentation of that RNG, the bits from jent_measure_jitter are considered
+ * independent, which implies that the Von Neumann unbias operation is
+ * applicable. A proof of the Von Neumann unbias operation to remove skews
+ * is given in the
+ * document "A proposal for: Functionality classes for random number
+ * generators", version 2.0 by Werner Schindler, section 5.4.1.
+ *
+ * Input:
+ * @entropy_collector Reference to entropy collector
+ *
+ * @return One random bit
+ */
+static __u64 jent_unbiased_bit(struct rand_data *entropy_collector)
+{
+ do {
+ __u64 a = jent_measure_jitter(entropy_collector);
+ __u64 b = jent_measure_jitter(entropy_collector);
+
+ if (a == b)
+ continue;
+ if (1 == a)
+ return 1;
+ else
+ return 0;
+ } while (1);
+}
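
The loop above implements the classic Von Neumann extractor: consume bit pairs, discard 00 and 11, map 10 -> 1 and 01 -> 0. For independent bits the two surviving pairs are equally likely regardless of bias. A user-space sketch, not part of the patch; biased_bit() is a hypothetical stand-in source:

#include <stdio.h>
#include <stdlib.h>

static int biased_bit(void)
{
	return (rand() & 3) != 0;	/* 1 with probability ~3/4 */
}

static int unbiased_bit(void)
{
	for (;;) {
		int a = biased_bit();
		int b = biased_bit();

		if (a != b)
			return a;	/* 10 -> 1, 01 -> 0 */
	}
}

int main(void)
{
	unsigned int i, ones = 0;

	for (i = 0; i < 100000; i++)
		ones += unbiased_bit();
	printf("ones: %u / 100000\n", ones);	/* close to 50000 */
	return 0;
}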
+
+/**
+ * Shuffle the pool a bit by mixing some value with a bijective function (XOR)
+ * into the pool.
+ *
+ * The function generates a mixer value that depends on the bits set and the
+ * location of the set bits in the random number generated by the entropy
+ * source. Therefore, based on the generated random number, this mixer value
+ * can have 2**64 different values. That mixer value is initialized with the
+ * first two SHA-1 constants. After obtaining the mixer value, it is XORed into
+ * the random number.
+ *
+ * The mixer value is not assumed to contain any entropy. But due to the XOR
+ * operation, it can also not destroy any entropy present in the entropy pool.
+ *
+ * Input:
+ * @entropy_collector Reference to entropy collector
+ */
+static void jent_stir_pool(struct rand_data *entropy_collector)
+{
+ /*
+ * to shut up GCC on 32 bit, we have to initialize the 64 bit variable
+ * with two 32 bit variables
+ */
+ union c {
+ __u64 u64;
+ __u32 u32[2];
+ };
+ /*
+ * This constant is derived from the first two 32 bit initialization
+ * vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1
+ */
+ union c constant;
+ /*
+ * The start value of the mixer variable is derived from the third
+ * and fourth 32 bit initialization vector of SHA-1 as defined in
+ * FIPS 180-4 section 5.3.1
+ */
+ union c mixer;
+ unsigned int i = 0;
+
+ /*
+ * Store the SHA-1 constants in reverse order to make up the 64 bit
+ * value -- this applies to a little endian system, on a big endian
+ * system, it reverses as expected. But this really does not matter
+ * as we do not rely on the specific numbers. We just pick the SHA-1
+ * constants as they have a good mix of set and unset bits.
+ */
+ constant.u32[1] = 0x67452301;
+ constant.u32[0] = 0xefcdab89;
+ mixer.u32[1] = 0x98badcfe;
+ mixer.u32[0] = 0x10325476;
+
+ for (i = 0; i < DATA_SIZE_BITS; i++) {
+ /*
+ * get the i-th bit of the input random number and only XOR
+ * the constant into the mixer value when that bit is set
+ */
+ if ((entropy_collector->data >> i) & 1)
+ mixer.u64 ^= constant.u64;
+ mixer.u64 = rol64(mixer.u64, 1);
+ }
+ entropy_collector->data ^= mixer.u64;
+}
+
+/**
+ * Generator of one 64 bit random number
+ * Function fills rand_data->data
+ *
+ * Input:
+ * @ec Reference to entropy collector
+ */
+#pragma GCC push_options
+#pragma GCC optimize ("-O0")
+static void jent_gen_entropy(struct rand_data *ec)
+{
+ unsigned int k = 0;
+
+ /* priming of the ->prev_time value */
+ jent_measure_jitter(ec);
+
+ while (1) {
+ __u64 data = 0;
+
+ if (ec->disable_unbias == 1)
+ data = jent_measure_jitter(ec);
+ else
+ data = jent_unbiased_bit(ec);
+
+ /* enforcement of the jent_stuck test */
+ if (ec->stuck) {
+ /*
+ * We only mix in the bit considered not appropriate
+ * without the LFSR. The reason is that if we apply
+ * the LFSR and we do not rotate, the 2nd bit with LFSR
+ * will cancel out the first LFSR application on the
+ * bad bit.
+ *
+ * And we do not rotate as we apply the next bit to the
+ * current bit location again.
+ */
+ ec->data ^= data;
+ ec->stuck = 0;
+ continue;
+ }
+
+ /*
+ * Fibonacci LFSR with the polynomial
+ * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is
+ * primitive according to
+ * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf
+ * (the shift values are the polynomial exponents minus one
+ * due to counting bits from 0 to 63). As the current
+ * position is always the LSB, the polynomial only needs
+ * to shift data in from the left without wrap.
+ */
+ ec->data ^= data;
+ ec->data ^= ((ec->data >> 63) & 1);
+ ec->data ^= ((ec->data >> 60) & 1);
+ ec->data ^= ((ec->data >> 55) & 1);
+ ec->data ^= ((ec->data >> 30) & 1);
+ ec->data ^= ((ec->data >> 27) & 1);
+ ec->data ^= ((ec->data >> 22) & 1);
+ ec->data = rol64(ec->data, 1);
+
+ /*
+ * We multiply the loop value with ->osr to obtain the
+ * oversampling rate requested by the caller
+ */
+ if (++k >= (DATA_SIZE_BITS * ec->osr))
+ break;
+ }
+ if (ec->stir)
+ jent_stir_pool(ec);
+}
+#pragma GCC pop_options
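
One step of the Fibonacci LFSR above, isolated: XOR the new bit into the LSB, feed back taps 63, 60, 55, 30, 27 and 22 (the polynomial exponents minus one), then rotate left by one so the next bit lands on a fresh position. A user-space sketch, not part of the patch:

#include <stdio.h>
#include <stdint.h>

static uint64_t rol64_sketch(uint64_t v, unsigned int s)
{
	return (v << s) | (v >> (64 - s));
}

static uint64_t lfsr_step(uint64_t pool, uint64_t bit)
{
	pool ^= bit;
	pool ^= (pool >> 63) & 1;
	pool ^= (pool >> 60) & 1;
	pool ^= (pool >> 55) & 1;
	pool ^= (pool >> 30) & 1;
	pool ^= (pool >> 27) & 1;
	pool ^= (pool >> 22) & 1;
	return rol64_sketch(pool, 1);
}

int main(void)
{
	uint64_t pool = 0;
	unsigned int i;

	/* even a constant input bit diffuses across the whole word */
	for (i = 0; i < 64; i++)
		pool = lfsr_step(pool, 1);
	printf("pool = 0x%016llx\n", (unsigned long long)pool);
	return 0;
}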
+
+/**
+ * The continuous test required by FIPS 140-2 -- the function automatically
+ * primes the test if needed.
+ *
+ * On failure, the function panics the kernel; it does not return an
+ * error code.
+ */
+static void jent_fips_test(struct rand_data *ec)
+{
+ if (!fips_enabled)
+ return;
+
+ /* prime the FIPS test */
+ if (!ec->old_data) {
+ ec->old_data = ec->data;
+ jent_gen_entropy(ec);
+ }
+
+ if (ec->data == ec->old_data)
+ panic(DRIVER_NAME ": Duplicate output detected\n");
+
+ ec->old_data = ec->data;
+}
+
+
+/**
+ * Entry function: Obtain entropy for the caller.
+ *
+ * This function invokes the entropy gathering logic as often as needed
+ * to generate as many bytes as requested by the caller. The entropy
+ * gathering logic creates 64 bits per invocation.
+ *
+ * This function truncates the last 64 bit entropy value output to the exact
+ * size specified by the caller.
+ *
+ * Input:
+ * @ec Reference to entropy collector
+ * @data pointer to buffer for storing random data -- buffer must already
+ * exist
+ * @len size of the buffer, specifying also the requested number of random
+ * bytes
+ *
+ * @return 0 when request is fulfilled or an error
+ *
+ * The following error codes can occur:
+ * -EINVAL entropy_collector is NULL
+ */
+static ssize_t jent_read_entropy(struct rand_data *ec, u8 *data, size_t len)
+{
+ u8 *p = data;
+
+ if (!ec)
+ return -EINVAL;
+
+ while (0 < len) {
+ size_t tocopy;
+
+ jent_gen_entropy(ec);
+ jent_fips_test(ec);
+ if ((DATA_SIZE_BITS / 8) < len)
+ tocopy = (DATA_SIZE_BITS / 8);
+ else
+ tocopy = len;
+ memcpy(p, &ec->data, tocopy);
+
+ len -= tocopy;
+ p += tocopy;
+ }
+
+ return 0;
+}
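
How the collector API in this file fits together, written as if the sketch lived in this file so the static functions are visible; not part of the patch:

static int __maybe_unused jent_usage_sketch(void)
{
	struct rand_data *ec;
	u8 buf[32];
	int ret;

	ec = jent_entropy_collector_alloc(1, 0);	/* osr = 1, no flags */
	if (!ec)
		return -ENOMEM;

	ret = jent_read_entropy(ec, buf, sizeof(buf));	/* fill 32 bytes */
	jent_entropy_collector_free(ec);
	return ret;
}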
+
+/***************************************************************************
+ * Initialization logic
+ ***************************************************************************/
+
+static struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
+ unsigned int flags)
+{
+ struct rand_data *entropy_collector;
+
+ entropy_collector = kzalloc(sizeof(struct rand_data), GFP_KERNEL);
+ if (!entropy_collector)
+ return NULL;
+
+ if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) {
+ /* Allocate memory for adding variations based on memory
+ * access
+ */
+ entropy_collector->mem = kzalloc(JENT_MEMORY_SIZE, GFP_KERNEL);
+ if (!entropy_collector->mem) {
+ kfree(entropy_collector);
+ return NULL;
+ }
+ entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE;
+ entropy_collector->memblocks = JENT_MEMORY_BLOCKS;
+ entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS;
+ }
+
+ /* verify and set the oversampling rate */
+ if (0 == osr)
+ osr = 1; /* minimum sampling rate is 1 */
+ entropy_collector->osr = osr;
+
+ entropy_collector->stir = 1;
+ if (flags & JENT_DISABLE_STIR)
+ entropy_collector->stir = 0;
+ if (flags & JENT_DISABLE_UNBIAS)
+ entropy_collector->disable_unbias = 1;
+
+ /* fill the data pad with non-zero values */
+ jent_gen_entropy(entropy_collector);
+
+ return entropy_collector;
+}
+
+static void jent_entropy_collector_free(struct rand_data *entropy_collector)
+{
+ kzfree(entropy_collector->mem);
+ entropy_collector->mem = NULL;
+ kzfree(entropy_collector);
+}
+
+static int jent_entropy_init(void)
+{
+ int i;
+ __u64 delta_sum = 0;
+ __u64 old_delta = 0;
+ int time_backwards = 0;
+ int count_var = 0;
+ int count_mod = 0;
+
+ /* We could perform statistical tests here, but the problem is
+ * that we only have a few loop counts to do testing. These
+ * loop counts may show some slight skew and we produce
+ * false positives.
+ *
+ * Moreover, only old systems show potentially problematic
+ * jitter entropy that could be caught here. But the RNG is
+ * intended for hardware that is current and widely used, not
+ * for old systems long out of use. Thus, no statistical
+ * tests.
+ */
+
+ /*
+ * We could add a check for system capabilities such as clock_getres or
+ * check for CONFIG_X86_TSC, but it does not make much sense as the
+ * following sanity checks verify that we have a high-resolution
+ * timer.
+ */
+ /*
+ * TESTLOOPCOUNT needs some loops to identify edge systems. 100 is
+ * definitely too little.
+ */
+#define TESTLOOPCOUNT 300
+#define CLEARCACHE 100
+ for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
+ __u64 time = 0;
+ __u64 time2 = 0;
+ __u64 folded = 0;
+ __u64 delta = 0;
+ unsigned int lowdelta = 0;
+
+ jent_get_nstime(&time);
+ jent_fold_time(NULL, time, &folded, 1<<MIN_FOLD_LOOP_BIT);
+ jent_get_nstime(&time2);
+
+ /* test whether timer works */
+ if (!time || !time2)
+ return JENT_ENOTIME;
+ delta = time2 - time;
+ /*
+ * test whether timer is fine grained enough to provide
+ * delta even when called shortly after each other -- this
+ * implies that we also have a high resolution timer
+ */
+ if (!delta)
+ return JENT_ECOARSETIME;
+
+ /*
+ * up to here we did not modify any variable that will be
+ * evaluated later, but we already performed some work. Thus we
+ * already have had an impact on the caches, branch prediction,
+ * etc. with the goal to clear it to get the worst case
+ * measurements.
+ */
+ if (CLEARCACHE > i)
+ continue;
+
+ /* test whether we have an increasing timer */
+ if (!(time2 > time))
+ time_backwards++;
+
+ /*
+ * Avoid modulo of 64 bit integer to allow code to compile
+ * on 32 bit architectures.
+ */
+ lowdelta = time2 - time;
+ if (!(lowdelta % 100))
+ count_mod++;
+
+ /*
+ * ensure that we have a varying delta timer which is necessary
+ * for the calculation of entropy -- perform this check
+ * only after the first loop is executed as we need to prime
+ * the old_data value
+ */
+ if (i) {
+ if (delta != old_delta)
+ count_var++;
+ if (delta > old_delta)
+ delta_sum += (delta - old_delta);
+ else
+ delta_sum += (old_delta - delta);
+ }
+ old_delta = delta;
+ }
+
+ /*
+ * we allow up to three times the time running backwards.
+ * CLOCK_REALTIME is affected by adjtime and NTP operations. Thus,
+ * if such an operation just happens to interfere with our test, it
+ * should not fail. The value of 3 should cover the NTP case being
+ * performed during our test run.
+ */
+ if (3 < time_backwards)
+ return JENT_ENOMONOTONIC;
+ /* Error if the time variances are always identical */
+ if (!delta_sum)
+ return JENT_EVARVAR;
+
+ /*
+ * Variations of deltas of time must on average be larger
+ * than 1 to ensure the entropy estimation
+ * implied with 1 is preserved
+ */
+ if (delta_sum <= 1)
+ return JENT_EMINVARVAR;
+
+ /*
+ * Ensure that we have variations in the time stamp below 10 for at
+ * least 10% of all checks -- on some platforms, the counter
+ * increments in multiples of 100, but not always
+ */
+ if ((TESTLOOPCOUNT/10 * 9) < count_mod)
+ return JENT_ECOARSETIME;
+
+ return 0;
+}
+
+/***************************************************************************
+ * Kernel crypto API interface
+ ***************************************************************************/
+
+struct jitterentropy {
+ spinlock_t jent_lock;
+ struct rand_data *entropy_collector;
+};
+
+static int jent_kcapi_init(struct crypto_tfm *tfm)
+{
+ struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+ int ret = 0;
+
+ rng->entropy_collector = jent_entropy_collector_alloc(1, 0);
+ if (!rng->entropy_collector)
+ ret = -ENOMEM;
+
+ spin_lock_init(&rng->jent_lock);
+ return ret;
+}
+
+static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
+{
+ struct jitterentropy *rng = crypto_tfm_ctx(tfm);
+
+ spin_lock(&rng->jent_lock);
+ if (rng->entropy_collector)
+ jent_entropy_collector_free(rng->entropy_collector);
+ rng->entropy_collector = NULL;
+ spin_unlock(&rng->jent_lock);
+}
+
+static int jent_kcapi_random(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *rdata, unsigned int dlen)
+{
+ struct jitterentropy *rng = crypto_rng_ctx(tfm);
+ int ret = 0;
+
+ spin_lock(&rng->jent_lock);
+ ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
+ spin_unlock(&rng->jent_lock);
+
+ return ret;
+}
+
+static int jent_kcapi_reset(struct crypto_rng *tfm,
+ const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static struct rng_alg jent_alg = {
+ .generate = jent_kcapi_random,
+ .seed = jent_kcapi_reset,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "jitterentropy_rng",
+ .cra_driver_name = "jitterentropy_rng",
+ .cra_priority = 100,
+ .cra_ctxsize = sizeof(struct jitterentropy),
+ .cra_module = THIS_MODULE,
+ .cra_init = jent_kcapi_init,
+ .cra_exit = jent_kcapi_cleanup,
+
+ }
+};
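
A sketch of consuming this RNG from another kernel module through the standard crypto RNG API (crypto_alloc_rng(), crypto_rng_get_bytes(), crypto_free_rng()); error handling abbreviated, not part of the patch:

#include <crypto/rng.h>

static int get_jitter_bytes(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("jitterentropy_rng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return ret;
}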
+
+static int __init jent_mod_init(void)
+{
+ int ret = 0;
+
+ ret = jent_entropy_init();
+ if (ret) {
+ pr_info(DRIVER_NAME ": Initialization failed with host not compliant with requirements: %d\n", ret);
+ return -EFAULT;
+ }
+ return crypto_register_rng(&jent_alg);
+}
+
+static void __exit jent_mod_exit(void)
+{
+ crypto_unregister_rng(&jent_alg);
+}
+
+module_init(jent_mod_init);
+module_exit(jent_mod_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Non-physical True Random Number Generator based on CPU Jitter");
+MODULE_ALIAS_CRYPTO("jitterentropy_rng");
diff --git a/crypto/krng.c b/crypto/krng.c
deleted file mode 100644
index 0224841b6579..000000000000
--- a/crypto/krng.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * RNG implementation using standard kernel RNG.
- *
- * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * any later version.
- *
- */
-
-#include <crypto/internal/rng.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/random.h>
-
-static int krng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen)
-{
- get_random_bytes(rdata, dlen);
- return 0;
-}
-
-static int krng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
-{
- return 0;
-}
-
-static struct crypto_alg krng_alg = {
- .cra_name = "stdrng",
- .cra_driver_name = "krng",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_RNG,
- .cra_ctxsize = 0,
- .cra_type = &crypto_rng_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .rng = {
- .rng_make_random = krng_get_random,
- .rng_reset = krng_reset,
- .seedsize = 0,
- }
- }
-};
-
-
-/* Module initalization */
-static int __init krng_mod_init(void)
-{
- return crypto_register_alg(&krng_alg);
-}
-
-static void __exit krng_mod_fini(void)
-{
- crypto_unregister_alg(&krng_alg);
- return;
-}
-
-module_init(krng_mod_init);
-module_exit(krng_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Kernel Random Number Generator");
-MODULE_ALIAS_CRYPTO("stdrng");
-MODULE_ALIAS_CRYPTO("krng");
diff --git a/crypto/md5.c b/crypto/md5.c
index 36f5e5b103f3..33d17e9a8702 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -51,10 +51,10 @@ static int md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
- mctx->hash[0] = 0x67452301;
- mctx->hash[1] = 0xefcdab89;
- mctx->hash[2] = 0x98badcfe;
- mctx->hash[3] = 0x10325476;
+ mctx->hash[0] = MD5_H0;
+ mctx->hash[1] = MD5_H1;
+ mctx->hash[2] = MD5_H2;
+ mctx->hash[3] = MD5_H3;
mctx->byte_count = 0;
return 0;
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index 7140fe70c7af..7a13b4088857 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -38,11 +38,6 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
return 0;
}
-static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
-{
- return alg->cra_ctxsize;
-}
-
static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
{
return 0;
@@ -77,7 +72,7 @@ static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
}
static const struct crypto_type crypto_pcomp_type = {
- .extsize = crypto_pcomp_extsize,
+ .extsize = crypto_alg_extsize,
.init = crypto_pcomp_init,
.init_tfm = crypto_pcomp_init_tfm,
#ifdef CONFIG_PROC_FS
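
Here, and again in shash.c further down, a per-type extsize callback that
merely returned cra_ctxsize is replaced by the shared crypto_alg_extsize()
helper. A sketch of that helper, assuming it does exactly what the removed
copies did:

    /* crypto/algapi.c (sketch) */
    unsigned int crypto_alg_extsize(struct crypto_alg *alg)
    {
        return alg->cra_ctxsize;
    }
    EXPORT_SYMBOL_GPL(crypto_alg_extsize);
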
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index c305d4112735..45e7d5155672 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -20,6 +20,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
+#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -60,8 +61,8 @@ static struct padata_pcrypt pdecrypt;
static struct kset *pcrypt_kset;
struct pcrypt_instance_ctx {
- struct crypto_spawn spawn;
- unsigned int tfm_count;
+ struct crypto_aead_spawn spawn;
+ atomic_t tfm_count;
};
struct pcrypt_aead_ctx {
@@ -122,14 +123,6 @@ static void pcrypt_aead_serial(struct padata_priv *padata)
aead_request_complete(req->base.data, padata->info);
}
-static void pcrypt_aead_giv_serial(struct padata_priv *padata)
-{
- struct pcrypt_request *preq = pcrypt_padata_request(padata);
- struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
-
- aead_request_complete(req->areq.base.data, padata->info);
-}
-
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
struct aead_request *req = areq->data;
@@ -175,7 +168,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
pcrypt_aead_done, req);
aead_request_set_crypt(creq, req->src, req->dst,
req->cryptlen, req->iv);
- aead_request_set_assoc(creq, req->assoc, req->assoclen);
+ aead_request_set_ad(creq, req->assoclen);
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
if (!err)
@@ -217,7 +210,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
pcrypt_aead_done, req);
aead_request_set_crypt(creq, req->src, req->dst,
req->cryptlen, req->iv);
- aead_request_set_assoc(creq, req->assoc, req->assoclen);
+ aead_request_set_ad(creq, req->assoclen);
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
if (!err)
@@ -226,182 +219,134 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
return err;
}
-static void pcrypt_aead_givenc(struct padata_priv *padata)
-{
- struct pcrypt_request *preq = pcrypt_padata_request(padata);
- struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
-
- padata->info = crypto_aead_givencrypt(req);
-
- if (padata->info == -EINPROGRESS)
- return;
-
- padata_do_serial(padata);
-}
-
-static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
-{
- int err;
- struct aead_request *areq = &req->areq;
- struct pcrypt_request *preq = aead_request_ctx(areq);
- struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
- struct padata_priv *padata = pcrypt_request_padata(preq);
- struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
- struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
- u32 flags = aead_request_flags(areq);
-
- memset(padata, 0, sizeof(struct padata_priv));
-
- padata->parallel = pcrypt_aead_givenc;
- padata->serial = pcrypt_aead_giv_serial;
-
- aead_givcrypt_set_tfm(creq, ctx->child);
- aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
- pcrypt_aead_done, areq);
- aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
- areq->cryptlen, areq->iv);
- aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
- aead_givcrypt_set_giv(creq, req->giv, req->seq);
-
- err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
- if (!err)
- return -EINPROGRESS;
-
- return err;
-}
-
-static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
+static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
int cpu, cpu_index;
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct aead_instance *inst = aead_alg_instance(tfm);
+ struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *cipher;
- ictx->tfm_count++;
-
- cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
+ cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
+ cpumask_weight(cpu_online_mask);
ctx->cb_cpu = cpumask_first(cpu_online_mask);
for (cpu = 0; cpu < cpu_index; cpu++)
ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
- cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
+ cipher = crypto_spawn_aead(&ictx->spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
- tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
- + sizeof(struct aead_givcrypt_request)
- + crypto_aead_reqsize(cipher);
+ crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
+ sizeof(struct aead_request) +
+ crypto_aead_reqsize(cipher));
return 0;
}
-static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
+static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
- struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
}
-static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
+static int pcrypt_init_instance(struct crypto_instance *inst,
+ struct crypto_alg *alg)
{
- struct crypto_instance *inst;
- struct pcrypt_instance_ctx *ctx;
- int err;
-
- inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- if (!inst) {
- inst = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_inst;
+ return -ENAMETOOLONG;
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
- ctx = crypto_instance_ctx(inst);
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK);
- if (err)
- goto out_free_inst;
-
inst->alg.cra_priority = alg->cra_priority + 100;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
-out:
- return inst;
+ return 0;
+}
+static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
+ u32 type, u32 mask)
+{
+ struct pcrypt_instance_ctx *ctx;
+ struct aead_instance *inst;
+ struct aead_alg *alg;
+ const char *name;
+ int err;
+
+ name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ ctx = aead_instance_ctx(inst);
+ crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));
+
+ err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
+ if (err)
+ goto out_free_inst;
+
+ alg = crypto_spawn_aead_alg(&ctx->spawn);
+ err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
+ if (err)
+ goto out_drop_aead;
+
+ inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
+ inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
+
+ inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
+
+ inst->alg.init = pcrypt_aead_init_tfm;
+ inst->alg.exit = pcrypt_aead_exit_tfm;
+
+ inst->alg.setkey = pcrypt_aead_setkey;
+ inst->alg.setauthsize = pcrypt_aead_setauthsize;
+ inst->alg.encrypt = pcrypt_aead_encrypt;
+ inst->alg.decrypt = pcrypt_aead_decrypt;
+
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_aead;
+
+out:
+ return err;
+
+out_drop_aead:
+ crypto_drop_aead(&ctx->spawn);
out_free_inst:
kfree(inst);
- inst = ERR_PTR(err);
goto out;
}
-static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
- u32 type, u32 mask)
-{
- struct crypto_instance *inst;
- struct crypto_alg *alg;
-
- alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
- if (IS_ERR(alg))
- return ERR_CAST(alg);
-
- inst = pcrypt_alloc_instance(alg);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
- inst->alg.cra_type = &crypto_aead_type;
-
- inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
- inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
- inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
-
- inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
-
- inst->alg.cra_init = pcrypt_aead_init_tfm;
- inst->alg.cra_exit = pcrypt_aead_exit_tfm;
-
- inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
- inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
- inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
- inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
- inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
-
-out_put_alg:
- crypto_mod_put(alg);
- return inst;
-}
-
-static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
+static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
+ return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- return pcrypt_alloc_aead(tb, algt->type, algt->mask);
+ return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
}
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
static void pcrypt_free(struct crypto_instance *inst)
{
struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
- crypto_drop_spawn(&ctx->spawn);
+ crypto_drop_aead(&ctx->spawn);
kfree(inst);
}
@@ -516,7 +461,7 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
- .alloc = pcrypt_alloc,
+ .create = pcrypt_create,
.free = pcrypt_free,
.module = THIS_MODULE,
};
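
Nothing changes for users of the template: it is still instantiated by name,
and the crypto API now calls pcrypt_create() instead of the removed .alloc
callback to build the instance. A hedged allocation sketch (the inner
algorithm string is illustrative):

    #include <crypto/aead.h>

    static struct crypto_aead *alloc_parallel_aead(void)
    {
        /* Resolved through pcrypt_create_aead() above. */
        return crypto_alloc_aead("pcrypt(authenc(hmac(sha1),cbc(aes)))",
                                 0, 0);
    }
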
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
new file mode 100644
index 000000000000..387b5c887a80
--- /dev/null
+++ b/crypto/poly1305_generic.c
@@ -0,0 +1,321 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define POLY1305_BLOCK_SIZE 16
+#define POLY1305_KEY_SIZE 32
+#define POLY1305_DIGEST_SIZE 16
+
+struct poly1305_desc_ctx {
+ /* key */
+ u32 r[5];
+ /* finalize key */
+ u32 s[4];
+ /* accumulator */
+ u32 h[5];
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* r key has been set */
+ bool rset;
+ /* s key has been set */
+ bool sset;
+};
+
+static inline u64 mlt(u64 a, u64 b)
+{
+ return a * b;
+}
+
+static inline u32 sr(u64 v, u_char n)
+{
+ return v >> n;
+}
+
+static inline u32 and(u32 v, u32 mask)
+{
+ return v & mask;
+}
+
+static inline u32 le32_to_cpuvp(const void *p)
+{
+ return le32_to_cpup(p);
+}
+
+static int poly1305_init(struct shash_desc *desc)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memset(dctx->h, 0, sizeof(dctx->h));
+ dctx->buflen = 0;
+ dctx->rset = false;
+ dctx->sset = false;
+
+ return 0;
+}
+
+static int poly1305_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ /* Poly1305 requires a unique key for each tag, which implies that
+ * we can't set it on the tfm that gets accessed by multiple users
+ * simultaneously. Instead we expect the key as the first 32 bytes in
+ * the update() call. */
+ return -ENOTSUPP;
+}
+
+static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
+{
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ dctx->r[0] = (le32_to_cpuvp(key + 0) >> 0) & 0x3ffffff;
+ dctx->r[1] = (le32_to_cpuvp(key + 3) >> 2) & 0x3ffff03;
+ dctx->r[2] = (le32_to_cpuvp(key + 6) >> 4) & 0x3ffc0ff;
+ dctx->r[3] = (le32_to_cpuvp(key + 9) >> 6) & 0x3f03fff;
+ dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff;
+}
+
+static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
+{
+ dctx->s[0] = le32_to_cpuvp(key + 0);
+ dctx->s[1] = le32_to_cpuvp(key + 4);
+ dctx->s[2] = le32_to_cpuvp(key + 8);
+ dctx->s[3] = le32_to_cpuvp(key + 12);
+}
+
+static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen,
+ u32 hibit)
+{
+ u32 r0, r1, r2, r3, r4;
+ u32 s1, s2, s3, s4;
+ u32 h0, h1, h2, h3, h4;
+ u64 d0, d1, d2, d3, d4;
+
+ if (unlikely(!dctx->sset)) {
+ if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
+ poly1305_setrkey(dctx, src);
+ src += POLY1305_BLOCK_SIZE;
+ srclen -= POLY1305_BLOCK_SIZE;
+ dctx->rset = true;
+ }
+ if (srclen >= POLY1305_BLOCK_SIZE) {
+ poly1305_setskey(dctx, src);
+ src += POLY1305_BLOCK_SIZE;
+ srclen -= POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ }
+
+ r0 = dctx->r[0];
+ r1 = dctx->r[1];
+ r2 = dctx->r[2];
+ r3 = dctx->r[3];
+ r4 = dctx->r[4];
+
+ s1 = r1 * 5;
+ s2 = r2 * 5;
+ s3 = r3 * 5;
+ s4 = r4 * 5;
+
+ h0 = dctx->h[0];
+ h1 = dctx->h[1];
+ h2 = dctx->h[2];
+ h3 = dctx->h[3];
+ h4 = dctx->h[4];
+
+ while (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+
+ /* h += m[i] */
+ h0 += (le32_to_cpuvp(src + 0) >> 0) & 0x3ffffff;
+ h1 += (le32_to_cpuvp(src + 3) >> 2) & 0x3ffffff;
+ h2 += (le32_to_cpuvp(src + 6) >> 4) & 0x3ffffff;
+ h3 += (le32_to_cpuvp(src + 9) >> 6) & 0x3ffffff;
+ h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit;
+
+ /* h *= r */
+ d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
+ mlt(h3, s2) + mlt(h4, s1);
+ d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) +
+ mlt(h3, s3) + mlt(h4, s2);
+ d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) +
+ mlt(h3, s4) + mlt(h4, s3);
+ d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) +
+ mlt(h3, r0) + mlt(h4, s4);
+ d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) +
+ mlt(h3, r1) + mlt(h4, r0);
+
+ /* (partial) h %= p */
+ d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff);
+ d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff);
+ d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff);
+ d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff);
+ h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff);
+ h1 += h0 >> 26; h0 = h0 & 0x3ffffff;
+
+ src += POLY1305_BLOCK_SIZE;
+ srclen -= POLY1305_BLOCK_SIZE;
+ }
+
+ dctx->h[0] = h0;
+ dctx->h[1] = h1;
+ dctx->h[2] = h2;
+ dctx->h[3] = h3;
+ dctx->h[4] = h4;
+
+ return srclen;
+}
+
+static int poly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int bytes;
+
+ if (unlikely(dctx->buflen)) {
+ bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ srclen -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_blocks(dctx, dctx->buf,
+ POLY1305_BLOCK_SIZE, 1 << 24);
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+ bytes = poly1305_blocks(dctx, src, srclen, 1 << 24);
+ src += srclen - bytes;
+ srclen = bytes;
+ }
+
+ if (unlikely(srclen)) {
+ dctx->buflen = srclen;
+ memcpy(dctx->buf, src, srclen);
+ }
+
+ return 0;
+}
+
+static int poly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+ __le32 *mac = (__le32 *)dst;
+ u32 h0, h1, h2, h3, h4;
+ u32 g0, g1, g2, g3, g4;
+ u32 mask;
+ u64 f = 0;
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ if (unlikely(dctx->buflen)) {
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ /* fully carry h */
+ h0 = dctx->h[0];
+ h1 = dctx->h[1];
+ h2 = dctx->h[2];
+ h3 = dctx->h[3];
+ h4 = dctx->h[4];
+
+ h2 += (h1 >> 26); h1 = h1 & 0x3ffffff;
+ h3 += (h2 >> 26); h2 = h2 & 0x3ffffff;
+ h4 += (h3 >> 26); h3 = h3 & 0x3ffffff;
+ h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff;
+ h1 += (h0 >> 26); h0 = h0 & 0x3ffffff;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff;
+ g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff;
+ g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff;
+ g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff;
+
+ /* select h if h < p, or h + -p if h >= p */
+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = ~mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ /* h = h % (2^128) */
+ h0 = (h0 >> 0) | (h1 << 26);
+ h1 = (h1 >> 6) | (h2 << 20);
+ h2 = (h2 >> 12) | (h3 << 14);
+ h3 = (h3 >> 18) | (h4 << 8);
+
+ /* mac = (h + s) % (2^128) */
+ f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f);
+ f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f);
+ f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f);
+ f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f);
+
+ return 0;
+}
+
+static struct shash_alg poly1305_alg = {
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .init = poly1305_init,
+ .update = poly1305_update,
+ .final = poly1305_final,
+ .setkey = poly1305_setkey,
+ .descsize = sizeof(struct poly1305_desc_ctx),
+ .base = {
+ .cra_name = "poly1305",
+ .cra_driver_name = "poly1305-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_alignmask = sizeof(u32) - 1,
+ .cra_blocksize = POLY1305_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init poly1305_mod_init(void)
+{
+ return crypto_register_shash(&poly1305_alg);
+}
+
+static void __exit poly1305_mod_exit(void)
+{
+ crypto_unregister_shash(&poly1305_alg);
+}
+
+module_init(poly1305_mod_init);
+module_exit(poly1305_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi ");
+MODULE_DESCRIPTION("Poly1305 authenticator");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-generic");
diff --git a/crypto/proc.c b/crypto/proc.c
index 4ffe73b51612..2cc10c96d753 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -20,47 +20,8 @@
#include <linux/rwsem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/sysctl.h>
#include "internal.h"
-#ifdef CONFIG_CRYPTO_FIPS
-static struct ctl_table crypto_sysctl_table[] = {
- {
- .procname = "fips_enabled",
- .data = &fips_enabled,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = proc_dointvec
- },
- {}
-};
-
-static struct ctl_table crypto_dir_table[] = {
- {
- .procname = "crypto",
- .mode = 0555,
- .child = crypto_sysctl_table
- },
- {}
-};
-
-static struct ctl_table_header *crypto_sysctls;
-
-static void crypto_proc_fips_init(void)
-{
- crypto_sysctls = register_sysctl_table(crypto_dir_table);
-}
-
-static void crypto_proc_fips_exit(void)
-{
- if (crypto_sysctls)
- unregister_sysctl_table(crypto_sysctls);
-}
-#else
-#define crypto_proc_fips_init()
-#define crypto_proc_fips_exit()
-#endif
-
static void *c_start(struct seq_file *m, loff_t *pos)
{
down_read(&crypto_alg_sem);
@@ -148,11 +109,9 @@ static const struct file_operations proc_crypto_ops = {
void __init crypto_init_proc(void)
{
proc_create("crypto", 0, NULL, &proc_crypto_ops);
- crypto_proc_fips_init();
}
void __exit crypto_exit_proc(void)
{
- crypto_proc_fips_exit();
remove_proc_entry("crypto", NULL);
}
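
The fips_enabled sysctl is not dropped, only rehomed next to the flag it
exposes. A sketch of the relocated registration, assuming it lands in
crypto/fips.c:

    #include <linux/fips.h>
    #include <linux/sysctl.h>

    static struct ctl_table crypto_sysctl_table[] = {
        {
            .procname     = "fips_enabled",
            .data         = &fips_enabled,
            .maxlen       = sizeof(int),
            .mode         = 0444,
            .proc_handler = proc_dointvec
        },
        {}
    };

    static struct ctl_table crypto_dir_table[] = {
        {
            .procname = "crypto",
            .mode     = 0555,
            .child    = crypto_sysctl_table
        },
        {}
    };
    /* register_sysctl_table(crypto_dir_table) from the fips.c init path */
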
diff --git a/crypto/rng.c b/crypto/rng.c
index e0a25c2456de..b81cffb13bab 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -4,6 +4,7 @@
* RNG operations.
*
* Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -24,12 +25,19 @@
#include <linux/cryptouser.h>
#include <net/netlink.h>
+#include "internal.h"
+
static DEFINE_MUTEX(crypto_default_rng_lock);
struct crypto_rng *crypto_default_rng;
EXPORT_SYMBOL_GPL(crypto_default_rng);
static int crypto_default_rng_refcnt;
-static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_rng, base);
+}
+
+int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
u8 *buf = NULL;
int err;
@@ -43,23 +51,25 @@ static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
seed = buf;
}
- err = crypto_rng_alg(tfm)->rng_reset(tfm, seed, slen);
+ err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
- kfree(buf);
+ kzfree(buf);
return err;
}
+EXPORT_SYMBOL_GPL(crypto_rng_reset);
-static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+static int crypto_rng_init_tfm(struct crypto_tfm *tfm)
{
- struct rng_alg *alg = &tfm->__crt_alg->cra_rng;
- struct rng_tfm *ops = &tfm->crt_rng;
-
- ops->rng_gen_random = alg->rng_make_random;
- ops->rng_reset = rngapi_reset;
-
return 0;
}
+static unsigned int seedsize(struct crypto_alg *alg)
+{
+ struct rng_alg *ralg = container_of(alg, struct rng_alg, base);
+
+ return ralg->seedsize;
+}
+
#ifdef CONFIG_NET
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -67,7 +77,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rrng.type, "rng", sizeof(rrng.type));
- rrng.seedsize = alg->cra_rng.seedsize;
+ rrng.seedsize = seedsize(alg);
if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
sizeof(struct crypto_report_rng), &rrng))
@@ -89,24 +99,27 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : rng\n");
- seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize);
+ seq_printf(m, "seedsize : %u\n", seedsize(alg));
}
-static unsigned int crypto_rng_ctxsize(struct crypto_alg *alg, u32 type,
- u32 mask)
-{
- return alg->cra_ctxsize;
-}
-
-const struct crypto_type crypto_rng_type = {
- .ctxsize = crypto_rng_ctxsize,
- .init = crypto_init_rng_ops,
+static const struct crypto_type crypto_rng_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_rng_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
.report = crypto_rng_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .tfmsize = offsetof(struct crypto_rng, base),
};
-EXPORT_SYMBOL_GPL(crypto_rng_type);
+
+struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_rng_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_rng);
int crypto_get_default_rng(void)
{
@@ -142,13 +155,82 @@ EXPORT_SYMBOL_GPL(crypto_get_default_rng);
void crypto_put_default_rng(void)
{
mutex_lock(&crypto_default_rng_lock);
- if (!--crypto_default_rng_refcnt) {
- crypto_free_rng(crypto_default_rng);
- crypto_default_rng = NULL;
- }
+ crypto_default_rng_refcnt--;
mutex_unlock(&crypto_default_rng_lock);
}
EXPORT_SYMBOL_GPL(crypto_put_default_rng);
+#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
+int crypto_del_default_rng(void)
+{
+ int err = -EBUSY;
+
+ mutex_lock(&crypto_default_rng_lock);
+ if (crypto_default_rng_refcnt)
+ goto out;
+
+ crypto_free_rng(crypto_default_rng);
+ crypto_default_rng = NULL;
+
+ err = 0;
+
+out:
+ mutex_unlock(&crypto_default_rng_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(crypto_del_default_rng);
+#endif
+
+int crypto_register_rng(struct rng_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ if (alg->seedsize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ base->cra_type = &crypto_rng_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_rng);
+
+void crypto_unregister_rng(struct rng_alg *alg)
+{
+ crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_rng);
+
+int crypto_register_rngs(struct rng_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_register_rng(algs + i);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--i; i >= 0; --i)
+ crypto_unregister_rng(algs + i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_rngs);
+
+void crypto_unregister_rngs(struct rng_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_rng(algs + i);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_rngs);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Random Number Generator");
diff --git a/crypto/rsa.c b/crypto/rsa.c
new file mode 100644
index 000000000000..752af0656f2e
--- /dev/null
+++ b/crypto/rsa.c
@@ -0,0 +1,315 @@
+/* RSA asymmetric public-key algorithm [RFC3447]
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+
+/*
+ * RSAEP function [RFC3447 sec 5.1.1]
+ * c = m^e mod n;
+ */
+static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m)
+{
+ /* (1) Validate 0 <= m < n */
+ if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
+ return -EINVAL;
+
+ /* (2) c = m^e mod n */
+ return mpi_powm(c, m, key->e, key->n);
+}
+
+/*
+ * RSADP function [RFC3447 sec 5.1.2]
+ * m = c^d mod n;
+ */
+static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c)
+{
+ /* (1) Validate 0 <= c < n */
+ if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
+ return -EINVAL;
+
+ /* (2) m = c^d mod n */
+ return mpi_powm(m, c, key->d, key->n);
+}
+
+/*
+ * RSASP1 function [RFC3447 sec 5.2.1]
+ * s = m^d mod n
+ */
+static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m)
+{
+ /* (1) Validate 0 <= m < n */
+ if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
+ return -EINVAL;
+
+ /* (2) s = m^d mod n */
+ return mpi_powm(s, m, key->d, key->n);
+}
+
+/*
+ * RSAVP1 function [RFC3447 sec 5.2.2]
+ * m = s^e mod n;
+ */
+static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s)
+{
+ /* (1) Validate 0 <= s < n */
+ if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
+ return -EINVAL;
+
+ /* (2) m = s^e mod n */
+ return mpi_powm(m, s, key->e, key->n);
+}
+
+static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm)
+{
+ return akcipher_tfm_ctx(tfm);
+}
+
+static int rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ const struct rsa_key *pkey = rsa_get_key(tfm);
+ MPI m, c = mpi_alloc(0);
+ int ret = 0;
+ int sign;
+
+ if (!c)
+ return -ENOMEM;
+
+ if (unlikely(!pkey->n || !pkey->e)) {
+ ret = -EINVAL;
+ goto err_free_c;
+ }
+
+ if (req->dst_len < mpi_get_size(pkey->n)) {
+ req->dst_len = mpi_get_size(pkey->n);
+ ret = -EOVERFLOW;
+ goto err_free_c;
+ }
+
+ m = mpi_read_raw_data(req->src, req->src_len);
+ if (!m) {
+ ret = -ENOMEM;
+ goto err_free_c;
+ }
+
+ ret = _rsa_enc(pkey, c, m);
+ if (ret)
+ goto err_free_m;
+
+ ret = mpi_read_buffer(c, req->dst, req->dst_len, &req->dst_len, &sign);
+ if (ret)
+ goto err_free_m;
+
+ if (sign < 0) {
+ ret = -EBADMSG;
+ goto err_free_m;
+ }
+
+err_free_m:
+ mpi_free(m);
+err_free_c:
+ mpi_free(c);
+ return ret;
+}
+
+static int rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ const struct rsa_key *pkey = rsa_get_key(tfm);
+ MPI c, m = mpi_alloc(0);
+ int ret = 0;
+ int sign;
+
+ if (!m)
+ return -ENOMEM;
+
+ if (unlikely(!pkey->n || !pkey->d)) {
+ ret = -EINVAL;
+ goto err_free_m;
+ }
+
+ if (req->dst_len < mpi_get_size(pkey->n)) {
+ req->dst_len = mpi_get_size(pkey->n);
+ ret = -EOVERFLOW;
+ goto err_free_m;
+ }
+
+ c = mpi_read_raw_data(req->src, req->src_len);
+ if (!c) {
+ ret = -ENOMEM;
+ goto err_free_m;
+ }
+
+ ret = _rsa_dec(pkey, m, c);
+ if (ret)
+ goto err_free_c;
+
+ ret = mpi_read_buffer(m, req->dst, req->dst_len, &req->dst_len, &sign);
+ if (ret)
+ goto err_free_c;
+
+ if (sign < 0) {
+ ret = -EBADMSG;
+ goto err_free_c;
+ }
+
+err_free_c:
+ mpi_free(c);
+err_free_m:
+ mpi_free(m);
+ return ret;
+}
+
+static int rsa_sign(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ const struct rsa_key *pkey = rsa_get_key(tfm);
+ MPI m, s = mpi_alloc(0);
+ int ret = 0;
+ int sign;
+
+ if (!s)
+ return -ENOMEM;
+
+ if (unlikely(!pkey->n || !pkey->d)) {
+ ret = -EINVAL;
+ goto err_free_s;
+ }
+
+ if (req->dst_len < mpi_get_size(pkey->n)) {
+ req->dst_len = mpi_get_size(pkey->n);
+ ret = -EOVERFLOW;
+ goto err_free_s;
+ }
+
+ m = mpi_read_raw_data(req->src, req->src_len);
+ if (!m) {
+ ret = -ENOMEM;
+ goto err_free_s;
+ }
+
+ ret = _rsa_sign(pkey, s, m);
+ if (ret)
+ goto err_free_m;
+
+ ret = mpi_read_buffer(s, req->dst, req->dst_len, &req->dst_len, &sign);
+ if (ret)
+ goto err_free_m;
+
+ if (sign < 0) {
+ ret = -EBADMSG;
+ goto err_free_m;
+ }
+
+err_free_m:
+ mpi_free(m);
+err_free_s:
+ mpi_free(s);
+ return ret;
+}
+
+static int rsa_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ const struct rsa_key *pkey = rsa_get_key(tfm);
+ MPI s, m = mpi_alloc(0);
+ int ret = 0;
+ int sign;
+
+ if (!m)
+ return -ENOMEM;
+
+ if (unlikely(!pkey->n || !pkey->e)) {
+ ret = -EINVAL;
+ goto err_free_m;
+ }
+
+ if (req->dst_len < mpi_get_size(pkey->n)) {
+ req->dst_len = mpi_get_size(pkey->n);
+ ret = -EOVERFLOW;
+ goto err_free_m;
+ }
+
+ s = mpi_read_raw_data(req->src, req->src_len);
+ if (!s) {
+ ret = -ENOMEM;
+ goto err_free_m;
+ }
+
+ ret = _rsa_verify(pkey, m, s);
+ if (ret)
+ goto err_free_s;
+
+ ret = mpi_read_buffer(m, req->dst, req->dst_len, &req->dst_len, &sign);
+ if (ret)
+ goto err_free_s;
+
+ if (sign < 0) {
+ ret = -EBADMSG;
+ goto err_free_s;
+ }
+
+err_free_s:
+ mpi_free(s);
+err_free_m:
+ mpi_free(m);
+ return ret;
+}
+
+static int rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+
+ return rsa_parse_key(pkey, key, keylen);
+}
+
+static void rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+
+ rsa_free_key(pkey);
+}
+
+static struct akcipher_alg rsa = {
+ .encrypt = rsa_enc,
+ .decrypt = rsa_dec,
+ .sign = rsa_sign,
+ .verify = rsa_verify,
+ .setkey = rsa_setkey,
+ .exit = rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct rsa_key),
+ },
+};
+
+static int rsa_init(void)
+{
+ return crypto_register_akcipher(&rsa);
+}
+
+static void rsa_exit(void)
+{
+ crypto_unregister_akcipher(&rsa);
+}
+
+module_init(rsa_init);
+module_exit(rsa_exit);
+MODULE_ALIAS_CRYPTO("rsa");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RSA generic algorithm");
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
new file mode 100644
index 000000000000..3e8e0a9e5a8e
--- /dev/null
+++ b/crypto/rsa_helper.c
@@ -0,0 +1,121 @@
+/*
+ * RSA key extract helper
+ *
+ * Copyright (c) 2015, Intel Corporation
+ * Authors: Tadeusz Struk <tadeusz.struk@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/fips.h>
+#include <crypto/internal/rsa.h>
+#include "rsakey-asn1.h"
+
+int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct rsa_key *key = context;
+
+ key->n = mpi_read_raw_data(value, vlen);
+
+ if (!key->n)
+ return -ENOMEM;
+
+ /* In FIPS mode only allow key size 2K & 3K */
+ if (fips_enabled && (mpi_get_size(key->n) != 256 ||
+ mpi_get_size(key->n) != 384)) {
+ pr_err("RSA: key size not allowed in FIPS mode\n");
+ mpi_free(key->n);
+ key->n = NULL;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct rsa_key *key = context;
+
+ key->e = mpi_read_raw_data(value, vlen);
+
+ if (!key->e)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct rsa_key *key = context;
+
+ key->d = mpi_read_raw_data(value, vlen);
+
+ if (!key->d)
+ return -ENOMEM;
+
+ /* In FIPS mode only allow key size 2K & 3K */
+ if (fips_enabled && (mpi_get_size(key->d) != 256 ||
+ mpi_get_size(key->d) != 384)) {
+ pr_err("RSA: key size not allowed in FIPS mode\n");
+ mpi_free(key->d);
+ key->d = NULL;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void free_mpis(struct rsa_key *key)
+{
+ mpi_free(key->n);
+ mpi_free(key->e);
+ mpi_free(key->d);
+ key->n = NULL;
+ key->e = NULL;
+ key->d = NULL;
+}
+
+/**
+ * rsa_free_key() - frees rsa key allocated by rsa_parse_key()
+ *
+ * @key: struct rsa_key key representation
+ */
+void rsa_free_key(struct rsa_key *key)
+{
+ free_mpis(key);
+}
+EXPORT_SYMBOL_GPL(rsa_free_key);
+
+/**
+ * rsa_parse_key() - extracts an rsa key from BER encoded buffer
+ * and stores it in the provided struct rsa_key
+ *
+ * @rsa_key: struct rsa_key key representation
+ * @key: key in BER format
+ * @key_len: length of key
+ *
+ * Return: 0 on success or error code in case of error
+ */
+int rsa_parse_key(struct rsa_key *rsa_key, const void *key,
+ unsigned int key_len)
+{
+ int ret;
+
+ free_mpis(rsa_key);
+ ret = asn1_ber_decoder(&rsakey_decoder, rsa_key, key, key_len);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+error:
+ free_mpis(rsa_key);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rsa_parse_key);
diff --git a/crypto/rsakey.asn1 b/crypto/rsakey.asn1
new file mode 100644
index 000000000000..3c7b5df7b428
--- /dev/null
+++ b/crypto/rsakey.asn1
@@ -0,0 +1,5 @@
+RsaKey ::= SEQUENCE {
+ n INTEGER ({ rsa_get_n }),
+ e INTEGER ({ rsa_get_e }),
+ d INTEGER ({ rsa_get_d })
+}
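
The grammar above is translated at build time by the kernel's ASN.1 compiler
into rsakey-asn1.c/.h, providing the rsakey_decoder that rsa_parse_key()
feeds to asn1_ber_decoder(); each INTEGER triggers the named rsa_get_*
callback. A minimal round-trip sketch, assuming the caller holds a
BER-encoded key:

    #include <crypto/internal/rsa.h>

    static int check_key_parses(const void *ber, unsigned int len)
    {
        struct rsa_key key = {};
        int err;

        err = rsa_parse_key(&key, ber, len);    /* fills the n, e, d MPIs */
        if (err)
            return err;

        rsa_free_key(&key);
        return 0;
    }
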
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 3bd749c7bb70..ea5815c5e128 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -54,7 +54,11 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
struct page *page;
page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- if (!PageSlab(page))
+ /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
+ * PageSlab cannot be optimised away per se due to
+ * use of volatile pointer.
+ */
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
flush_dcache_page(page);
}
@@ -104,22 +108,18 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out)
{
struct scatter_walk walk;
- unsigned int offset = 0;
+ struct scatterlist tmp[2];
if (!nbytes)
return;
- for (;;) {
- scatterwalk_start(&walk, sg);
+ sg = scatterwalk_ffwd(tmp, sg, start);
- if (start < offset + sg->length)
- break;
+ if (sg_page(sg) == virt_to_page(buf) &&
+ sg->offset == offset_in_page(buf))
+ return;
- offset += sg->length;
- sg = sg_next(sg);
- }
-
- scatterwalk_advance(&walk, start - offset);
+ scatterwalk_start(&walk, sg);
scatterwalk_copychunks(buf, &walk, nbytes, out);
scatterwalk_done(&walk, out, 0);
}
@@ -146,3 +146,26 @@ int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
return n;
}
EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen);
+
+struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+ struct scatterlist *src,
+ unsigned int len)
+{
+ for (;;) {
+ if (!len)
+ return src;
+
+ if (src->length > len)
+ break;
+
+ len -= src->length;
+ src = sg_next(src);
+ }
+
+ sg_init_table(dst, 2);
+ sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
+ scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);
+
+ return dst;
+}
+EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
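
scatterwalk_ffwd() returns either the advanced original list or the
two-entry dst table, so callers must keep dst alive for as long as the
returned pointer is in use. A sketch mirroring the rewritten
scatterwalk_map_and_copy() above, reading nbytes at an offset:

    #include <crypto/scatterwalk.h>

    static void sg_peek(void *buf, struct scatterlist *sg,
                        unsigned int start, unsigned int nbytes)
    {
        struct scatterlist tmp[2];    /* must outlive the walk */
        struct scatter_walk walk;

        scatterwalk_start(&walk, scatterwalk_ffwd(tmp, sg, start));
        scatterwalk_copychunks(buf, &walk, nbytes, 0);
        scatterwalk_done(&walk, 0, 0);
    }
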
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index b7bb9a2f4a31..122c56e3491b 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -13,9 +13,11 @@
*
*/
-#include <crypto/internal/aead.h>
+#include <crypto/internal/geniv.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/null.h>
#include <crypto/rng.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -24,11 +26,25 @@
#include <linux/spinlock.h>
#include <linux/string.h>
+struct seqniv_request_ctx {
+ struct scatterlist dst[2];
+ struct aead_request subreq;
+};
+
struct seqiv_ctx {
spinlock_t lock;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
+struct seqiv_aead_ctx {
+ /* aead_geniv_ctx must be the first element */
+ struct aead_geniv_ctx geniv;
+ struct crypto_blkcipher *null;
+ u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
+};
+
+static void seqiv_free(struct crypto_instance *inst);
+
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
{
struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
@@ -81,6 +97,77 @@ static void seqiv_aead_complete(struct crypto_async_request *base, int err)
aead_givcrypt_complete(req, err);
}
+static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
+{
+ struct aead_request *subreq = aead_request_ctx(req);
+ struct crypto_aead *geniv;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ if (err)
+ goto out;
+
+ geniv = crypto_aead_reqtfm(req);
+ memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));
+
+out:
+ kzfree(subreq->iv);
+}
+
+static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
+ int err)
+{
+ struct aead_request *req = base->data;
+
+ seqiv_aead_encrypt_complete2(req, err);
+ aead_request_complete(req, err);
+}
+
+static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err)
+{
+ unsigned int ivsize = 8;
+ u8 data[20];
+
+ if (err == -EINPROGRESS)
+ return;
+
+ /* Swap IV and ESP header back to correct order. */
+ scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0);
+ scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1);
+ scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1);
+}
+
+static void seqniv_aead_encrypt_complete(struct crypto_async_request *base,
+ int err)
+{
+ struct aead_request *req = base->data;
+
+ seqniv_aead_encrypt_complete2(req, err);
+ aead_request_complete(req, err);
+}
+
+static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err)
+{
+ u8 data[4];
+
+ if (err == -EINPROGRESS)
+ return;
+
+ /* Move ESP header back to correct location. */
+ scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0);
+ scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1);
+}
+
+static void seqniv_aead_decrypt_complete(struct crypto_async_request *base,
+ int err)
+{
+ struct aead_request *req = base->data;
+
+ seqniv_aead_decrypt_complete2(req, err);
+ aead_request_complete(req, err);
+}
+
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
unsigned int ivsize)
{
@@ -186,160 +273,477 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
return err;
}
-static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
+static int seqniv_aead_encrypt(struct aead_request *req)
{
- struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
- struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
- int err = 0;
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+ struct seqniv_request_ctx *rctx = aead_request_ctx(req);
+ struct aead_request *subreq = &rctx->subreq;
+ struct scatterlist *dst;
+ crypto_completion_t compl;
+ void *data;
+ unsigned int ivsize = 8;
+ u8 buf[20] __attribute__ ((aligned(__alignof__(u32))));
+ int err;
- spin_lock_bh(&ctx->lock);
- if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
- goto unlock;
+ if (req->cryptlen < ivsize)
+ return -EINVAL;
- crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
- crypto_ablkcipher_ivsize(geniv));
+ /* ESP AD is at most 12 bytes (ESN). */
+ if (req->assoclen > 12)
+ return -EINVAL;
-unlock:
- spin_unlock_bh(&ctx->lock);
+ aead_request_set_tfm(subreq, ctx->geniv.child);
- if (err)
- return err;
+ compl = seqniv_aead_encrypt_complete;
+ data = req;
- return seqiv_givencrypt(req);
+ if (req->src != req->dst) {
+ struct blkcipher_desc desc = {
+ .tfm = ctx->null,
+ };
+
+ err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
+ req->assoclen + req->cryptlen);
+ if (err)
+ return err;
+ }
+
+ dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
+
+ aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_crypt(subreq, dst, dst,
+ req->cryptlen - ivsize, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ memcpy(buf, req->iv, ivsize);
+ crypto_xor(buf, ctx->salt, ivsize);
+ memcpy(req->iv, buf, ivsize);
+
+ /* Swap order of IV and ESP AD for ICV generation. */
+ scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0);
+ scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1);
+
+ err = crypto_aead_encrypt(subreq);
+ seqniv_aead_encrypt_complete2(req, err);
+ return err;
}
-static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
+static int seqiv_aead_encrypt(struct aead_request *req)
{
- struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
- struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
- int err = 0;
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+ struct aead_request *subreq = aead_request_ctx(req);
+ crypto_completion_t compl;
+ void *data;
+ u8 *info;
+ unsigned int ivsize = 8;
+ int err;
- spin_lock_bh(&ctx->lock);
- if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
- goto unlock;
+ if (req->cryptlen < ivsize)
+ return -EINVAL;
- crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
- crypto_aead_ivsize(geniv));
+ aead_request_set_tfm(subreq, ctx->geniv.child);
-unlock:
- spin_unlock_bh(&ctx->lock);
+ compl = req->base.complete;
+ data = req->base.data;
+ info = req->iv;
- if (err)
- return err;
+ if (req->src != req->dst) {
+ struct blkcipher_desc desc = {
+ .tfm = ctx->null,
+ };
- return seqiv_aead_givencrypt(req);
+ err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
+ req->assoclen + req->cryptlen);
+ if (err)
+ return err;
+ }
+
+ if (unlikely(!IS_ALIGNED((unsigned long)info,
+ crypto_aead_alignmask(geniv) + 1))) {
+ info = kmalloc(ivsize, req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
+ GFP_ATOMIC);
+ if (!info)
+ return -ENOMEM;
+
+ memcpy(info, req->iv, ivsize);
+ compl = seqiv_aead_encrypt_complete;
+ data = req;
+ }
+
+ aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_crypt(subreq, req->dst, req->dst,
+ req->cryptlen - ivsize, info);
+ aead_request_set_ad(subreq, req->assoclen + ivsize);
+
+ crypto_xor(info, ctx->salt, ivsize);
+ scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
+
+ err = crypto_aead_encrypt(subreq);
+ if (unlikely(info != req->iv))
+ seqiv_aead_encrypt_complete2(req, err);
+ return err;
+}
+
+static int seqniv_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+ struct seqniv_request_ctx *rctx = aead_request_ctx(req);
+ struct aead_request *subreq = &rctx->subreq;
+ struct scatterlist *dst;
+ crypto_completion_t compl;
+ void *data;
+ unsigned int ivsize = 8;
+ u8 buf[20];
+ int err;
+
+ if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
+ return -EINVAL;
+
+ aead_request_set_tfm(subreq, ctx->geniv.child);
+
+ compl = req->base.complete;
+ data = req->base.data;
+
+ if (req->assoclen > 12)
+ return -EINVAL;
+ else if (req->assoclen > 8) {
+ compl = seqniv_aead_decrypt_complete;
+ data = req;
+ }
+
+ if (req->src != req->dst) {
+ struct blkcipher_desc desc = {
+ .tfm = ctx->null,
+ };
+
+ err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
+ req->assoclen + req->cryptlen);
+ if (err)
+ return err;
+ }
+
+ /* Move ESP AD forward for ICV generation. */
+ scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0);
+ memcpy(req->iv, buf + req->assoclen, ivsize);
+ scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1);
+
+ dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
+
+ aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_crypt(subreq, dst, dst,
+ req->cryptlen - ivsize, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ err = crypto_aead_decrypt(subreq);
+ if (req->assoclen > 8)
+ seqniv_aead_decrypt_complete2(req, err);
+ return err;
+}
+
+static int seqiv_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *geniv = crypto_aead_reqtfm(req);
+ struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+ struct aead_request *subreq = aead_request_ctx(req);
+ crypto_completion_t compl;
+ void *data;
+ unsigned int ivsize = 8;
+
+ if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
+ return -EINVAL;
+
+ aead_request_set_tfm(subreq, ctx->geniv.child);
+
+ compl = req->base.complete;
+ data = req->base.data;
+
+ aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen - ivsize, req->iv);
+ aead_request_set_ad(subreq, req->assoclen + ivsize);
+
+ scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
+ if (req->src != req->dst)
+ scatterwalk_map_and_copy(req->iv, req->dst,
+ req->assoclen, ivsize, 1);
+
+ return crypto_aead_decrypt(subreq);
}
static int seqiv_init(struct crypto_tfm *tfm)
{
struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
+ int err;
spin_lock_init(&ctx->lock);
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
- return skcipher_geniv_init(tfm);
+ err = 0;
+ if (!crypto_get_default_rng()) {
+ crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
+ err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+ crypto_ablkcipher_ivsize(geniv));
+ crypto_put_default_rng();
+ }
+
+ return err ?: skcipher_geniv_init(tfm);
+}
+
+static int seqiv_old_aead_init(struct crypto_tfm *tfm)
+{
+ struct crypto_aead *geniv = __crypto_aead_cast(tfm);
+ struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
+ int err;
+
+ spin_lock_init(&ctx->lock);
+
+ crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+ sizeof(struct aead_request));
+ err = 0;
+ if (!crypto_get_default_rng()) {
+ geniv->givencrypt = seqiv_aead_givencrypt;
+ err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+ crypto_aead_ivsize(geniv));
+ crypto_put_default_rng();
+ }
+
+ return err ?: aead_geniv_init(tfm);
+}
+
+static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
+{
+ struct crypto_aead *geniv = __crypto_aead_cast(tfm);
+ struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+ int err;
+
+ spin_lock_init(&ctx->geniv.lock);
+
+ crypto_aead_set_reqsize(geniv, reqsize);
+
+ err = crypto_get_default_rng();
+ if (err)
+ goto out;
+
+ err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+ crypto_aead_ivsize(geniv));
+ crypto_put_default_rng();
+ if (err)
+ goto out;
+
+ ctx->null = crypto_get_default_null_skcipher();
+ err = PTR_ERR(ctx->null);
+ if (IS_ERR(ctx->null))
+ goto out;
+
+ err = aead_geniv_init(tfm);
+ if (err)
+ goto drop_null;
+
+ ctx->geniv.child = geniv->child;
+ geniv->child = geniv;
+
+out:
+ return err;
+
+drop_null:
+ crypto_put_default_null_skcipher();
+ goto out;
}
static int seqiv_aead_init(struct crypto_tfm *tfm)
{
- struct crypto_aead *geniv = __crypto_aead_cast(tfm);
- struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-
- spin_lock_init(&ctx->lock);
-
- tfm->crt_aead.reqsize = sizeof(struct aead_request);
-
- return aead_geniv_init(tfm);
+ return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
}
-static struct crypto_template seqiv_tmpl;
+static int seqniv_aead_init(struct crypto_tfm *tfm)
+{
+ return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
+}
-static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
+static void seqiv_aead_exit(struct crypto_tfm *tfm)
+{
+ struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->geniv.child);
+ crypto_put_default_null_skcipher();
+}
+
+static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
+ struct rtattr **tb)
{
struct crypto_instance *inst;
+ int err;
- inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
+ inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
if (IS_ERR(inst))
- goto out;
+ return PTR_ERR(inst);
- if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64)) {
- skcipher_geniv_free(inst);
- inst = ERR_PTR(-EINVAL);
- goto out;
- }
-
- inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
+ err = -EINVAL;
+ if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
+ goto free_inst;
inst->alg.cra_init = seqiv_init;
inst->alg.cra_exit = skcipher_geniv_exit;
inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
+ inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
+
+ inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+
+ err = crypto_register_instance(tmpl, inst);
+ if (err)
+ goto free_inst;
out:
- return inst;
+ return err;
+
+free_inst:
+ skcipher_geniv_free(inst);
+ goto out;
}
-static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
+static int seqiv_old_aead_create(struct crypto_template *tmpl,
+ struct aead_instance *aead)
{
- struct crypto_instance *inst;
+ struct crypto_instance *inst = aead_crypto_instance(aead);
+ int err = -EINVAL;
- inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);
+ if (inst->alg.cra_aead.ivsize < sizeof(u64))
+ goto free_inst;
- if (IS_ERR(inst))
- goto out;
-
- if (inst->alg.cra_aead.ivsize < sizeof(u64)) {
- aead_geniv_free(inst);
- inst = ERR_PTR(-EINVAL);
- goto out;
- }
-
- inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
-
- inst->alg.cra_init = seqiv_aead_init;
+ inst->alg.cra_init = seqiv_old_aead_init;
inst->alg.cra_exit = aead_geniv_exit;
inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
+ inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
+
+ err = crypto_register_instance(tmpl, inst);
+ if (err)
+ goto free_inst;
out:
- return inst;
+ return err;
+
+free_inst:
+ aead_geniv_free(aead);
+ goto out;
}
-static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
+static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct aead_instance *inst;
+ struct crypto_aead_spawn *spawn;
+ struct aead_alg *alg;
+ int err;
+
+ inst = aead_geniv_alloc(tmpl, tb, 0, 0);
+
+ if (IS_ERR(inst))
+ return PTR_ERR(inst);
+
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+
+ if (inst->alg.base.cra_aead.encrypt)
+ return seqiv_old_aead_create(tmpl, inst);
+
+ spawn = aead_instance_ctx(inst);
+ alg = crypto_spawn_aead_alg(spawn);
+
+ if (alg->base.cra_aead.encrypt)
+ goto done;
+
+ err = -EINVAL;
+ if (inst->alg.ivsize != sizeof(u64))
+ goto free_inst;
+
+ inst->alg.encrypt = seqiv_aead_encrypt;
+ inst->alg.decrypt = seqiv_aead_decrypt;
+
+ inst->alg.base.cra_init = seqiv_aead_init;
+ inst->alg.base.cra_exit = seqiv_aead_exit;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
+ inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;
+
+done:
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto free_inst;
+
+out:
+ return err;
+
+free_inst:
+ aead_geniv_free(inst);
+ goto out;
+}
+
+static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
- struct crypto_instance *inst;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_CAST(algt);
-
- err = crypto_get_default_rng();
- if (err)
- return ERR_PTR(err);
+ return PTR_ERR(algt);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
- inst = seqiv_ablkcipher_alloc(tb);
+ err = seqiv_ablkcipher_create(tmpl, tb);
else
- inst = seqiv_aead_alloc(tb);
+ err = seqiv_aead_create(tmpl, tb);
+ return err;
+}
+
+static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct aead_instance *inst;
+ struct crypto_aead_spawn *spawn;
+ struct aead_alg *alg;
+ int err;
+
+ inst = aead_geniv_alloc(tmpl, tb, 0, 0);
+ err = PTR_ERR(inst);
if (IS_ERR(inst))
- goto put_rng;
+ goto out;
- inst->alg.cra_alignmask |= __alignof__(u32) - 1;
- inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
+ spawn = aead_instance_ctx(inst);
+ alg = crypto_spawn_aead_alg(spawn);
+
+ if (alg->base.cra_aead.encrypt)
+ goto done;
+
+ err = -EINVAL;
+ if (inst->alg.ivsize != sizeof(u64))
+ goto free_inst;
+
+ inst->alg.encrypt = seqniv_aead_encrypt;
+ inst->alg.decrypt = seqniv_aead_decrypt;
+
+ inst->alg.base.cra_init = seqniv_aead_init;
+ inst->alg.base.cra_exit = seqiv_aead_exit;
+
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+ inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
+ inst->alg.base.cra_ctxsize += inst->alg.ivsize;
+
+done:
+ err = aead_register_instance(tmpl, inst);
+ if (err)
+ goto free_inst;
out:
- return inst;
+ return err;
-put_rng:
- crypto_put_default_rng();
+free_inst:
+ aead_geniv_free(inst);
goto out;
}
@@ -348,24 +752,46 @@ static void seqiv_free(struct crypto_instance *inst)
if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
skcipher_geniv_free(inst);
else
- aead_geniv_free(inst);
- crypto_put_default_rng();
+ aead_geniv_free(aead_instance(inst));
}
static struct crypto_template seqiv_tmpl = {
.name = "seqiv",
- .alloc = seqiv_alloc,
+ .create = seqiv_create,
+ .free = seqiv_free,
+ .module = THIS_MODULE,
+};
+
+static struct crypto_template seqniv_tmpl = {
+ .name = "seqniv",
+ .create = seqniv_create,
.free = seqiv_free,
.module = THIS_MODULE,
};
static int __init seqiv_module_init(void)
{
- return crypto_register_template(&seqiv_tmpl);
+ int err;
+
+ err = crypto_register_template(&seqiv_tmpl);
+ if (err)
+ goto out;
+
+ err = crypto_register_template(&seqniv_tmpl);
+ if (err)
+ goto out_undo_niv;
+
+out:
+ return err;
+
+out_undo_niv:
+ crypto_unregister_template(&seqiv_tmpl);
+ goto out;
}
static void __exit seqiv_module_exit(void)
{
+ crypto_unregister_template(&seqniv_tmpl);
crypto_unregister_template(&seqiv_tmpl);
}
@@ -375,3 +801,4 @@ module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sequence Number IV Generator");
MODULE_ALIAS_CRYPTO("seqiv");
+MODULE_ALIAS_CRYPTO("seqniv");
diff --git a/crypto/shash.c b/crypto/shash.c
index 47c713954bf3..ecb1e3d39bf0 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -520,11 +520,6 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
return 0;
}
-static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
-{
- return alg->cra_ctxsize;
-}
-
#ifdef CONFIG_NET
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
@@ -564,7 +559,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
static const struct crypto_type crypto_shash_type = {
.ctxsize = crypto_shash_ctxsize,
- .extsize = crypto_shash_extsize,
+ .extsize = crypto_alg_extsize,
.init = crypto_init_shash_ops,
.init_tfm = crypto_shash_init_tfm,
#ifdef CONFIG_PROC_FS
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1a2800107fc8..9f6f10b498ba 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -22,8 +22,10 @@
*
*/
+#include <crypto/aead.h>
#include <crypto/hash.h>
#include <linux/err.h>
+#include <linux/fips.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
@@ -34,7 +36,6 @@
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
-#include "internal.h"
/*
* Need slab memory for testing (size in number of pages).
@@ -257,12 +258,12 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
rem = buflen % PAGE_SIZE;
}
- sg_init_table(sg, np);
+ sg_init_table(sg, np + 1);
np--;
for (k = 0; k < np; k++)
- sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE);
+ sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
- sg_set_buf(&sg[k], xbuf[k], rem);
+ sg_set_buf(&sg[k + 1], xbuf[k], rem);
}
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
@@ -276,7 +277,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
const char *key;
struct aead_request *req;
struct scatterlist *sg;
- struct scatterlist *asg;
struct scatterlist *sgout;
const char *e;
void *assoc;
@@ -308,11 +308,10 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
if (testmgr_alloc_buf(xoutbuf))
goto out_nooutbuf;
- sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL);
+ sg = kmalloc(sizeof(*sg) * 9 * 2, GFP_KERNEL);
if (!sg)
goto out_nosg;
- asg = &sg[8];
- sgout = &asg[8];
+ sgout = &sg[9];
tfm = crypto_alloc_aead(algo, 0, 0);
@@ -338,7 +337,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
do {
assoc = axbuf[0];
memset(assoc, 0xff, aad_size);
- sg_init_one(&asg[0], assoc, aad_size);
if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
@@ -374,14 +372,17 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out;
}
- sg_init_aead(&sg[0], xbuf,
+ sg_init_aead(sg, xbuf,
*b_size + (enc ? authsize : 0));
- sg_init_aead(&sgout[0], xoutbuf,
+ sg_init_aead(sgout, xoutbuf,
*b_size + (enc ? authsize : 0));
+ sg_set_buf(&sg[0], assoc, aad_size);
+ sg_set_buf(&sgout[0], assoc, aad_size);
+
aead_request_set_crypt(req, sg, sgout, *b_size, iv);
- aead_request_set_assoc(req, asg, aad_size);
+ aead_request_set_ad(req, aad_size);
if (secs)
ret = test_aead_jiffies(req, enc, *b_size,
@@ -808,7 +809,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen,
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
- ret = crypto_ahash_init(req);
+ ret = do_one_ahash_op(req, crypto_ahash_init(req));
if (ret)
return ret;
for (pcount = 0; pcount < blen; pcount += plen) {
@@ -877,7 +878,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen,
/* Warm-up run. */
for (i = 0; i < 4; i++) {
- ret = crypto_ahash_init(req);
+ ret = do_one_ahash_op(req, crypto_ahash_init(req));
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
@@ -896,7 +897,7 @@ static int test_ahash_cycles(struct ahash_request *req, int blen,
start = get_cycles();
- ret = crypto_ahash_init(req);
+ ret = do_one_ahash_op(req, crypto_ahash_init(req));
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
@@ -1761,6 +1762,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
NULL, 0, 16, 8, aead_speed_template_20);
break;
+ case 212:
+ test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
+ NULL, 0, 16, 8, aead_speed_template_19);
+ break;
+
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 6c7e21a09f78..6cc1b856871b 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -65,6 +65,7 @@ static u8 speed_template_32_64[] = {32, 64, 0};
/*
* AEAD speed tests
*/
+static u8 aead_speed_template_19[] = {19, 0};
static u8 aead_speed_template_20[] = {20, 0};
/*
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f9bce3d7ee7f..975e1eac3e2d 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -20,14 +20,17 @@
*
*/
+#include <crypto/aead.h>
#include <crypto/hash.h>
#include <linux/err.h>
+#include <linux/fips.h>
#include <linux/module.h>
#include <linux/scatterlist.h>