Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - add AEAD support to crypto engine
   - allow batch registration in simd

  Algorithms:
   - add CFB mode
   - add speck block cipher
   - add sm4 block cipher
   - new test case for crct10dif
   - improve scheduling latency on ARM
   - scatter/gather support to gcm in aesni
   - convert x86 crypto algorithms to skcipher

  Drivers:
   - hmac(sha224/sha256) support in inside-secure
   - aes gcm/ccm support in stm32
   - stm32mp1 support in stm32
   - ccree driver from staging tree
   - gcm support over QI in caam
   - add ks-sa hwrng driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (212 commits)
  crypto: ccree - remove unused enums
  crypto: ahash - Fix early termination in hash walk
  crypto: brcm - explicitly cast cipher to hash type
  crypto: talitos - don't leak pointers to authenc keys
  crypto: qat - don't leak pointers to authenc keys
  crypto: picoxcell - don't leak pointers to authenc keys
  crypto: ixp4xx - don't leak pointers to authenc keys
  crypto: chelsio - don't leak pointers to authenc keys
  crypto: caam/qi - don't leak pointers to authenc keys
  crypto: caam - don't leak pointers to authenc keys
  crypto: lrw - Free rctx->ext with kzfree
  crypto: talitos - fix IPsec cipher in length
  crypto: Deduplicate le32_to_cpu_array() and cpu_to_le32_array()
  crypto: doc - clarify hash callbacks state machine
  crypto: api - Keep failed instances alive
  crypto: api - Make crypto_alg_lookup static
  crypto: api - Remove unused crypto_type lookup function
  crypto: chelsio - Remove declaration of static function from header
  crypto: inside-secure - hmac(sha224) support
  crypto: inside-secure - hmac(sha256) support
  ...
commit 9eb31227cb

Documentation/crypto/crypto_engine.rst | 48 (new file)
@@ -0,0 +1,48 @@
=============
CRYPTO ENGINE
=============

Overview
--------
The crypto engine (CE) API is a crypto queue manager.

Requirement
-----------
You have to put, at the start of your tfm_ctx, the struct crypto_engine_ctx:

	struct your_tfm_ctx {
		struct crypto_engine_ctx enginectx;
		...
	};

Why: since the crypto engine manages only crypto_async_request, it cannot know
the underlying request type and only has access to the TFM, so using
container_of() for accessing __ctx is impossible.
Furthermore, the crypto engine cannot know the layout of "struct your_tfm_ctx",
so it must assume that crypto_engine_ctx is at the start of it.
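As an illustration, a minimal sketch of such a driver context and of the
transform-init callback that fills it in could look like this (the my_aes_*
names are hypothetical; only struct crypto_engine_ctx and its op callbacks
come from the crypto engine API):

	#include <crypto/aes.h>
	#include <crypto/engine.h>
	#include <crypto/internal/skcipher.h>

	/* Hypothetical skcipher driver context: crypto_engine_ctx must be the
	 * first member so the engine can reach the callbacks from the TFM. */
	struct my_aes_ctx {
		struct crypto_engine_ctx enginectx;
		u8 key[AES_MAX_KEY_SIZE];
		unsigned int keylen;
	};

	static int my_aes_do_one(struct crypto_engine *engine, void *areq);

	static int my_aes_init_tfm(struct crypto_skcipher *tfm)
	{
		struct my_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

		ctx->enginectx.op.do_one_request = my_aes_do_one;
		ctx->enginectx.op.prepare_request = NULL;	/* optional */
		ctx->enginectx.op.unprepare_request = NULL;	/* optional */
		return 0;
	}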
Order of operations
-------------------
You have to obtain a struct crypto_engine via crypto_engine_alloc_init()
and start it via crypto_engine_start().

Before transferring any request, you have to fill in the enginectx:
- prepare_request: (function pointer) called if you need to do some processing before handling the request
- unprepare_request: (function pointer) undoes what was done in prepare_request
- do_one_request: (function pointer) performs the encryption/decryption for the current request

Note that those three functions get the crypto_async_request associated with
the received request, so you need to recover the original request via
container_of(areq, struct yourrequesttype_request, base);

When your driver receives a crypto_request, you have to transfer it to
the crypto engine via one of:
- crypto_transfer_ablkcipher_request_to_engine()
- crypto_transfer_aead_request_to_engine()
- crypto_transfer_akcipher_request_to_engine()
- crypto_transfer_hash_request_to_engine()
- crypto_transfer_skcipher_request_to_engine()

At the end of request processing, a call to one of the following functions is needed:
- crypto_finalize_ablkcipher_request()
- crypto_finalize_aead_request()
- crypto_finalize_akcipher_request()
- crypto_finalize_hash_request()
- crypto_finalize_skcipher_request()
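Continuing the hypothetical my_aes_* sketch from the Requirement section, a
rough outline of how a driver might wire this up (struct my_aes_dev and
my_aes_find_dev() are made-up helpers; the crypto_engine_*, crypto_transfer_*
and crypto_finalize_* calls are the API described above):

	#include <linux/device.h>

	/* Hypothetical per-device state. */
	struct my_aes_dev {
		struct crypto_engine *engine;
	};

	static struct my_aes_dev *my_aes_find_dev(void);	/* hypothetical lookup */

	/* Probe-time setup: allocate the engine and start its queue. */
	static int my_aes_init_engine(struct device *dev, struct my_aes_dev *dd)
	{
		dd->engine = crypto_engine_alloc_init(dev, true);
		if (!dd->engine)
			return -ENOMEM;
		return crypto_engine_start(dd->engine);
	}

	/* skcipher .encrypt entry point: hand the request over to the engine. */
	static int my_aes_encrypt(struct skcipher_request *req)
	{
		struct my_aes_dev *dd = my_aes_find_dev();

		return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
	}

	/* Completion path, e.g. called from the do_one_request callback or an
	 * interrupt handler once the hardware is done with the request. */
	static void my_aes_finish(struct my_aes_dev *dd,
				  struct skcipher_request *req, int err)
	{
		crypto_finalize_skcipher_request(dd->engine, req, err);
	}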
@@ -236,6 +236,14 @@ when used from another part of the kernel.
               |
               '---------------> HASH2

Note that it is perfectly legal to "abandon" a request object:
- call .init() and then (as many times) .update()
- _not_ call any of .final(), .finup() or .export() at any point in future

In other words, implementations should mind the resource allocation and clean-up.
No resources related to request objects should remain allocated after a call
to .init() or .update(), since there might be no chance to free them.
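As an example of what "abandoning" looks like from the caller's side, a sketch
along these lines is perfectly valid (error handling trimmed; "sha256" is an
arbitrary choice and demo_abandon_hash() is not an existing helper):

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Start hashing, then simply drop the request without .final(). */
	static int demo_abandon_hash(const u8 *data, unsigned int len)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);
		sg_init_one(&sg, data, len);	/* data must be linearly mapped */
		ahash_request_set_crypt(req, &sg, NULL, len);

		err = crypto_wait_req(crypto_ahash_init(req), &wait);
		if (!err)
			err = crypto_wait_req(crypto_ahash_update(req), &wait);

		/* No .final()/.finup()/.export(): the request object is simply
		 * freed, and the implementation must not have left anything
		 * allocated behind. */
		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}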

Specifics Of Asynchronous HASH Transformation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -1,7 +1,8 @@
Arm TrustZone CryptoCell cryptographic engine

Required properties:
- compatible: Should be "arm,cryptocell-712-ree".
- compatible: Should be one of: "arm,cryptocell-712-ree",
  "arm,cryptocell-710-ree" or "arm,cryptocell-630p-ree".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt number for the device.

@@ -8,7 +8,11 @@ Required properties:
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".

Optional properties:
- clocks: Reference to the crypto engine clock.
- clocks: Reference to the crypto engine clocks, the second clock is
  needed for the Armada 7K/8K SoCs.
- clock-names: mandatory if there is a second clock, in this case the
  name must be "core" for the first clock and "reg" for
  the second one.

Example:

@@ -1,15 +1,14 @@
Freescale RNGC (Random Number Generator Version C)

The driver also supports version B, which is mostly compatible
to version C.
Freescale RNGA/RNGB/RNGC (Random Number Generator Versions A, B and C)

Required properties:
- compatible : should be one of
               "fsl,imx21-rnga"
               "fsl,imx31-rnga" (backward compatible with "fsl,imx21-rnga")
               "fsl,imx25-rngb"
               "fsl,imx35-rngc"
- reg : offset and length of the register set of this block
- interrupts : the interrupt number for the RNGC block
- clocks : the RNGC clk source
- interrupts : the interrupt number for the RNG block
- clocks : the RNG clk source

Example:

Documentation/devicetree/bindings/rng/ks-sa-rng.txt | 21 (new file)
@@ -0,0 +1,21 @@
Keystone SoC Hardware Random Number Generator (HWRNG) Module

On Keystone SoCs the HWRNG module is a submodule of the Security Accelerator.

- compatible: should be "ti,keystone-rng"
- ti,syscon-sa-cfg: phandle to syscon node of the SA configuration registers.
  These registers are shared between the hwrng and crypto drivers.
- clocks: phandle to the reference clocks for the subsystem
- clock-names: functional clock name. Should be set to "fck"
- reg: HWRNG module register space

Example:
/* K2HK */

	rng@24000 {
		compatible = "ti,keystone-rng";
		ti,syscon-sa-cfg = <&sa_config>;
		clocks = <&clksa>;
		clock-names = "fck";
		reg = <0x24000 0x1000>;
	};
@@ -13,7 +13,12 @@ Required properties:
- interrupts : the interrupt number for the RNG module.
		Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
- clocks: the trng clock source. Only mandatory for the
  "inside-secure,safexcel-eip76" compatible.
  "inside-secure,safexcel-eip76" compatible, the second clock is
  needed for the Armada 7K/8K SoCs
- clock-names: mandatory if there is a second clock, in this case the
  name must be "core" for the first clock and "reg" for the second
  one

Example:
/* AM335x */

@@ -11,6 +11,10 @@ Required properties:
- interrupts : The designated IRQ line for the RNG
- clocks : The clock needed to enable the RNG

Optional properties:
- resets : The reset to properly start RNG
- clock-error-detect : Enable the clock detection management

Example:

	rng: rng@50060800 {
MAINTAINERS | 15
@@ -3252,12 +3252,11 @@ F: drivers/net/ieee802154/cc2520.c
F: include/linux/spi/cc2520.h
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt

CCREE ARM TRUSTZONE CRYPTOCELL 700 REE DRIVER
CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER
M: Gilad Ben-Yossef <gilad@benyossef.com>
L: linux-crypto@vger.kernel.org
L: driverdev-devel@linuxdriverproject.org
S: Supported
F: drivers/staging/ccree/
F: drivers/crypto/ccree/
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family

CEC FRAMEWORK
@@ -6962,7 +6961,7 @@ F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_

INSIDE SECURE CRYPTO DRIVER
M: Antoine Tenart <antoine.tenart@free-electrons.com>
M: Antoine Tenart <antoine.tenart@bootlin.com>
F: drivers/crypto/inside-secure/
S: Maintained
L: linux-crypto@vger.kernel.org
@@ -7200,6 +7199,14 @@ L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/i40iw/

INTEL SHA MULTIBUFFER DRIVER
M: Megha Dey <megha.dey@linux.intel.com>
R: Tim Chen <tim.c.chen@linux.intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: arch/x86/crypto/sha*-mb
F: crypto/mcryptd.c

INTEL TELEMETRY DRIVER
M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
L: platform-driver-x86@vger.kernel.org

@@ -121,4 +121,10 @@ config CRYPTO_CHACHA20_NEON
	select CRYPTO_BLKCIPHER
	select CRYPTO_CHACHA20

config CRYPTO_SPECK_NEON
	tristate "NEON accelerated Speck cipher algorithms"
	depends on KERNEL_MODE_NEON
	select CRYPTO_BLKCIPHER
	select CRYPTO_SPECK

endif

@@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o

ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -53,7 +54,9 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
speck-neon-y := speck-neon-core.o speck-neon-glue.o

ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
      cmd_perl = $(PERL) $(<) > $(@)

@@ -62,5 +65,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl

$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
	$(call cmd,perl)
endif

.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S

@@ -174,6 +174,16 @@
	.ltorg
	.endm

ENTRY(__aes_arm_encrypt)
	do_crypt	fround, crypto_ft_tab, crypto_ft_tab + 1, 2
ENDPROC(__aes_arm_encrypt)

	.align		5
ENTRY(__aes_arm_decrypt)
	do_crypt	iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
ENDPROC(__aes_arm_decrypt)

	.section	".rodata", "a"
	.align		L1_CACHE_SHIFT
	.type		__aes_arm_inverse_sbox, %object
__aes_arm_inverse_sbox:
@@ -210,12 +220,3 @@ __aes_arm_inverse_sbox:
	.byte		0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
	.byte		0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
	.size		__aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox

ENTRY(__aes_arm_encrypt)
	do_crypt	fround, crypto_ft_tab, crypto_ft_tab + 1, 2
ENDPROC(__aes_arm_encrypt)

	.align		5
ENTRY(__aes_arm_decrypt)
	do_crypt	iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
ENDPROC(__aes_arm_decrypt)

arch/arm/crypto/speck-neon-core.S | 432 (new file)
@ -0,0 +1,432 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
|
||||
*
|
||||
* Copyright (c) 2018 Google, Inc
|
||||
*
|
||||
* Author: Eric Biggers <ebiggers@google.com>
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.text
|
||||
.fpu neon
|
||||
|
||||
// arguments
|
||||
ROUND_KEYS .req r0 // const {u64,u32} *round_keys
|
||||
NROUNDS .req r1 // int nrounds
|
||||
DST .req r2 // void *dst
|
||||
SRC .req r3 // const void *src
|
||||
NBYTES .req r4 // unsigned int nbytes
|
||||
TWEAK .req r5 // void *tweak
|
||||
|
||||
// registers which hold the data being encrypted/decrypted
|
||||
X0 .req q0
|
||||
X0_L .req d0
|
||||
X0_H .req d1
|
||||
Y0 .req q1
|
||||
Y0_H .req d3
|
||||
X1 .req q2
|
||||
X1_L .req d4
|
||||
X1_H .req d5
|
||||
Y1 .req q3
|
||||
Y1_H .req d7
|
||||
X2 .req q4
|
||||
X2_L .req d8
|
||||
X2_H .req d9
|
||||
Y2 .req q5
|
||||
Y2_H .req d11
|
||||
X3 .req q6
|
||||
X3_L .req d12
|
||||
X3_H .req d13
|
||||
Y3 .req q7
|
||||
Y3_H .req d15
|
||||
|
||||
// the round key, duplicated in all lanes
|
||||
ROUND_KEY .req q8
|
||||
ROUND_KEY_L .req d16
|
||||
ROUND_KEY_H .req d17
|
||||
|
||||
// index vector for vtbl-based 8-bit rotates
|
||||
ROTATE_TABLE .req d18
|
||||
|
||||
// multiplication table for updating XTS tweaks
|
||||
GF128MUL_TABLE .req d19
|
||||
GF64MUL_TABLE .req d19
|
||||
|
||||
// current XTS tweak value(s)
|
||||
TWEAKV .req q10
|
||||
TWEAKV_L .req d20
|
||||
TWEAKV_H .req d21
|
||||
|
||||
TMP0 .req q12
|
||||
TMP0_L .req d24
|
||||
TMP0_H .req d25
|
||||
TMP1 .req q13
|
||||
TMP2 .req q14
|
||||
TMP3 .req q15
|
||||
|
||||
.align 4
|
||||
.Lror64_8_table:
|
||||
.byte 1, 2, 3, 4, 5, 6, 7, 0
|
||||
.Lror32_8_table:
|
||||
.byte 1, 2, 3, 0, 5, 6, 7, 4
|
||||
.Lrol64_8_table:
|
||||
.byte 7, 0, 1, 2, 3, 4, 5, 6
|
||||
.Lrol32_8_table:
|
||||
.byte 3, 0, 1, 2, 7, 4, 5, 6
|
||||
.Lgf128mul_table:
|
||||
.byte 0, 0x87
|
||||
.fill 14
|
||||
.Lgf64mul_table:
|
||||
.byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
|
||||
.fill 12
|
||||
|
||||
/*
|
||||
* _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
|
||||
*
|
||||
* Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
|
||||
* Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
|
||||
* of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
|
||||
*
|
||||
* The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
|
||||
* the vtbl approach is faster on some processors and the same speed on others.
|
||||
*/
|
||||
.macro _speck_round_128bytes n
|
||||
|
||||
// x = ror(x, 8)
|
||||
vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
|
||||
vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
|
||||
vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
|
||||
vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
|
||||
vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
|
||||
vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
|
||||
vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
|
||||
vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
|
||||
|
||||
// x += y
|
||||
vadd.u\n X0, Y0
|
||||
vadd.u\n X1, Y1
|
||||
vadd.u\n X2, Y2
|
||||
vadd.u\n X3, Y3
|
||||
|
||||
// x ^= k
|
||||
veor X0, ROUND_KEY
|
||||
veor X1, ROUND_KEY
|
||||
veor X2, ROUND_KEY
|
||||
veor X3, ROUND_KEY
|
||||
|
||||
// y = rol(y, 3)
|
||||
vshl.u\n TMP0, Y0, #3
|
||||
vshl.u\n TMP1, Y1, #3
|
||||
vshl.u\n TMP2, Y2, #3
|
||||
vshl.u\n TMP3, Y3, #3
|
||||
vsri.u\n TMP0, Y0, #(\n - 3)
|
||||
vsri.u\n TMP1, Y1, #(\n - 3)
|
||||
vsri.u\n TMP2, Y2, #(\n - 3)
|
||||
vsri.u\n TMP3, Y3, #(\n - 3)
|
||||
|
||||
// y ^= x
|
||||
veor Y0, TMP0, X0
|
||||
veor Y1, TMP1, X1
|
||||
veor Y2, TMP2, X2
|
||||
veor Y3, TMP3, X3
|
||||
.endm
|
||||
|
||||
/*
|
||||
* _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
|
||||
*
|
||||
* This is the inverse of _speck_round_128bytes().
|
||||
*/
|
||||
.macro _speck_unround_128bytes n
|
||||
|
||||
// y ^= x
|
||||
veor TMP0, Y0, X0
|
||||
veor TMP1, Y1, X1
|
||||
veor TMP2, Y2, X2
|
||||
veor TMP3, Y3, X3
|
||||
|
||||
// y = ror(y, 3)
|
||||
vshr.u\n Y0, TMP0, #3
|
||||
vshr.u\n Y1, TMP1, #3
|
||||
vshr.u\n Y2, TMP2, #3
|
||||
vshr.u\n Y3, TMP3, #3
|
||||
vsli.u\n Y0, TMP0, #(\n - 3)
|
||||
vsli.u\n Y1, TMP1, #(\n - 3)
|
||||
vsli.u\n Y2, TMP2, #(\n - 3)
|
||||
vsli.u\n Y3, TMP3, #(\n - 3)
|
||||
|
||||
// x ^= k
|
||||
veor X0, ROUND_KEY
|
||||
veor X1, ROUND_KEY
|
||||
veor X2, ROUND_KEY
|
||||
veor X3, ROUND_KEY
|
||||
|
||||
// x -= y
|
||||
vsub.u\n X0, Y0
|
||||
vsub.u\n X1, Y1
|
||||
vsub.u\n X2, Y2
|
||||
vsub.u\n X3, Y3
|
||||
|
||||
// x = rol(x, 8);
|
||||
vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
|
||||
vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
|
||||
vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
|
||||
vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
|
||||
vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
|
||||
vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
|
||||
vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
|
||||
vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
|
||||
.endm
|
||||
|
||||
.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
|
||||
|
||||
// Load the next source block
|
||||
vld1.8 {\dst_reg}, [SRC]!
|
||||
|
||||
// Save the current tweak in the tweak buffer
|
||||
vst1.8 {TWEAKV}, [\tweak_buf:128]!
|
||||
|
||||
// XOR the next source block with the current tweak
|
||||
veor \dst_reg, TWEAKV
|
||||
|
||||
/*
|
||||
* Calculate the next tweak by multiplying the current one by x,
|
||||
* modulo p(x) = x^128 + x^7 + x^2 + x + 1.
|
||||
*/
|
||||
vshr.u64 \tmp, TWEAKV, #63
|
||||
vshl.u64 TWEAKV, #1
|
||||
veor TWEAKV_H, \tmp\()_L
|
||||
vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
|
||||
veor TWEAKV_L, \tmp\()_H
|
||||
.endm
|
||||
|
||||
.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
|
||||
|
||||
// Load the next two source blocks
|
||||
vld1.8 {\dst_reg}, [SRC]!
|
||||
|
||||
// Save the current two tweaks in the tweak buffer
|
||||
vst1.8 {TWEAKV}, [\tweak_buf:128]!
|
||||
|
||||
// XOR the next two source blocks with the current two tweaks
|
||||
veor \dst_reg, TWEAKV
|
||||
|
||||
/*
|
||||
* Calculate the next two tweaks by multiplying the current ones by x^2,
|
||||
* modulo p(x) = x^64 + x^4 + x^3 + x + 1.
|
||||
*/
|
||||
vshr.u64 \tmp, TWEAKV, #62
|
||||
vshl.u64 TWEAKV, #2
|
||||
vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
|
||||
vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
|
||||
veor TWEAKV, \tmp
|
||||
.endm
|
||||
|
||||
/*
|
||||
* _speck_xts_crypt() - Speck-XTS encryption/decryption
|
||||
*
|
||||
* Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
|
||||
* using Speck-XTS, specifically the variant with a block size of '2n' and round
|
||||
* count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
|
||||
* the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
|
||||
* nonzero multiple of 128.
|
||||
*/
|
||||
.macro _speck_xts_crypt n, decrypting
|
||||
push {r4-r7}
|
||||
mov r7, sp
|
||||
|
||||
/*
|
||||
* The first four parameters were passed in registers r0-r3. Load the
|
||||
* additional parameters, which were passed on the stack.
|
||||
*/
|
||||
ldr NBYTES, [sp, #16]
|
||||
ldr TWEAK, [sp, #20]
|
||||
|
||||
/*
|
||||
* If decrypting, modify the ROUND_KEYS parameter to point to the last
|
||||
* round key rather than the first, since for decryption the round keys
|
||||
* are used in reverse order.
|
||||
*/
|
||||
.if \decrypting
|
||||
.if \n == 64
|
||||
add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
|
||||
sub ROUND_KEYS, #8
|
||||
.else
|
||||
add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
|
||||
sub ROUND_KEYS, #4
|
||||
.endif
|
||||
.endif
|
||||
|
||||
// Load the index vector for vtbl-based 8-bit rotates
|
||||
.if \decrypting
|
||||
ldr r12, =.Lrol\n\()_8_table
|
||||
.else
|
||||
ldr r12, =.Lror\n\()_8_table
|
||||
.endif
|
||||
vld1.8 {ROTATE_TABLE}, [r12:64]
|
||||
|
||||
// One-time XTS preparation
|
||||
|
||||
/*
|
||||
* Allocate stack space to store 128 bytes worth of tweaks. For
|
||||
* performance, this space is aligned to a 16-byte boundary so that we
|
||||
* can use the load/store instructions that declare 16-byte alignment.
|
||||
*/
|
||||
sub sp, #128
|
||||
bic sp, #0xf
|
||||
|
||||
.if \n == 64
|
||||
// Load first tweak
|
||||
vld1.8 {TWEAKV}, [TWEAK]
|
||||
|
||||
// Load GF(2^128) multiplication table
|
||||
ldr r12, =.Lgf128mul_table
|
||||
vld1.8 {GF128MUL_TABLE}, [r12:64]
|
||||
.else
|
||||
// Load first tweak
|
||||
vld1.8 {TWEAKV_L}, [TWEAK]
|
||||
|
||||
// Load GF(2^64) multiplication table
|
||||
ldr r12, =.Lgf64mul_table
|
||||
vld1.8 {GF64MUL_TABLE}, [r12:64]
|
||||
|
||||
// Calculate second tweak, packing it together with the first
|
||||
vshr.u64 TMP0_L, TWEAKV_L, #63
|
||||
vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
|
||||
vshl.u64 TWEAKV_H, TWEAKV_L, #1
|
||||
veor TWEAKV_H, TMP0_L
|
||||
.endif
|
||||
|
||||
.Lnext_128bytes_\@:
|
||||
|
||||
/*
|
||||
* Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
|
||||
* values, and save the tweaks on the stack for later. Then
|
||||
* de-interleave the 'x' and 'y' elements of each block, i.e. make it so
|
||||
* that the X[0-3] registers contain only the second halves of blocks,
|
||||
* and the Y[0-3] registers contain only the first halves of blocks.
|
||||
* (Speck uses the order (y, x) rather than the more intuitive (x, y).)
|
||||
*/
|
||||
mov r12, sp
|
||||
.if \n == 64
|
||||
_xts128_precrypt_one X0, r12, TMP0
|
||||
_xts128_precrypt_one Y0, r12, TMP0
|
||||
_xts128_precrypt_one X1, r12, TMP0
|
||||
_xts128_precrypt_one Y1, r12, TMP0
|
||||
_xts128_precrypt_one X2, r12, TMP0
|
||||
_xts128_precrypt_one Y2, r12, TMP0
|
||||
_xts128_precrypt_one X3, r12, TMP0
|
||||
_xts128_precrypt_one Y3, r12, TMP0
|
||||
vswp X0_L, Y0_H
|
||||
vswp X1_L, Y1_H
|
||||
vswp X2_L, Y2_H
|
||||
vswp X3_L, Y3_H
|
||||
.else
|
||||
_xts64_precrypt_two X0, r12, TMP0
|
||||
_xts64_precrypt_two Y0, r12, TMP0
|
||||
_xts64_precrypt_two X1, r12, TMP0
|
||||
_xts64_precrypt_two Y1, r12, TMP0
|
||||
_xts64_precrypt_two X2, r12, TMP0
|
||||
_xts64_precrypt_two Y2, r12, TMP0
|
||||
_xts64_precrypt_two X3, r12, TMP0
|
||||
_xts64_precrypt_two Y3, r12, TMP0
|
||||
vuzp.32 Y0, X0
|
||||
vuzp.32 Y1, X1
|
||||
vuzp.32 Y2, X2
|
||||
vuzp.32 Y3, X3
|
||||
.endif
|
||||
|
||||
// Do the cipher rounds
|
||||
|
||||
mov r12, ROUND_KEYS
|
||||
mov r6, NROUNDS
|
||||
|
||||
.Lnext_round_\@:
|
||||
.if \decrypting
|
||||
.if \n == 64
|
||||
vld1.64 ROUND_KEY_L, [r12]
|
||||
sub r12, #8
|
||||
vmov ROUND_KEY_H, ROUND_KEY_L
|
||||
.else
|
||||
vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
|
||||
sub r12, #4
|
||||
.endif
|
||||
_speck_unround_128bytes \n
|
||||
.else
|
||||
.if \n == 64
|
||||
vld1.64 ROUND_KEY_L, [r12]!
|
||||
vmov ROUND_KEY_H, ROUND_KEY_L
|
||||
.else
|
||||
vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
|
||||
.endif
|
||||
_speck_round_128bytes \n
|
||||
.endif
|
||||
subs r6, r6, #1
|
||||
bne .Lnext_round_\@
|
||||
|
||||
// Re-interleave the 'x' and 'y' elements of each block
|
||||
.if \n == 64
|
||||
vswp X0_L, Y0_H
|
||||
vswp X1_L, Y1_H
|
||||
vswp X2_L, Y2_H
|
||||
vswp X3_L, Y3_H
|
||||
.else
|
||||
vzip.32 Y0, X0
|
||||
vzip.32 Y1, X1
|
||||
vzip.32 Y2, X2
|
||||
vzip.32 Y3, X3
|
||||
.endif
|
||||
|
||||
// XOR the encrypted/decrypted blocks with the tweaks we saved earlier
|
||||
mov r12, sp
|
||||
vld1.8 {TMP0, TMP1}, [r12:128]!
|
||||
vld1.8 {TMP2, TMP3}, [r12:128]!
|
||||
veor X0, TMP0
|
||||
veor Y0, TMP1
|
||||
veor X1, TMP2
|
||||
veor Y1, TMP3
|
||||
vld1.8 {TMP0, TMP1}, [r12:128]!
|
||||
vld1.8 {TMP2, TMP3}, [r12:128]!
|
||||
veor X2, TMP0
|
||||
veor Y2, TMP1
|
||||
veor X3, TMP2
|
||||
veor Y3, TMP3
|
||||
|
||||
// Store the ciphertext in the destination buffer
|
||||
vst1.8 {X0, Y0}, [DST]!
|
||||
vst1.8 {X1, Y1}, [DST]!
|
||||
vst1.8 {X2, Y2}, [DST]!
|
||||
vst1.8 {X3, Y3}, [DST]!
|
||||
|
||||
// Continue if there are more 128-byte chunks remaining, else return
|
||||
subs NBYTES, #128
|
||||
bne .Lnext_128bytes_\@
|
||||
|
||||
// Store the next tweak
|
||||
.if \n == 64
|
||||
vst1.8 {TWEAKV}, [TWEAK]
|
||||
.else
|
||||
vst1.8 {TWEAKV_L}, [TWEAK]
|
||||
.endif
|
||||
|
||||
mov sp, r7
|
||||
pop {r4-r7}
|
||||
bx lr
|
||||
.endm
|
||||
|
||||
ENTRY(speck128_xts_encrypt_neon)
|
||||
_speck_xts_crypt n=64, decrypting=0
|
||||
ENDPROC(speck128_xts_encrypt_neon)
|
||||
|
||||
ENTRY(speck128_xts_decrypt_neon)
|
||||
_speck_xts_crypt n=64, decrypting=1
|
||||
ENDPROC(speck128_xts_decrypt_neon)
|
||||
|
||||
ENTRY(speck64_xts_encrypt_neon)
|
||||
_speck_xts_crypt n=32, decrypting=0
|
||||
ENDPROC(speck64_xts_encrypt_neon)
|
||||
|
||||
ENTRY(speck64_xts_decrypt_neon)
|
||||
_speck_xts_crypt n=32, decrypting=1
|
||||
ENDPROC(speck64_xts_decrypt_neon)
|
arch/arm/crypto/speck-neon-glue.c | 288 (new file)
@ -0,0 +1,288 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
|
||||
*
|
||||
* Copyright (c) 2018 Google, Inc
|
||||
*
|
||||
* Note: the NIST recommendation for XTS only specifies a 128-bit block size,
|
||||
* but a 64-bit version (needed for Speck64) is fairly straightforward; the math
|
||||
* is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
|
||||
* x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
|
||||
* "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
|
||||
* OCB and PMAC"), represented as 0x1B.
|
||||
*/
|
||||
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/speck.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/* The assembly functions only handle multiples of 128 bytes */
|
||||
#define SPECK_NEON_CHUNK_SIZE 128
|
||||
|
||||
/* Speck128 */
|
||||
|
||||
struct speck128_xts_tfm_ctx {
|
||||
struct speck128_tfm_ctx main_key;
|
||||
struct speck128_tfm_ctx tweak_key;
|
||||
};
|
||||
|
||||
asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
|
||||
u8 *, const u8 *);
|
||||
typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
|
||||
const void *, unsigned int, void *);
|
||||
|
||||
static __always_inline int
|
||||
__speck128_xts_crypt(struct skcipher_request *req,
|
||||
speck128_crypt_one_t crypt_one,
|
||||
speck128_xts_crypt_many_t crypt_many)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
le128 tweak;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
|
||||
crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
|
||||
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
|
||||
unsigned int count;
|
||||
|
||||
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
|
||||
kernel_neon_begin();
|
||||
(*crypt_many)(ctx->main_key.round_keys,
|
||||
ctx->main_key.nrounds,
|
||||
dst, src, count, &tweak);
|
||||
kernel_neon_end();
|
||||
dst += count;
|
||||
src += count;
|
||||
nbytes -= count;
|
||||
}
|
||||
|
||||
/* Handle any remainder with generic code */
|
||||
while (nbytes >= sizeof(tweak)) {
|
||||
le128_xor((le128 *)dst, (const le128 *)src, &tweak);
|
||||
(*crypt_one)(&ctx->main_key, dst, dst);
|
||||
le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
|
||||
gf128mul_x_ble(&tweak, &tweak);
|
||||
|
||||
dst += sizeof(tweak);
|
||||
src += sizeof(tweak);
|
||||
nbytes -= sizeof(tweak);
|
||||
}
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int speck128_xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck128_xts_crypt(req, crypto_speck128_encrypt,
|
||||
speck128_xts_encrypt_neon);
|
||||
}
|
||||
|
||||
static int speck128_xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck128_xts_crypt(req, crypto_speck128_decrypt,
|
||||
speck128_xts_decrypt_neon);
|
||||
}
|
||||
|
||||
static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_verify_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
keylen /= 2;
|
||||
|
||||
err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
|
||||
}
|
||||
|
||||
/* Speck64 */
|
||||
|
||||
struct speck64_xts_tfm_ctx {
|
||||
struct speck64_tfm_ctx main_key;
|
||||
struct speck64_tfm_ctx tweak_key;
|
||||
};
|
||||
|
||||
asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
|
||||
u8 *, const u8 *);
|
||||
typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
|
||||
const void *, unsigned int, void *);
|
||||
|
||||
static __always_inline int
|
||||
__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
|
||||
speck64_xts_crypt_many_t crypt_many)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
__le64 tweak;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
|
||||
crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
|
||||
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
|
||||
unsigned int count;
|
||||
|
||||
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
|
||||
kernel_neon_begin();
|
||||
(*crypt_many)(ctx->main_key.round_keys,
|
||||
ctx->main_key.nrounds,
|
||||
dst, src, count, &tweak);
|
||||
kernel_neon_end();
|
||||
dst += count;
|
||||
src += count;
|
||||
nbytes -= count;
|
||||
}
|
||||
|
||||
/* Handle any remainder with generic code */
|
||||
while (nbytes >= sizeof(tweak)) {
|
||||
*(__le64 *)dst = *(__le64 *)src ^ tweak;
|
||||
(*crypt_one)(&ctx->main_key, dst, dst);
|
||||
*(__le64 *)dst ^= tweak;
|
||||
tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
|
||||
((tweak & cpu_to_le64(1ULL << 63)) ?
|
||||
0x1B : 0));
|
||||
dst += sizeof(tweak);
|
||||
src += sizeof(tweak);
|
||||
nbytes -= sizeof(tweak);
|
||||
}
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int speck64_xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck64_xts_crypt(req, crypto_speck64_encrypt,
|
||||
speck64_xts_encrypt_neon);
|
||||
}
|
||||
|
||||
static int speck64_xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck64_xts_crypt(req, crypto_speck64_decrypt,
|
||||
speck64_xts_decrypt_neon);
|
||||
}
|
||||
|
||||
static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_verify_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
keylen /= 2;
|
||||
|
||||
err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
|
||||
}
|
||||
|
||||
static struct skcipher_alg speck_algs[] = {
|
||||
{
|
||||
.base.cra_name = "xts(speck128)",
|
||||
.base.cra_driver_name = "xts-speck128-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = SPECK128_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * SPECK128_128_KEY_SIZE,
|
||||
.max_keysize = 2 * SPECK128_256_KEY_SIZE,
|
||||
.ivsize = SPECK128_BLOCK_SIZE,
|
||||
.walksize = SPECK_NEON_CHUNK_SIZE,
|
||||
.setkey = speck128_xts_setkey,
|
||||
.encrypt = speck128_xts_encrypt,
|
||||
.decrypt = speck128_xts_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "xts(speck64)",
|
||||
.base.cra_driver_name = "xts-speck64-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = SPECK64_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * SPECK64_96_KEY_SIZE,
|
||||
.max_keysize = 2 * SPECK64_128_KEY_SIZE,
|
||||
.ivsize = SPECK64_BLOCK_SIZE,
|
||||
.walksize = SPECK_NEON_CHUNK_SIZE,
|
||||
.setkey = speck64_xts_setkey,
|
||||
.encrypt = speck64_xts_encrypt,
|
||||
.decrypt = speck64_xts_decrypt,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init speck_neon_module_init(void)
|
||||
{
|
||||
if (!(elf_hwcap & HWCAP_NEON))
|
||||
return -ENODEV;
|
||||
return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
|
||||
}
|
||||
|
||||
static void __exit speck_neon_module_exit(void)
|
||||
{
|
||||
crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
|
||||
}
|
||||
|
||||
module_init(speck_neon_module_init);
|
||||
module_exit(speck_neon_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
|
||||
MODULE_ALIAS_CRYPTO("xts(speck128)");
|
||||
MODULE_ALIAS_CRYPTO("xts-speck128-neon");
|
||||
MODULE_ALIAS_CRYPTO("xts(speck64)");
|
||||
MODULE_ALIAS_CRYPTO("xts-speck64-neon");
|
@ -113,4 +113,10 @@ config CRYPTO_AES_ARM64_BS
|
||||
select CRYPTO_AES_ARM64
|
||||
select CRYPTO_SIMD
|
||||
|
||||
config CRYPTO_SPECK_NEON
|
||||
tristate "NEON accelerated Speck cipher algorithms"
|
||||
depends on KERNEL_MODE_NEON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_SPECK
|
||||
|
||||
endif
|
||||
|
@ -53,20 +53,21 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
|
||||
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
|
||||
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
|
||||
speck-neon-y := speck-neon-core.o speck-neon-glue.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
|
||||
aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o
|
||||
aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
|
||||
|
||||
AFLAGS_aes-ce.o := -DINTERLEAVE=4
|
||||
AFLAGS_aes-neon.o := -DINTERLEAVE=4
|
||||
|
||||
CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
|
||||
|
||||
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
|
||||
$(call if_changed_rule,cc_o_c)
|
||||
|
||||
ifdef REGENERATE_ARM64_CRYPTO
|
||||
quiet_cmd_perlasm = PERLASM $@
|
||||
cmd_perlasm = $(PERL) $(<) void $(@)
|
||||
|
||||
@ -75,5 +76,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
|
||||
|
||||
$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
|
||||
$(call cmd,perlasm)
|
||||
endif
|
||||
|
||||
.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
|
||||
|
@ -107,11 +107,13 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
|
||||
}
|
||||
|
||||
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
|
||||
u32 abytes, u32 *macp, bool use_neon)
|
||||
u32 abytes, u32 *macp)
|
||||
{
|
||||
if (likely(use_neon)) {
|
||||
if (may_use_simd()) {
|
||||
kernel_neon_begin();
|
||||
ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
|
||||
num_rounds(key));
|
||||
kernel_neon_end();
|
||||
} else {
|
||||
if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
|
||||
int added = min(abytes, AES_BLOCK_SIZE - *macp);
|
||||
@ -143,8 +145,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
|
||||
}
|
||||
}
|
||||
|
||||
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
|
||||
bool use_neon)
|
||||
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
|
||||
{
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
|
||||
@ -163,7 +164,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
|
||||
ltag.len = 6;
|
||||
}
|
||||
|
||||
ccm_update_mac(ctx, mac, (u8 *)<ag, ltag.len, &macp, use_neon);
|
||||
ccm_update_mac(ctx, mac, (u8 *)<ag, ltag.len, &macp);
|
||||
scatterwalk_start(&walk, req->src);
|
||||
|
||||
do {
|
||||
@ -175,7 +176,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
|
||||
n = scatterwalk_clamp(&walk, len);
|
||||
}
|
||||
p = scatterwalk_map(&walk);
|
||||
ccm_update_mac(ctx, mac, p, n, &macp, use_neon);
|
||||
ccm_update_mac(ctx, mac, p, n, &macp);
|
||||
len -= n;
|
||||
|
||||
scatterwalk_unmap(p);
|
||||
@ -242,43 +243,42 @@ static int ccm_encrypt(struct aead_request *req)
|
||||
u8 __aligned(8) mac[AES_BLOCK_SIZE];
|
||||
u8 buf[AES_BLOCK_SIZE];
|
||||
u32 len = req->cryptlen;
|
||||
bool use_neon = may_use_simd();
|
||||
int err;
|
||||
|
||||
err = ccm_init_mac(req, mac, len);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (likely(use_neon))
|
||||
kernel_neon_begin();
|
||||
|
||||
if (req->assoclen)
|
||||
ccm_calculate_auth_mac(req, mac, use_neon);
|
||||
ccm_calculate_auth_mac(req, mac);
|
||||
|
||||
/* preserve the original iv for the final round */
|
||||
memcpy(buf, req->iv, AES_BLOCK_SIZE);
|
||||
|
||||
err = skcipher_walk_aead_encrypt(&walk, req, true);
|
||||
|
||||
if (likely(use_neon)) {
|
||||
if (may_use_simd()) {
|
||||
while (walk.nbytes) {
|
||||
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
|
||||
|
||||
if (walk.nbytes == walk.total)
|
||||
tail = 0;
|
||||
|
||||
kernel_neon_begin();
|
||||
ce_aes_ccm_encrypt(walk.dst.virt.addr,
|
||||
walk.src.virt.addr,
|
||||
walk.nbytes - tail, ctx->key_enc,
|
||||
num_rounds(ctx), mac, walk.iv);
|
||||
kernel_neon_end();
|
||||
|
||||
err = skcipher_walk_done(&walk, tail);
|
||||
}
|
||||
if (!err)
|
||||
if (!err) {
|
||||
kernel_neon_begin();
|
||||
ce_aes_ccm_final(mac, buf, ctx->key_enc,
|
||||
num_rounds(ctx));
|
||||
|
||||
kernel_neon_end();
|
||||
kernel_neon_end();
|
||||
}
|
||||
} else {
|
||||
err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
|
||||
}
|
||||
@ -301,43 +301,42 @@ static int ccm_decrypt(struct aead_request *req)
|
||||
u8 __aligned(8) mac[AES_BLOCK_SIZE];
|
||||
u8 buf[AES_BLOCK_SIZE];
|
||||
u32 len = req->cryptlen - authsize;
|
||||
bool use_neon = may_use_simd();
|
||||
int err;
|
||||
|
||||
err = ccm_init_mac(req, mac, len);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (likely(use_neon))
|
||||
kernel_neon_begin();
|
||||
|
||||
if (req->assoclen)
|
||||
ccm_calculate_auth_mac(req, mac, use_neon);
|
||||
ccm_calculate_auth_mac(req, mac);
|
||||
|
||||
/* preserve the original iv for the final round */
|
||||
memcpy(buf, req->iv, AES_BLOCK_SIZE);
|
||||
|
||||
err = skcipher_walk_aead_decrypt(&walk, req, true);
|
||||
|
||||
if (likely(use_neon)) {
|
||||
if (may_use_simd()) {
|
||||
while (walk.nbytes) {
|
||||
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
|
||||
|
||||
if (walk.nbytes == walk.total)
|
||||
tail = 0;
|
||||
|
||||
kernel_neon_begin();
|
||||
ce_aes_ccm_decrypt(walk.dst.virt.addr,
|
||||
walk.src.virt.addr,
|
||||
walk.nbytes - tail, ctx->key_enc,
|
||||
num_rounds(ctx), mac, walk.iv);
|
||||
kernel_neon_end();
|
||||
|
||||
err = skcipher_walk_done(&walk, tail);
|
||||
}
|
||||
if (!err)
|
||||
if (!err) {
|
||||
kernel_neon_begin();
|
||||
ce_aes_ccm_final(mac, buf, ctx->key_enc,
|
||||
num_rounds(ctx));
|
||||
|
||||
kernel_neon_end();
|
||||
kernel_neon_end();
|
||||
}
|
||||
} else {
|
||||
err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
|
||||
}
|
||||
|
@ -64,17 +64,17 @@ MODULE_LICENSE("GPL v2");
|
||||
|
||||
/* defined in aes-modes.S */
|
||||
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
|
||||
int rounds, int blocks, int first);
|
||||
int rounds, int blocks);
|
||||
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
|
||||
int rounds, int blocks, int first);
|
||||
int rounds, int blocks);
|
||||
|
||||
asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
|
||||
int rounds, int blocks, u8 iv[], int first);
|
||||
int rounds, int blocks, u8 iv[]);
|
||||
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
|
||||
int rounds, int blocks, u8 iv[], int first);
|
||||
int rounds, int blocks, u8 iv[]);
|
||||
|
||||
asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
|
||||
int rounds, int blocks, u8 ctr[], int first);
|
||||
int rounds, int blocks, u8 ctr[]);
|
||||
|
||||
asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
|
||||
int rounds, int blocks, u8 const rk2[], u8 iv[],
|
||||
@ -133,19 +133,19 @@ static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err, first, rounds = 6 + ctx->key_length / 4;
|
||||
int err, rounds = 6 + ctx->key_length / 4;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int blocks;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
|
||||
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
|
||||
kernel_neon_begin();
|
||||
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
(u8 *)ctx->key_enc, rounds, blocks, first);
|
||||
(u8 *)ctx->key_enc, rounds, blocks);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -153,19 +153,19 @@ static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err, first, rounds = 6 + ctx->key_length / 4;
|
||||
int err, rounds = 6 + ctx->key_length / 4;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int blocks;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
|
||||
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
|
||||
kernel_neon_begin();
|
||||
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
(u8 *)ctx->key_dec, rounds, blocks, first);
|
||||
(u8 *)ctx->key_dec, rounds, blocks);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -173,20 +173,19 @@ static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err, first, rounds = 6 + ctx->key_length / 4;
|
||||
int err, rounds = 6 + ctx->key_length / 4;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int blocks;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
|
||||
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
|
||||
kernel_neon_begin();
|
||||
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
|
||||
first);
|
||||
(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -194,20 +193,19 @@ static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err, first, rounds = 6 + ctx->key_length / 4;
|
||||
int err, rounds = 6 + ctx->key_length / 4;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int blocks;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
|
||||
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
|
||||
kernel_neon_begin();
|
||||
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
|
||||
first);
|
||||
(u8 *)ctx->key_dec, rounds, blocks, walk.iv);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -215,20 +213,18 @@ static int ctr_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err, first, rounds = 6 + ctx->key_length / 4;
|
||||
int err, rounds = 6 + ctx->key_length / 4;
|
||||
struct skcipher_walk walk;
|
||||
int blocks;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
first = 1;
|
||||
kernel_neon_begin();
|
||||
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
|
||||
kernel_neon_begin();
|
||||
aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
|
||||
first);
|
||||
(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
first = 0;
|
||||
kernel_neon_end();
|
||||
}
|
||||
if (walk.nbytes) {
|
||||
u8 __aligned(8) tail[AES_BLOCK_SIZE];
|
||||
@ -241,12 +237,13 @@ static int ctr_encrypt(struct skcipher_request *req)
|
||||
*/
|
||||
blocks = -1;
|
||||
|
||||
kernel_neon_begin();
|
||||
aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
|
||||
blocks, walk.iv, first);
|
||||
blocks, walk.iv);
|
||||
kernel_neon_end();
|
||||
crypto_xor_cpy(tdst, tsrc, tail, nbytes);
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -270,16 +267,16 @@ static int xts_encrypt(struct skcipher_request *req)
|
||||
struct skcipher_walk walk;
|
||||
unsigned int blocks;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
|
||||
kernel_neon_begin();
|
||||
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
(u8 *)ctx->key1.key_enc, rounds, blocks,
|
||||
(u8 *)ctx->key2.key_enc, walk.iv, first);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -292,16 +289,16 @@ static int xts_decrypt(struct skcipher_request *req)
|
||||
struct skcipher_walk walk;
|
||||
unsigned int blocks;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
|
||||
kernel_neon_begin();
|
||||
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
(u8 *)ctx->key1.key_dec, rounds, blocks,
|
||||
(u8 *)ctx->key2.key_enc, walk.iv, first);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -425,7 +422,7 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
|
||||
|
||||
/* encrypt the zero vector */
|
||||
kernel_neon_begin();
|
||||
aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1, 1);
|
||||
aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
|
||||
kernel_neon_end();
|
||||
|
||||
cmac_gf128_mul_by_x(consts, consts);
|
||||
@ -454,8 +451,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
|
||||
return err;
|
||||
|
||||
kernel_neon_begin();
|
||||
aes_ecb_encrypt(key, ks[0], rk, rounds, 1, 1);
|
||||
aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2, 0);
|
||||
aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
|
||||
aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
|
||||
kernel_neon_end();
|
||||
|
||||
return cbcmac_setkey(tfm, key, sizeof(key));
|
||||
|
@ -13,127 +13,39 @@
|
||||
.text
|
||||
.align 4
|
||||
|
||||
/*
|
||||
* There are several ways to instantiate this code:
|
||||
* - no interleave, all inline
|
||||
* - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
|
||||
* - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
|
||||
* - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
|
||||
* - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
|
||||
*
|
||||
* Macros imported by this code:
|
||||
* - enc_prepare - setup NEON registers for encryption
|
||||
* - dec_prepare - setup NEON registers for decryption
|
||||
* - enc_switch_key - change to new key after having prepared for encryption
|
||||
* - encrypt_block - encrypt a single block
|
||||
* - decrypt block - decrypt a single block
|
||||
* - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
|
||||
* - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
|
||||
* - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
|
||||
* - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
|
||||
*/
|
||||
|
||||
#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
|
||||
#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp
|
||||
#define FRAME_POP ldp x29, x30, [sp],#16
|
||||
|
||||
#if INTERLEAVE == 2
|
||||
|
||||
aes_encrypt_block2x:
|
||||
encrypt_block2x v0, v1, w3, x2, x6, w7
|
||||
ret
|
||||
ENDPROC(aes_encrypt_block2x)
|
||||
|
||||
aes_decrypt_block2x:
|
||||
decrypt_block2x v0, v1, w3, x2, x6, w7
|
||||
ret
|
||||
ENDPROC(aes_decrypt_block2x)
|
||||
|
||||
#elif INTERLEAVE == 4
|
||||
|
||||
aes_encrypt_block4x:
|
||||
encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
|
||||
encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
|
||||
ret
|
||||
ENDPROC(aes_encrypt_block4x)
|
||||
|
||||
aes_decrypt_block4x:
|
||||
decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
|
||||
decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
|
||||
ret
|
||||
ENDPROC(aes_decrypt_block4x)
|
||||
|
||||
#else
|
||||
#error INTERLEAVE should equal 2 or 4
|
||||
#endif
|
||||
|
||||
.macro do_encrypt_block2x
|
||||
bl aes_encrypt_block2x
|
||||
.endm
|
||||
|
||||
.macro do_decrypt_block2x
|
||||
bl aes_decrypt_block2x
|
||||
.endm
|
||||
|
||||
.macro do_encrypt_block4x
|
||||
bl aes_encrypt_block4x
|
||||
.endm
|
||||
|
||||
.macro do_decrypt_block4x
|
||||
bl aes_decrypt_block4x
|
||||
.endm
|
||||
|
||||
#else
|
||||
#define FRAME_PUSH
|
||||
#define FRAME_POP
|
||||
|
||||
.macro do_encrypt_block2x
|
||||
encrypt_block2x v0, v1, w3, x2, x6, w7
|
||||
.endm
|
||||
|
||||
.macro do_decrypt_block2x
|
||||
decrypt_block2x v0, v1, w3, x2, x6, w7
|
||||
.endm
|
||||
|
||||
.macro do_encrypt_block4x
|
||||
encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
|
||||
.endm
|
||||
|
||||
.macro do_decrypt_block4x
|
||||
decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
|
||||
.endm
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
||||
* int blocks, int first)
|
||||
* int blocks)
|
||||
* aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
||||
* int blocks, int first)
|
||||
* int blocks)
|
||||
*/
|
||||
|
||||
AES_ENTRY(aes_ecb_encrypt)
|
||||
FRAME_PUSH
|
||||
cbz w5, .LecbencloopNx
|
||||
stp x29, x30, [sp, #-16]!
|
||||
mov x29, sp
|
||||
|
||||
enc_prepare w3, x2, x5
|
||||
|
||||
.LecbencloopNx:
|
||||
#if INTERLEAVE >= 2
|
||||
subs w4, w4, #INTERLEAVE
|
||||
subs w4, w4, #4
|
||||
bmi .Lecbenc1x
|
||||
#if INTERLEAVE == 2
|
||||
ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
|
||||
do_encrypt_block2x
|
||||
st1 {v0.16b-v1.16b}, [x0], #32
|
||||
#else
|
||||
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
|
||||
do_encrypt_block4x
|
||||
bl aes_encrypt_block4x
|
||||
st1 {v0.16b-v3.16b}, [x0], #64
|
||||
#endif
|
||||
b .LecbencloopNx
|
||||
.Lecbenc1x:
|
||||
adds w4, w4, #INTERLEAVE
|
||||
adds w4, w4, #4
|
||||
beq .Lecbencout
|
||||
#endif
|
||||
.Lecbencloop:
|
||||
ld1 {v0.16b}, [x1], #16 /* get next pt block */
|
||||
encrypt_block v0, w3, x2, x5, w6
|
||||
@ -141,35 +53,27 @@ AES_ENTRY(aes_ecb_encrypt)
|
||||
subs w4, w4, #1
|
||||
bne .Lecbencloop
|
||||
.Lecbencout:
|
||||
FRAME_POP
|
||||
ldp x29, x30, [sp], #16
|
||||
ret
|
||||
AES_ENDPROC(aes_ecb_encrypt)
|
||||
|
||||
|
||||
AES_ENTRY(aes_ecb_decrypt)
|
||||
FRAME_PUSH
|
||||
cbz w5, .LecbdecloopNx
|
||||
stp x29, x30, [sp, #-16]!
|
||||
mov x29, sp
|
||||
|
||||
dec_prepare w3, x2, x5
|
||||
|
||||
.LecbdecloopNx:
|
||||
#if INTERLEAVE >= 2
|
||||
subs w4, w4, #INTERLEAVE
|
||||
subs w4, w4, #4
|
||||
bmi .Lecbdec1x
|
||||
#if INTERLEAVE == 2
|
||||
ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
|
||||
do_decrypt_block2x
|
||||
st1 {v0.16b-v1.16b}, [x0], #32
|
||||
#else
|
||||
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
|
||||
do_decrypt_block4x
|
||||
bl aes_decrypt_block4x
|
||||
st1 {v0.16b-v3.16b}, [x0], #64
|
||||
#endif
|
||||
b .LecbdecloopNx
|
||||
.Lecbdec1x:
|
||||
adds w4, w4, #INTERLEAVE
|
||||
adds w4, w4, #4
|
||||
beq .Lecbdecout
|
||||
#endif
|
||||
.Lecbdecloop:
|
||||
ld1 {v0.16b}, [x1], #16 /* get next ct block */
|
||||
decrypt_block v0, w3, x2, x5, w6
|
||||
@ -177,62 +81,68 @@ AES_ENTRY(aes_ecb_decrypt)
|
||||
subs w4, w4, #1
|
||||
bne .Lecbdecloop
|
||||
.Lecbdecout:
|
||||
FRAME_POP
|
||||
ldp x29, x30, [sp], #16
|
||||
ret
|
||||
AES_ENDPROC(aes_ecb_decrypt)
|
||||
|
||||
|
||||
/*
|
||||
* aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
||||
* int blocks, u8 iv[], int first)
|
||||
* int blocks, u8 iv[])
|
||||
* aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
|
||||
* int blocks, u8 iv[], int first)
|
||||
* int blocks, u8 iv[])
|
||||
*/
|
||||
|
||||
AES_ENTRY(aes_cbc_encrypt)
|
||||
cbz w6, .Lcbcencloop
|
||||
|
||||
ld1 {v0.16b}, [x5] /* get iv */
|
||||
ld1 {v4.16b}, [x5] /* get iv */
|
||||
enc_prepare w3, x2, x6
|
||||
|
||||
.Lcbcencloop:
|
||||
ld1 {v1.16b}, [x1], #16 /* get next pt block */
|
||||
eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
|
||||
.Lcbcencloop4x:
|
||||
subs w4, w4, #4
|
||||
bmi .Lcbcenc1x
|
||||
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
|
||||
eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
|
||||
encrypt_block v0, w3, x2, x6, w7
|
||||
st1 {v0.16b}, [x0], #16
|
||||
eor v1.16b, v1.16b, v0.16b
|
||||
encrypt_block v1, w3, x2, x6, w7
|
||||
eor v2.16b, v2.16b, v1.16b
|
||||
encrypt_block v2, w3, x2, x6, w7
|
||||
eor v3.16b, v3.16b, v2.16b
|
||||
encrypt_block v3, w3, x2, x6, w7
|
||||
st1 {v0.16b-v3.16b}, [x0], #64
|
||||
mov v4.16b, v3.16b
|
||||
b .Lcbcencloop4x
|
||||
.Lcbcenc1x:
|
||||
adds w4, w4, #4
|
||||
beq .Lcbcencout
|
||||
.Lcbcencloop:
|
||||
ld1 {v0.16b}, [x1], #16 /* get next pt block */
|
||||
eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
|
||||
encrypt_block v4, w3, x2, x6, w7
|
||||
st1 {v4.16b}, [x0], #16
|
||||
subs w4, w4, #1
|
||||
bne .Lcbcencloop
|
||||
st1 {v0.16b}, [x5] /* return iv */
|
||||
.Lcbcencout:
|
||||
st1 {v4.16b}, [x5] /* return iv */
|
||||
ret
|
||||
AES_ENDPROC(aes_cbc_encrypt)
|
||||
|
||||
|
||||
AES_ENTRY(aes_cbc_decrypt)
|
||||
FRAME_PUSH
|
||||
cbz w6, .LcbcdecloopNx
|
||||
stp x29, x30, [sp, #-16]!
|
||||
mov x29, sp
|
||||
|
||||
ld1 {v7.16b}, [x5] /* get iv */
|
||||
dec_prepare w3, x2, x6
|
||||
|
||||
.LcbcdecloopNx:
|
||||
#if INTERLEAVE >= 2
|
||||
subs w4, w4, #INTERLEAVE
|
||||
subs w4, w4, #4
|
||||
bmi .Lcbcdec1x
|
||||
#if INTERLEAVE == 2
|
||||
ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
|
||||
mov v2.16b, v0.16b
|
||||
mov v3.16b, v1.16b
|
||||
do_decrypt_block2x
|
||||
eor v0.16b, v0.16b, v7.16b
|
||||
eor v1.16b, v1.16b, v2.16b
|
||||
mov v7.16b, v3.16b
|
||||
st1 {v0.16b-v1.16b}, [x0], #32
|
||||
#else
|
||||
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
|
||||
mov v4.16b, v0.16b
|
||||
mov v5.16b, v1.16b
|
||||
mov v6.16b, v2.16b
|
||||
do_decrypt_block4x
|
||||
bl aes_decrypt_block4x
|
||||
sub x1, x1, #16
|
||||
eor v0.16b, v0.16b, v7.16b
|
||||
eor v1.16b, v1.16b, v4.16b
|
||||
@ -240,12 +150,10 @@ AES_ENTRY(aes_cbc_decrypt)
|
||||
eor v2.16b, v2.16b, v5.16b
|
||||
eor v3.16b, v3.16b, v6.16b
|
||||
st1 {v0.16b-v3.16b}, [x0], #64
|
||||
#endif
|
||||
b .LcbcdecloopNx
|
||||
.Lcbcdec1x:
|
||||
adds w4, w4, #INTERLEAVE
|
||||
adds w4, w4, #4
|
||||
beq .Lcbcdecout
|
||||
#endif
|
||||
.Lcbcdecloop:
|
||||
ld1 {v1.16b}, [x1], #16 /* get next ct block */
|
||||
mov v0.16b, v1.16b /* ...and copy to v0 */
|
||||
@ -256,49 +164,33 @@ AES_ENTRY(aes_cbc_decrypt)
|
||||
subs w4, w4, #1
|
||||
bne .Lcbcdecloop
|
||||
.Lcbcdecout:
|
||||
FRAME_POP
|
||||
st1 {v7.16b}, [x5] /* return iv */
|
||||
ldp x29, x30, [sp], #16
|
||||
ret
|
||||
AES_ENDPROC(aes_cbc_decrypt)
|
||||
|
||||
|
||||
/*
 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
 *		   int blocks, u8 ctr[], int first)
 *		   int blocks, u8 ctr[])
 */
|
||||
|
||||
AES_ENTRY(aes_ctr_encrypt)
|
||||
FRAME_PUSH
|
||||
cbz w6, .Lctrnotfirst /* 1st time around? */
|
||||
stp x29, x30, [sp, #-16]!
|
||||
mov x29, sp
|
||||
|
||||
enc_prepare w3, x2, x6
|
||||
ld1 {v4.16b}, [x5]
|
||||
|
||||
.Lctrnotfirst:
|
||||
umov x8, v4.d[1] /* keep swabbed ctr in reg */
|
||||
rev x8, x8
|
||||
#if INTERLEAVE >= 2
|
||||
cmn w8, w4 /* 32 bit overflow? */
|
||||
umov x6, v4.d[1] /* keep swabbed ctr in reg */
|
||||
rev x6, x6
|
||||
cmn w6, w4 /* 32 bit overflow? */
|
||||
bcs .Lctrloop
|
||||
.LctrloopNx:
|
||||
subs w4, w4, #INTERLEAVE
|
||||
subs w4, w4, #4
|
||||
bmi .Lctr1x
|
||||
#if INTERLEAVE == 2
|
||||
mov v0.8b, v4.8b
|
||||
mov v1.8b, v4.8b
|
||||
rev x7, x8
|
||||
add x8, x8, #1
|
||||
ins v0.d[1], x7
|
||||
rev x7, x8
|
||||
add x8, x8, #1
|
||||
ins v1.d[1], x7
|
||||
ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
|
||||
do_encrypt_block2x
|
||||
eor v0.16b, v0.16b, v2.16b
|
||||
eor v1.16b, v1.16b, v3.16b
|
||||
st1 {v0.16b-v1.16b}, [x0], #32
|
||||
#else
|
||||
ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
|
||||
dup v7.4s, w8
|
||||
dup v7.4s, w6
|
||||
mov v0.16b, v4.16b
|
||||
add v7.4s, v7.4s, v8.4s
|
||||
mov v1.16b, v4.16b
|
||||
@ -309,29 +201,27 @@ AES_ENTRY(aes_ctr_encrypt)
|
||||
mov v2.s[3], v8.s[1]
|
||||
mov v3.s[3], v8.s[2]
|
||||
ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
|
||||
do_encrypt_block4x
|
||||
bl aes_encrypt_block4x
|
||||
eor v0.16b, v5.16b, v0.16b
|
||||
ld1 {v5.16b}, [x1], #16 /* get 1 input block */
|
||||
eor v1.16b, v6.16b, v1.16b
|
||||
eor v2.16b, v7.16b, v2.16b
|
||||
eor v3.16b, v5.16b, v3.16b
|
||||
st1 {v0.16b-v3.16b}, [x0], #64
|
||||
add x8, x8, #INTERLEAVE
|
||||
#endif
|
||||
rev x7, x8
|
||||
add x6, x6, #4
|
||||
rev x7, x6
|
||||
ins v4.d[1], x7
|
||||
cbz w4, .Lctrout
|
||||
b .LctrloopNx
|
||||
.Lctr1x:
|
||||
adds w4, w4, #INTERLEAVE
|
||||
adds w4, w4, #4
|
||||
beq .Lctrout
|
||||
#endif
|
||||
.Lctrloop:
|
||||
mov v0.16b, v4.16b
|
||||
encrypt_block v0, w3, x2, x6, w7
|
||||
encrypt_block v0, w3, x2, x8, w7
|
||||
|
||||
adds x8, x8, #1 /* increment BE ctr */
|
||||
rev x7, x8
|
||||
adds x6, x6, #1 /* increment BE ctr */
|
||||
rev x7, x6
|
||||
ins v4.d[1], x7
|
||||
bcs .Lctrcarry /* overflow? */
|
||||
|
||||
@ -345,12 +235,12 @@ AES_ENTRY(aes_ctr_encrypt)
|
||||
|
||||
.Lctrout:
|
||||
st1 {v4.16b}, [x5] /* return next CTR value */
|
||||
FRAME_POP
|
||||
ldp x29, x30, [sp], #16
|
||||
ret
|
||||
|
||||
.Lctrtailblock:
|
||||
st1 {v0.16b}, [x0]
|
||||
FRAME_POP
|
||||
ldp x29, x30, [sp], #16
|
||||
ret
|
||||
|
||||
.Lctrcarry:
|
||||
@ -384,39 +274,26 @@ CPU_LE( .quad 1, 0x87 )
|
||||
CPU_BE( .quad 0x87, 1 )
|
||||
|
||||
AES_ENTRY(aes_xts_encrypt)
|
||||
FRAME_PUSH
|
||||
cbz w7, .LxtsencloopNx
|
||||
stp x29, x30, [sp, #-16]!
|
||||
mov x29, sp
|
||||
|
||||
ld1 {v4.16b}, [x6]
|
||||
enc_prepare w3, x5, x6
|
||||
encrypt_block v4, w3, x5, x6, w7 /* first tweak */
|
||||
enc_switch_key w3, x2, x6
|
||||
cbz w7, .Lxtsencnotfirst
|
||||
|
||||
enc_prepare w3, x5, x8
|
||||
encrypt_block v4, w3, x5, x8, w7 /* first tweak */
|
||||
enc_switch_key w3, x2, x8
|
||||
ldr q7, .Lxts_mul_x
|
||||
b .LxtsencNx
|
||||
|
||||
.Lxtsencnotfirst:
|
||||
enc_prepare w3, x2, x8
|
||||
.LxtsencloopNx:
|
||||
ldr q7, .Lxts_mul_x
|
||||
next_tweak v4, v4, v7, v8
|
||||
.LxtsencNx:
|
||||
#if INTERLEAVE >= 2
|
||||
subs w4, w4, #INTERLEAVE
|
||||
subs w4, w4, #4
|
||||
bmi .Lxtsenc1x
|
||||
#if INTERLEAVE == 2
|
||||
ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
|
||||
next_tweak v5, v4, v7, v8
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
eor v1.16b, v1.16b, v5.16b
|
||||
do_encrypt_block2x
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
eor v1.16b, v1.16b, v5.16b
|
||||
st1 {v0.16b-v1.16b}, [x0], #32
|
||||
cbz w4, .LxtsencoutNx
|
||||
next_tweak v4, v5, v7, v8
|
||||
b .LxtsencNx
|
||||
.LxtsencoutNx:
|
||||
mov v4.16b, v5.16b
|
||||
b .Lxtsencout
|
||||
#else
|
||||
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
|
||||
next_tweak v5, v4, v7, v8
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
@ -425,7 +302,7 @@ AES_ENTRY(aes_xts_encrypt)
|
||||
eor v2.16b, v2.16b, v6.16b
|
||||
next_tweak v7, v6, v7, v8
|
||||
eor v3.16b, v3.16b, v7.16b
|
||||
do_encrypt_block4x
|
||||
bl aes_encrypt_block4x
|
||||
eor v3.16b, v3.16b, v7.16b
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
eor v1.16b, v1.16b, v5.16b
|
||||
@ -434,15 +311,13 @@ AES_ENTRY(aes_xts_encrypt)
|
||||
mov v4.16b, v7.16b
|
||||
cbz w4, .Lxtsencout
|
||||
b .LxtsencloopNx
|
||||
#endif
|
||||
.Lxtsenc1x:
|
||||
adds w4, w4, #INTERLEAVE
|
||||
adds w4, w4, #4
|
||||
beq .Lxtsencout
|
||||
#endif
|
||||
.Lxtsencloop:
|
||||
ld1 {v1.16b}, [x1], #16
|
||||
eor v0.16b, v1.16b, v4.16b
|
||||
encrypt_block v0, w3, x2, x6, w7
|
||||
encrypt_block v0, w3, x2, x8, w7
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
st1 {v0.16b}, [x0], #16
|
||||
subs w4, w4, #1
|
||||
@ -450,45 +325,33 @@ AES_ENTRY(aes_xts_encrypt)
|
||||
next_tweak v4, v4, v7, v8
|
||||
b .Lxtsencloop
|
||||
.Lxtsencout:
|
||||
FRAME_POP
|
||||
st1 {v4.16b}, [x6]
|
||||
ldp x29, x30, [sp], #16
|
||||
ret
|
||||
AES_ENDPROC(aes_xts_encrypt)
|
||||
|
||||
|
||||
AES_ENTRY(aes_xts_decrypt)
|
||||
FRAME_PUSH
|
||||
cbz w7, .LxtsdecloopNx
|
||||
stp x29, x30, [sp, #-16]!
|
||||
mov x29, sp
|
||||
|
||||
ld1 {v4.16b}, [x6]
|
||||
enc_prepare w3, x5, x6
|
||||
encrypt_block v4, w3, x5, x6, w7 /* first tweak */
|
||||
dec_prepare w3, x2, x6
|
||||
cbz w7, .Lxtsdecnotfirst
|
||||
|
||||
enc_prepare w3, x5, x8
|
||||
encrypt_block v4, w3, x5, x8, w7 /* first tweak */
|
||||
dec_prepare w3, x2, x8
|
||||
ldr q7, .Lxts_mul_x
|
||||
b .LxtsdecNx
|
||||
|
||||
.Lxtsdecnotfirst:
|
||||
dec_prepare w3, x2, x8
|
||||
.LxtsdecloopNx:
|
||||
ldr q7, .Lxts_mul_x
|
||||
next_tweak v4, v4, v7, v8
|
||||
.LxtsdecNx:
|
||||
#if INTERLEAVE >= 2
|
||||
subs w4, w4, #INTERLEAVE
|
||||
subs w4, w4, #4
|
||||
bmi .Lxtsdec1x
|
||||
#if INTERLEAVE == 2
|
||||
ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
|
||||
next_tweak v5, v4, v7, v8
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
eor v1.16b, v1.16b, v5.16b
|
||||
do_decrypt_block2x
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
eor v1.16b, v1.16b, v5.16b
|
||||
st1 {v0.16b-v1.16b}, [x0], #32
|
||||
cbz w4, .LxtsdecoutNx
|
||||
next_tweak v4, v5, v7, v8
|
||||
b .LxtsdecNx
|
||||
.LxtsdecoutNx:
|
||||
mov v4.16b, v5.16b
|
||||
b .Lxtsdecout
|
||||
#else
|
||||
ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
|
||||
next_tweak v5, v4, v7, v8
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
@ -497,7 +360,7 @@ AES_ENTRY(aes_xts_decrypt)
|
||||
eor v2.16b, v2.16b, v6.16b
|
||||
next_tweak v7, v6, v7, v8
|
||||
eor v3.16b, v3.16b, v7.16b
|
||||
do_decrypt_block4x
|
||||
bl aes_decrypt_block4x
|
||||
eor v3.16b, v3.16b, v7.16b
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
eor v1.16b, v1.16b, v5.16b
|
||||
@ -506,15 +369,13 @@ AES_ENTRY(aes_xts_decrypt)
|
||||
mov v4.16b, v7.16b
|
||||
cbz w4, .Lxtsdecout
|
||||
b .LxtsdecloopNx
|
||||
#endif
|
||||
.Lxtsdec1x:
|
||||
adds w4, w4, #INTERLEAVE
|
||||
adds w4, w4, #4
|
||||
beq .Lxtsdecout
|
||||
#endif
|
||||
.Lxtsdecloop:
|
||||
ld1 {v1.16b}, [x1], #16
|
||||
eor v0.16b, v1.16b, v4.16b
|
||||
decrypt_block v0, w3, x2, x6, w7
|
||||
decrypt_block v0, w3, x2, x8, w7
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
st1 {v0.16b}, [x0], #16
|
||||
subs w4, w4, #1
|
||||
@ -522,7 +383,8 @@ AES_ENTRY(aes_xts_decrypt)
|
||||
next_tweak v4, v4, v7, v8
|
||||
b .Lxtsdecloop
|
||||
.Lxtsdecout:
|
||||
FRAME_POP
|
||||
st1 {v4.16b}, [x6]
|
||||
ldp x29, x30, [sp], #16
|
||||
ret
|
||||
AES_ENDPROC(aes_xts_decrypt)
|
||||
|
||||
@ -533,8 +395,28 @@ AES_ENDPROC(aes_xts_decrypt)
|
||||
AES_ENTRY(aes_mac_update)
|
||||
ld1 {v0.16b}, [x4] /* get dg */
|
||||
enc_prepare w2, x1, x7
|
||||
cbnz w5, .Lmacenc
|
||||
cbz w5, .Lmacloop4x
|
||||
|
||||
encrypt_block v0, w2, x1, x7, w8
|
||||
|
||||
.Lmacloop4x:
|
||||
subs w3, w3, #4
|
||||
bmi .Lmac1x
|
||||
ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */
|
||||
eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
|
||||
encrypt_block v0, w2, x1, x7, w8
|
||||
eor v0.16b, v0.16b, v2.16b
|
||||
encrypt_block v0, w2, x1, x7, w8
|
||||
eor v0.16b, v0.16b, v3.16b
|
||||
encrypt_block v0, w2, x1, x7, w8
|
||||
eor v0.16b, v0.16b, v4.16b
|
||||
cmp w3, wzr
|
||||
csinv x5, x6, xzr, eq
|
||||
cbz w5, .Lmacout
|
||||
encrypt_block v0, w2, x1, x7, w8
|
||||
b .Lmacloop4x
|
||||
.Lmac1x:
|
||||
add w3, w3, #4
|
||||
.Lmacloop:
|
||||
cbz w3, .Lmacout
|
||||
ld1 {v1.16b}, [x0], #16 /* get next pt block */
|
||||
@ -544,7 +426,6 @@ AES_ENTRY(aes_mac_update)
|
||||
csinv x5, x6, xzr, eq
|
||||
cbz w5, .Lmacout
|
||||
|
||||
.Lmacenc:
|
||||
encrypt_block v0, w2, x1, x7, w8
|
||||
b .Lmacloop
|
||||
|
||||
|
@ -46,10 +46,9 @@ asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
|
||||
|
||||
/* borrowed from aes-neon-blk.ko */
|
||||
asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
|
||||
int rounds, int blocks, int first);
|
||||
int rounds, int blocks);
|
||||
asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
|
||||
int rounds, int blocks, u8 iv[],
|
||||
int first);
|
||||
int rounds, int blocks, u8 iv[]);
|
||||
|
||||
struct aesbs_ctx {
|
||||
u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32];
|
||||
@ -100,9 +99,8 @@ static int __ecb_crypt(struct skcipher_request *req,
|
||||
struct skcipher_walk walk;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
while (walk.nbytes >= AES_BLOCK_SIZE) {
|
||||
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
|
||||
@ -110,12 +108,13 @@ static int __ecb_crypt(struct skcipher_request *req,
|
||||
blocks = round_down(blocks,
|
||||
walk.stride / AES_BLOCK_SIZE);
|
||||
|
||||
kernel_neon_begin();
|
||||
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
|
||||
ctx->rounds, blocks);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes - blocks * AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -157,22 +156,21 @@ static int cbc_encrypt(struct skcipher_request *req)
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
int err, first = 1;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
while (walk.nbytes >= AES_BLOCK_SIZE) {
|
||||
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
|
||||
/* fall back to the non-bitsliced NEON implementation */
|
||||
kernel_neon_begin();
|
||||
neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->enc, ctx->key.rounds, blocks, walk.iv,
|
||||
first);
|
||||
ctx->enc, ctx->key.rounds, blocks,
|
||||
walk.iv);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
|
||||
first = 0;
|
||||
}
|
||||
kernel_neon_end();
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -183,9 +181,8 @@ static int cbc_decrypt(struct skcipher_request *req)
|
||||
struct skcipher_walk walk;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
while (walk.nbytes >= AES_BLOCK_SIZE) {
|
||||
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
|
||||
@ -193,13 +190,14 @@ static int cbc_decrypt(struct skcipher_request *req)
|
||||
blocks = round_down(blocks,
|
||||
walk.stride / AES_BLOCK_SIZE);
|
||||
|
||||
kernel_neon_begin();
|
||||
aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->key.rk, ctx->key.rounds, blocks,
|
||||
walk.iv);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes - blocks * AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -231,9 +229,8 @@ static int ctr_encrypt(struct skcipher_request *req)
|
||||
u8 buf[AES_BLOCK_SIZE];
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
|
||||
@ -244,8 +241,10 @@ static int ctr_encrypt(struct skcipher_request *req)
|
||||
final = NULL;
|
||||
}
|
||||
|
||||
kernel_neon_begin();
|
||||
aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
|
||||
ctx->rk, ctx->rounds, blocks, walk.iv, final);
|
||||
kernel_neon_end();
|
||||
|
||||
if (final) {
|
||||
u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
|
||||
@ -260,8 +259,6 @@ static int ctr_encrypt(struct skcipher_request *req)
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes - blocks * AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -306,12 +303,11 @@ static int __xts_crypt(struct skcipher_request *req,
|
||||
struct skcipher_walk walk;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
kernel_neon_begin();
|
||||
|
||||
neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey,
|
||||
ctx->key.rounds, 1, 1);
|
||||
neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
|
||||
kernel_neon_end();
|
||||
|
||||
while (walk.nbytes >= AES_BLOCK_SIZE) {
|
||||
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
|
||||
@ -320,13 +316,13 @@ static int __xts_crypt(struct skcipher_request *req,
|
||||
blocks = round_down(blocks,
|
||||
walk.stride / AES_BLOCK_SIZE);
|
||||
|
||||
kernel_neon_begin();
|
||||
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
|
||||
ctx->key.rounds, blocks, walk.iv);
|
||||
kernel_neon_end();
|
||||
err = skcipher_walk_done(&walk,
|
||||
walk.nbytes - blocks * AES_BLOCK_SIZE);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -37,12 +37,19 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
|
||||
u8 buf[CHACHA20_BLOCK_SIZE];
|
||||
|
||||
while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
|
||||
kernel_neon_begin();
|
||||
chacha20_4block_xor_neon(state, dst, src);
|
||||
kernel_neon_end();
|
||||
bytes -= CHACHA20_BLOCK_SIZE * 4;
|
||||
src += CHACHA20_BLOCK_SIZE * 4;
|
||||
dst += CHACHA20_BLOCK_SIZE * 4;
|
||||
state[12] += 4;
|
||||
}
|
||||
|
||||
if (!bytes)
|
||||
return;
|
||||
|
||||
kernel_neon_begin();
|
||||
while (bytes >= CHACHA20_BLOCK_SIZE) {
|
||||
chacha20_block_xor_neon(state, dst, src);
|
||||
bytes -= CHACHA20_BLOCK_SIZE;
|
||||
@ -55,6 +62,7 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
|
||||
chacha20_block_xor_neon(state, buf, buf);
|
||||
memcpy(dst, buf, bytes);
|
||||
}
|
||||
kernel_neon_end();
|
||||
}
|
||||
|
||||
static int chacha20_neon(struct skcipher_request *req)
|
||||
@ -68,11 +76,10 @@ static int chacha20_neon(struct skcipher_request *req)
|
||||
if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE)
|
||||
return crypto_chacha20_crypt(req);
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
crypto_chacha20_init(state, ctx, walk.iv);
|
||||
|
||||
kernel_neon_begin();
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
|
||||
@ -83,7 +90,6 @@ static int chacha20_neon(struct skcipher_request *req)
|
||||
nbytes);
|
||||
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
|
||||
}
|
||||
kernel_neon_end();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -89,21 +89,32 @@ static struct shash_alg algs[] = { {
|
||||
static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len)
|
||||
{
|
||||
	/*
	 * Stacking and unstacking a substantial slice of the NEON register
	 * file may significantly affect performance for small updates when
	 * executing in interrupt context, so fall back to the scalar code
	 * in that case.
	 */
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
if (!may_use_simd())
|
||||
return sha256_base_do_update(desc, data, len,
|
||||
(sha256_block_fn *)sha256_block_data_order);
|
||||
|
||||
kernel_neon_begin();
|
||||
sha256_base_do_update(desc, data, len,
|
||||
(sha256_block_fn *)sha256_block_neon);
|
||||
kernel_neon_end();
|
||||
while (len > 0) {
|
||||
unsigned int chunk = len;
|
||||
|
||||
		/*
		 * Don't hog the CPU for the entire time it takes to process all
		 * input when running on a preemptible kernel, but process the
		 * data block by block instead.
		 */
|
||||
if (IS_ENABLED(CONFIG_PREEMPT) &&
|
||||
chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
|
||||
chunk = SHA256_BLOCK_SIZE -
|
||||
sctx->count % SHA256_BLOCK_SIZE;
|
||||
|
||||
kernel_neon_begin();
|
||||
sha256_base_do_update(desc, data, chunk,
|
||||
(sha256_block_fn *)sha256_block_neon);
|
||||
kernel_neon_end();
|
||||
data += chunk;
|
||||
len -= chunk;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -117,10 +128,9 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
|
||||
sha256_base_do_finalize(desc,
|
||||
(sha256_block_fn *)sha256_block_data_order);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
if (len)
|
||||
sha256_base_do_update(desc, data, len,
|
||||
(sha256_block_fn *)sha256_block_neon);
|
||||
sha256_update_neon(desc, data, len);
|
||||
kernel_neon_begin();
|
||||
sha256_base_do_finalize(desc,
|
||||
(sha256_block_fn *)sha256_block_neon);
|
||||
kernel_neon_end();
|
||||
|
352
arch/arm64/crypto/speck-neon-core.S
Normal file
@ -0,0 +1,352 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
|
||||
*
|
||||
* Copyright (c) 2018 Google, Inc
|
||||
*
|
||||
* Author: Eric Biggers <ebiggers@google.com>
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.text
|
||||
|
||||
// arguments
|
||||
ROUND_KEYS .req x0 // const {u64,u32} *round_keys
|
||||
NROUNDS .req w1 // int nrounds
|
||||
NROUNDS_X .req x1
|
||||
DST .req x2 // void *dst
|
||||
SRC .req x3 // const void *src
|
||||
NBYTES .req w4 // unsigned int nbytes
|
||||
TWEAK .req x5 // void *tweak
|
||||
|
||||
// registers which hold the data being encrypted/decrypted
|
||||
// (underscores avoid a naming collision with ARM64 registers x0-x3)
|
||||
X_0 .req v0
|
||||
Y_0 .req v1
|
||||
X_1 .req v2
|
||||
Y_1 .req v3
|
||||
X_2 .req v4
|
||||
Y_2 .req v5
|
||||
X_3 .req v6
|
||||
Y_3 .req v7
|
||||
|
||||
// the round key, duplicated in all lanes
|
||||
ROUND_KEY .req v8
|
||||
|
||||
// index vector for tbl-based 8-bit rotates
|
||||
ROTATE_TABLE .req v9
|
||||
ROTATE_TABLE_Q .req q9
|
||||
|
||||
// temporary registers
|
||||
TMP0 .req v10
|
||||
TMP1 .req v11
|
||||
TMP2 .req v12
|
||||
TMP3 .req v13
|
||||
|
||||
// multiplication table for updating XTS tweaks
|
||||
GFMUL_TABLE .req v14
|
||||
GFMUL_TABLE_Q .req q14
|
||||
|
||||
// next XTS tweak value(s)
|
||||
TWEAKV_NEXT .req v15
|
||||
|
||||
// XTS tweaks for the blocks currently being encrypted/decrypted
|
||||
TWEAKV0 .req v16
|
||||
TWEAKV1 .req v17
|
||||
TWEAKV2 .req v18
|
||||
TWEAKV3 .req v19
|
||||
TWEAKV4 .req v20
|
||||
TWEAKV5 .req v21
|
||||
TWEAKV6 .req v22
|
||||
TWEAKV7 .req v23
|
||||
|
||||
.align 4
|
||||
.Lror64_8_table:
|
||||
.octa 0x080f0e0d0c0b0a090007060504030201
|
||||
.Lror32_8_table:
|
||||
.octa 0x0c0f0e0d080b0a090407060500030201
|
||||
.Lrol64_8_table:
|
||||
.octa 0x0e0d0c0b0a09080f0605040302010007
|
||||
.Lrol32_8_table:
|
||||
.octa 0x0e0d0c0f0a09080b0605040702010003
|
||||
.Lgf128mul_table:
|
||||
.octa 0x00000000000000870000000000000001
|
||||
.Lgf64mul_table:
|
||||
.octa 0x0000000000000000000000002d361b00
|
||||
|
||||
/*
 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
 *
 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
 * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
 */
|
||||
.macro _speck_round_128bytes n, lanes
|
||||
|
||||
// x = ror(x, 8)
|
||||
tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
|
||||
tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
|
||||
tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
|
||||
tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
|
||||
|
||||
// x += y
|
||||
add X_0.\lanes, X_0.\lanes, Y_0.\lanes
|
||||
add X_1.\lanes, X_1.\lanes, Y_1.\lanes
|
||||
add X_2.\lanes, X_2.\lanes, Y_2.\lanes
|
||||
add X_3.\lanes, X_3.\lanes, Y_3.\lanes
|
||||
|
||||
// x ^= k
|
||||
eor X_0.16b, X_0.16b, ROUND_KEY.16b
|
||||
eor X_1.16b, X_1.16b, ROUND_KEY.16b
|
||||
eor X_2.16b, X_2.16b, ROUND_KEY.16b
|
||||
eor X_3.16b, X_3.16b, ROUND_KEY.16b
|
||||
|
||||
// y = rol(y, 3)
|
||||
shl TMP0.\lanes, Y_0.\lanes, #3
|
||||
shl TMP1.\lanes, Y_1.\lanes, #3
|
||||
shl TMP2.\lanes, Y_2.\lanes, #3
|
||||
shl TMP3.\lanes, Y_3.\lanes, #3
|
||||
sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
|
||||
sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
|
||||
sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
|
||||
sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
|
||||
|
||||
// y ^= x
|
||||
eor Y_0.16b, TMP0.16b, X_0.16b
|
||||
eor Y_1.16b, TMP1.16b, X_1.16b
|
||||
eor Y_2.16b, TMP2.16b, X_2.16b
|
||||
eor Y_3.16b, TMP3.16b, X_3.16b
|
||||
.endm
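For reference, the macro above is the vectorised form of the scalar Speck ARX round its own comments describe (x = ror(x, 8); x += y; x ^= k; y = rol(y, 3); y ^= x). A minimal C sketch for the 64-bit lane size, with an illustrative name that is not part of this patch:

static inline void speck128_round_sketch(u64 *x, u64 *y, u64 k)
{
	*x = (*x >> 8) | (*x << 56);	/* x = ror(x, 8), the tbl step above */
	*x += *y;			/* x += y */
	*x ^= k;			/* x ^= k */
	*y = (*y << 3) | (*y >> 61);	/* y = rol(y, 3), the shl+sri step above */
	*y ^= *x;			/* y ^= x */
}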
|
||||
|
||||
/*
 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
 *
 * This is the inverse of _speck_round_128bytes().
 */
|
||||
.macro _speck_unround_128bytes n, lanes
|
||||
|
||||
// y ^= x
|
||||
eor TMP0.16b, Y_0.16b, X_0.16b
|
||||
eor TMP1.16b, Y_1.16b, X_1.16b
|
||||
eor TMP2.16b, Y_2.16b, X_2.16b
|
||||
eor TMP3.16b, Y_3.16b, X_3.16b
|
||||
|
||||
// y = ror(y, 3)
|
||||
ushr Y_0.\lanes, TMP0.\lanes, #3
|
||||
ushr Y_1.\lanes, TMP1.\lanes, #3
|
||||
ushr Y_2.\lanes, TMP2.\lanes, #3
|
||||
ushr Y_3.\lanes, TMP3.\lanes, #3
|
||||
sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
|
||||
sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
|
||||
sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
|
||||
sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
|
||||
|
||||
// x ^= k
|
||||
eor X_0.16b, X_0.16b, ROUND_KEY.16b
|
||||
eor X_1.16b, X_1.16b, ROUND_KEY.16b
|
||||
eor X_2.16b, X_2.16b, ROUND_KEY.16b
|
||||
eor X_3.16b, X_3.16b, ROUND_KEY.16b
|
||||
|
||||
// x -= y
|
||||
sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
|
||||
sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
|
||||
sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
|
||||
sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
|
||||
|
||||
// x = rol(x, 8)
|
||||
tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
|
||||
tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
|
||||
tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
|
||||
tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
|
||||
.endm
|
||||
|
||||
.macro _next_xts_tweak next, cur, tmp, n
|
||||
.if \n == 64
|
||||
/*
|
||||
* Calculate the next tweak by multiplying the current one by x,
|
||||
* modulo p(x) = x^128 + x^7 + x^2 + x + 1.
|
||||
*/
|
||||
sshr \tmp\().2d, \cur\().2d, #63
|
||||
and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
|
||||
shl \next\().2d, \cur\().2d, #1
|
||||
ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
|
||||
eor \next\().16b, \next\().16b, \tmp\().16b
|
||||
.else
|
||||
/*
|
||||
* Calculate the next two tweaks by multiplying the current ones by x^2,
|
||||
* modulo p(x) = x^64 + x^4 + x^3 + x + 1.
|
||||
*/
|
||||
ushr \tmp\().2d, \cur\().2d, #62
|
||||
shl \next\().2d, \cur\().2d, #2
|
||||
tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
|
||||
eor \next\().16b, \next\().16b, \tmp\().16b
|
||||
.endif
|
||||
.endm
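The 128-bit branch of this macro is a doubling in GF(2^128); a scalar C sketch under the same reducing polynomial x^128 + x^7 + x^2 + x + 1 (the names are illustrative only, not part of this patch):

static inline void xts_mul_x_sketch(u64 *lo, u64 *hi)
{
	u64 carry = *hi >> 63;			/* bit shifted out of the top */

	*hi = (*hi << 1) | (*lo >> 63);
	*lo = (*lo << 1) ^ (carry ? 0x87 : 0);	/* 0x87 = x^7 + x^2 + x + 1 */
}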
|
||||
|
||||
/*
 * _speck_xts_crypt() - Speck-XTS encryption/decryption
 *
 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
 * using Speck-XTS, specifically the variant with a block size of '2n' and round
 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
 * nonzero multiple of 128.
 */
|
||||
.macro _speck_xts_crypt n, lanes, decrypting
|
||||
|
||||
/*
|
||||
* If decrypting, modify the ROUND_KEYS parameter to point to the last
|
||||
* round key rather than the first, since for decryption the round keys
|
||||
* are used in reverse order.
|
||||
*/
|
||||
.if \decrypting
|
||||
mov NROUNDS, NROUNDS /* zero the high 32 bits */
|
||||
.if \n == 64
|
||||
add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
|
||||
sub ROUND_KEYS, ROUND_KEYS, #8
|
||||
.else
|
||||
add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
|
||||
sub ROUND_KEYS, ROUND_KEYS, #4
|
||||
.endif
|
||||
.endif
|
||||
|
||||
// Load the index vector for tbl-based 8-bit rotates
|
||||
.if \decrypting
|
||||
ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
|
||||
.else
|
||||
ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
|
||||
.endif
|
||||
|
||||
// One-time XTS preparation
|
||||
.if \n == 64
|
||||
// Load first tweak
|
||||
ld1 {TWEAKV0.16b}, [TWEAK]
|
||||
|
||||
// Load GF(2^128) multiplication table
|
||||
ldr GFMUL_TABLE_Q, .Lgf128mul_table
|
||||
.else
|
||||
// Load first tweak
|
||||
ld1 {TWEAKV0.8b}, [TWEAK]
|
||||
|
||||
// Load GF(2^64) multiplication table
|
||||
ldr GFMUL_TABLE_Q, .Lgf64mul_table
|
||||
|
||||
// Calculate second tweak, packing it together with the first
|
||||
ushr TMP0.2d, TWEAKV0.2d, #63
|
||||
shl TMP1.2d, TWEAKV0.2d, #1
|
||||
tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
|
||||
eor TMP0.8b, TMP0.8b, TMP1.8b
|
||||
mov TWEAKV0.d[1], TMP0.d[0]
|
||||
.endif
|
||||
|
||||
.Lnext_128bytes_\@:
|
||||
|
||||
// Calculate XTS tweaks for next 128 bytes
|
||||
_next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
|
||||
_next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
|
||||
_next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
|
||||
_next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
|
||||
_next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
|
||||
_next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
|
||||
_next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
|
||||
_next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
|
||||
|
||||
// Load the next source blocks into {X,Y}[0-3]
|
||||
ld1 {X_0.16b-Y_1.16b}, [SRC], #64
|
||||
ld1 {X_2.16b-Y_3.16b}, [SRC], #64
|
||||
|
||||
// XOR the source blocks with their XTS tweaks
|
||||
eor TMP0.16b, X_0.16b, TWEAKV0.16b
|
||||
eor Y_0.16b, Y_0.16b, TWEAKV1.16b
|
||||
eor TMP1.16b, X_1.16b, TWEAKV2.16b
|
||||
eor Y_1.16b, Y_1.16b, TWEAKV3.16b
|
||||
eor TMP2.16b, X_2.16b, TWEAKV4.16b
|
||||
eor Y_2.16b, Y_2.16b, TWEAKV5.16b
|
||||
eor TMP3.16b, X_3.16b, TWEAKV6.16b
|
||||
eor Y_3.16b, Y_3.16b, TWEAKV7.16b
|
||||
|
||||
/*
|
||||
* De-interleave the 'x' and 'y' elements of each block, i.e. make it so
|
||||
* that the X[0-3] registers contain only the second halves of blocks,
|
||||
* and the Y[0-3] registers contain only the first halves of blocks.
|
||||
* (Speck uses the order (y, x) rather than the more intuitive (x, y).)
|
||||
*/
|
||||
uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
|
||||
uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
|
||||
uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
|
||||
uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
|
||||
uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
|
||||
uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
|
||||
uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
|
||||
uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
|
||||
|
||||
// Do the cipher rounds
|
||||
mov x6, ROUND_KEYS
|
||||
mov w7, NROUNDS
|
||||
.Lnext_round_\@:
|
||||
.if \decrypting
|
||||
ld1r {ROUND_KEY.\lanes}, [x6]
|
||||
sub x6, x6, #( \n / 8 )
|
||||
_speck_unround_128bytes \n, \lanes
|
||||
.else
|
||||
ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
|
||||
_speck_round_128bytes \n, \lanes
|
||||
.endif
|
||||
subs w7, w7, #1
|
||||
bne .Lnext_round_\@
|
||||
|
||||
// Re-interleave the 'x' and 'y' elements of each block
|
||||
zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
|
||||
zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
|
||||
zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
|
||||
zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
|
||||
zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
|
||||
zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
|
||||
zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
|
||||
zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
|
||||
|
||||
// XOR the encrypted/decrypted blocks with the tweaks calculated earlier
|
||||
eor X_0.16b, TMP0.16b, TWEAKV0.16b
|
||||
eor Y_0.16b, Y_0.16b, TWEAKV1.16b
|
||||
eor X_1.16b, TMP1.16b, TWEAKV2.16b
|
||||
eor Y_1.16b, Y_1.16b, TWEAKV3.16b
|
||||
eor X_2.16b, TMP2.16b, TWEAKV4.16b
|
||||
eor Y_2.16b, Y_2.16b, TWEAKV5.16b
|
||||
eor X_3.16b, TMP3.16b, TWEAKV6.16b
|
||||
eor Y_3.16b, Y_3.16b, TWEAKV7.16b
|
||||
mov TWEAKV0.16b, TWEAKV_NEXT.16b
|
||||
|
||||
// Store the ciphertext in the destination buffer
|
||||
st1 {X_0.16b-Y_1.16b}, [DST], #64
|
||||
st1 {X_2.16b-Y_3.16b}, [DST], #64
|
||||
|
||||
// Continue if there are more 128-byte chunks remaining
|
||||
subs NBYTES, NBYTES, #128
|
||||
bne .Lnext_128bytes_\@
|
||||
|
||||
// Store the next tweak and return
|
||||
.if \n == 64
|
||||
st1 {TWEAKV_NEXT.16b}, [TWEAK]
|
||||
.else
|
||||
st1 {TWEAKV_NEXT.8b}, [TWEAK]
|
||||
.endif
|
||||
ret
|
||||
.endm
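In equation form, for block index i the macro applies the usual XTS composition, where E is Speck under the main key and T_0 is the IV encrypted with the tweak key by the C glue code below:

	C_i = E(P_i ^ T_i) ^ T_i,	T_{i+1} = T_i * x	(multiplication in the GF(2^n) field defined by the polynomials above)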
|
||||
|
||||
ENTRY(speck128_xts_encrypt_neon)
|
||||
_speck_xts_crypt n=64, lanes=2d, decrypting=0
|
||||
ENDPROC(speck128_xts_encrypt_neon)
|
||||
|
||||
ENTRY(speck128_xts_decrypt_neon)
|
||||
_speck_xts_crypt n=64, lanes=2d, decrypting=1
|
||||
ENDPROC(speck128_xts_decrypt_neon)
|
||||
|
||||
ENTRY(speck64_xts_encrypt_neon)
|
||||
_speck_xts_crypt n=32, lanes=4s, decrypting=0
|
||||
ENDPROC(speck64_xts_encrypt_neon)
|
||||
|
||||
ENTRY(speck64_xts_decrypt_neon)
|
||||
_speck_xts_crypt n=32, lanes=4s, decrypting=1
|
||||
ENDPROC(speck64_xts_decrypt_neon)
|
282
arch/arm64/crypto/speck-neon-glue.c
Normal file
@ -0,0 +1,282 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
|
||||
* (64-bit version; based on the 32-bit version)
|
||||
*
|
||||
* Copyright (c) 2018 Google, Inc
|
||||
*/
|
||||
|
||||
#include <asm/hwcap.h>
|
||||
#include <asm/neon.h>
|
||||
#include <asm/simd.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/speck.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/* The assembly functions only handle multiples of 128 bytes */
|
||||
#define SPECK_NEON_CHUNK_SIZE 128
|
||||
|
||||
/* Speck128 */
|
||||
|
||||
struct speck128_xts_tfm_ctx {
|
||||
struct speck128_tfm_ctx main_key;
|
||||
struct speck128_tfm_ctx tweak_key;
|
||||
};
|
||||
|
||||
asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
|
||||
u8 *, const u8 *);
|
||||
typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
|
||||
const void *, unsigned int, void *);
|
||||
|
||||
static __always_inline int
|
||||
__speck128_xts_crypt(struct skcipher_request *req,
|
||||
speck128_crypt_one_t crypt_one,
|
||||
speck128_xts_crypt_many_t crypt_many)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
le128 tweak;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
|
||||
crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
|
||||
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
|
||||
unsigned int count;
|
||||
|
||||
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
|
||||
kernel_neon_begin();
|
||||
(*crypt_many)(ctx->main_key.round_keys,
|
||||
ctx->main_key.nrounds,
|
||||
dst, src, count, &tweak);
|
||||
kernel_neon_end();
|
||||
dst += count;
|
||||
src += count;
|
||||
nbytes -= count;
|
||||
}
|
||||
|
||||
/* Handle any remainder with generic code */
|
||||
while (nbytes >= sizeof(tweak)) {
|
||||
le128_xor((le128 *)dst, (const le128 *)src, &tweak);
|
||||
(*crypt_one)(&ctx->main_key, dst, dst);
|
||||
le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
|
||||
gf128mul_x_ble(&tweak, &tweak);
|
||||
|
||||
dst += sizeof(tweak);
|
||||
src += sizeof(tweak);
|
||||
nbytes -= sizeof(tweak);
|
||||
}
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int speck128_xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck128_xts_crypt(req, crypto_speck128_encrypt,
|
||||
speck128_xts_encrypt_neon);
|
||||
}
|
||||
|
||||
static int speck128_xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck128_xts_crypt(req, crypto_speck128_decrypt,
|
||||
speck128_xts_decrypt_neon);
|
||||
}
|
||||
|
||||
static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_verify_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
keylen /= 2;
|
||||
|
||||
err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
|
||||
}
|
||||
|
||||
/* Speck64 */
|
||||
|
||||
struct speck64_xts_tfm_ctx {
|
||||
struct speck64_tfm_ctx main_key;
|
||||
struct speck64_tfm_ctx tweak_key;
|
||||
};
|
||||
|
||||
asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
|
||||
void *dst, const void *src,
|
||||
unsigned int nbytes, void *tweak);
|
||||
|
||||
typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
|
||||
u8 *, const u8 *);
|
||||
typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
|
||||
const void *, unsigned int, void *);
|
||||
|
||||
static __always_inline int
|
||||
__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
|
||||
speck64_xts_crypt_many_t crypt_many)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
__le64 tweak;
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, true);
|
||||
|
||||
crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
|
||||
|
||||
while (walk.nbytes > 0) {
|
||||
unsigned int nbytes = walk.nbytes;
|
||||
u8 *dst = walk.dst.virt.addr;
|
||||
const u8 *src = walk.src.virt.addr;
|
||||
|
||||
if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
|
||||
unsigned int count;
|
||||
|
||||
count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
|
||||
kernel_neon_begin();
|
||||
(*crypt_many)(ctx->main_key.round_keys,
|
||||
ctx->main_key.nrounds,
|
||||
dst, src, count, &tweak);
|
||||
kernel_neon_end();
|
||||
dst += count;
|
||||
src += count;
|
||||
nbytes -= count;
|
||||
}
|
||||
|
||||
/* Handle any remainder with generic code */
|
||||
while (nbytes >= sizeof(tweak)) {
|
||||
*(__le64 *)dst = *(__le64 *)src ^ tweak;
|
||||
(*crypt_one)(&ctx->main_key, dst, dst);
|
||||
*(__le64 *)dst ^= tweak;
|
||||
tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
|
||||
((tweak & cpu_to_le64(1ULL << 63)) ?
|
||||
0x1B : 0));
|
||||
dst += sizeof(tweak);
|
||||
src += sizeof(tweak);
|
||||
nbytes -= sizeof(tweak);
|
||||
}
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int speck64_xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck64_xts_crypt(req, crypto_speck64_encrypt,
|
||||
speck64_xts_encrypt_neon);
|
||||
}
|
||||
|
||||
static int speck64_xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __speck64_xts_crypt(req, crypto_speck64_decrypt,
|
||||
speck64_xts_decrypt_neon);
|
||||
}
|
||||
|
||||
static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_verify_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
keylen /= 2;
|
||||
|
||||
err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
|
||||
}
|
||||
|
||||
static struct skcipher_alg speck_algs[] = {
|
||||
{
|
||||
.base.cra_name = "xts(speck128)",
|
||||
.base.cra_driver_name = "xts-speck128-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = SPECK128_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * SPECK128_128_KEY_SIZE,
|
||||
.max_keysize = 2 * SPECK128_256_KEY_SIZE,
|
||||
.ivsize = SPECK128_BLOCK_SIZE,
|
||||
.walksize = SPECK_NEON_CHUNK_SIZE,
|
||||
.setkey = speck128_xts_setkey,
|
||||
.encrypt = speck128_xts_encrypt,
|
||||
.decrypt = speck128_xts_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "xts(speck64)",
|
||||
.base.cra_driver_name = "xts-speck64-neon",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = SPECK64_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
|
||||
.base.cra_alignmask = 7,
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * SPECK64_96_KEY_SIZE,
|
||||
.max_keysize = 2 * SPECK64_128_KEY_SIZE,
|
||||
.ivsize = SPECK64_BLOCK_SIZE,
|
||||
.walksize = SPECK_NEON_CHUNK_SIZE,
|
||||
.setkey = speck64_xts_setkey,
|
||||
.encrypt = speck64_xts_encrypt,
|
||||
.decrypt = speck64_xts_decrypt,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init speck_neon_module_init(void)
|
||||
{
|
||||
if (!(elf_hwcap & HWCAP_ASIMD))
|
||||
return -ENODEV;
|
||||
return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
|
||||
}
|
||||
|
||||
static void __exit speck_neon_module_exit(void)
|
||||
{
|
||||
crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
|
||||
}
|
||||
|
||||
module_init(speck_neon_module_init);
|
||||
module_exit(speck_neon_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
|
||||
MODULE_ALIAS_CRYPTO("xts(speck128)");
|
||||
MODULE_ALIAS_CRYPTO("xts-speck128-neon");
|
||||
MODULE_ALIAS_CRYPTO("xts(speck64)");
|
||||
MODULE_ALIAS_CRYPTO("xts-speck64-neon");
|
File diff suppressed because it is too large
@ -72,6 +72,21 @@ struct aesni_xts_ctx {
|
||||
u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
|
||||
};
|
||||
|
||||
#define GCM_BLOCK_LEN 16
|
||||
|
||||
struct gcm_context_data {
	/* init, update and finalize context data */
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 8];
};
|
||||
|
||||
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
|
||||
unsigned int key_len);
|
||||
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
|
||||
@ -105,6 +120,7 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
|
||||
|
||||
/* asmlinkage void aesni_gcm_enc()
|
||||
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
|
||||
* struct gcm_context_data. May be uninitialized.
|
||||
* u8 *out, Ciphertext output. Encrypt in-place is allowed.
|
||||
* const u8 *in, Plaintext input
|
||||
* unsigned long plaintext_len, Length of data in bytes for encryption.
|
||||
@ -117,13 +133,15 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
|
||||
* unsigned long auth_tag_len), Authenticated Tag Length in bytes.
|
||||
* Valid values are 16 (most likely), 12 or 8.
|
||||
*/
|
||||
asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
|
||||
asmlinkage void aesni_gcm_enc(void *ctx,
|
||||
struct gcm_context_data *gdata, u8 *out,
|
||||
const u8 *in, unsigned long plaintext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len);
|
||||
|
||||
/* asmlinkage void aesni_gcm_dec()
|
||||
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
|
||||
* struct gcm_context_data. May be uninitialized.
|
||||
* u8 *out, Plaintext output. Decrypt in-place is allowed.
|
||||
* const u8 *in, Ciphertext input
|
||||
* unsigned long ciphertext_len, Length of data in bytes for decryption.
|
||||
@ -137,11 +155,28 @@ asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
|
||||
* unsigned long auth_tag_len) Authenticated Tag Length in bytes.
|
||||
* Valid values are 16 (most likely), 12 or 8.
|
||||
*/
|
||||
asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
|
||||
asmlinkage void aesni_gcm_dec(void *ctx,
|
||||
struct gcm_context_data *gdata, u8 *out,
|
||||
const u8 *in, unsigned long ciphertext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len);
|
||||
|
||||
/* Scatter / Gather routines, with args similar to above */
|
||||
asmlinkage void aesni_gcm_init(void *ctx,
|
||||
struct gcm_context_data *gdata,
|
||||
u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad,
|
||||
unsigned long aad_len);
|
||||
asmlinkage void aesni_gcm_enc_update(void *ctx,
|
||||
struct gcm_context_data *gdata, u8 *out,
|
||||
const u8 *in, unsigned long plaintext_len);
|
||||
asmlinkage void aesni_gcm_dec_update(void *ctx,
|
||||
struct gcm_context_data *gdata, u8 *out,
|
||||
const u8 *in,
|
||||
unsigned long ciphertext_len);
|
||||
asmlinkage void aesni_gcm_finalize(void *ctx,
|
||||
struct gcm_context_data *gdata,
|
||||
u8 *auth_tag, unsigned long auth_tag_len);
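These entry points form an init/update/finalize sequence around struct gcm_context_data, which gcmaes_crypt_by_sg() further below drives once per scatterlist segment. A minimal sketch for a single contiguous buffer (hypothetical helper name, error handling omitted):

static void gcm_encrypt_buf_sketch(void *aes_ctx, u8 *dst, const u8 *src,
				   unsigned long len, u8 *iv, u8 *hash_subkey,
				   const u8 *aad, unsigned long aad_len,
				   u8 *tag, unsigned long tag_len)
{
	struct gcm_context_data data AESNI_ALIGN_ATTR;

	kernel_fpu_begin();
	aesni_gcm_init(aes_ctx, &data, iv, hash_subkey, aad, aad_len);
	aesni_gcm_enc_update(aes_ctx, &data, dst, src, len);	/* may be repeated */
	aesni_gcm_finalize(aes_ctx, &data, tag, tag_len);
	kernel_fpu_end();
}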
|
||||
|
||||
#ifdef CONFIG_AS_AVX
|
||||
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
|
||||
@ -167,15 +202,17 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
|
||||
const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len);
|
||||
|
||||
static void aesni_gcm_enc_avx(void *ctx, u8 *out,
|
||||
static void aesni_gcm_enc_avx(void *ctx,
|
||||
struct gcm_context_data *data, u8 *out,
|
||||
const u8 *in, unsigned long plaintext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len)
|
||||
{
|
||||
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
|
||||
if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
|
||||
aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
|
||||
aad_len, auth_tag, auth_tag_len);
|
||||
aesni_gcm_enc(ctx, data, out, in,
|
||||
plaintext_len, iv, hash_subkey, aad,
|
||||
aad_len, auth_tag, auth_tag_len);
|
||||
} else {
|
||||
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
|
||||
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
|
||||
@ -183,15 +220,17 @@ static void aesni_gcm_enc_avx(void *ctx, u8 *out,
|
||||
}
|
||||
}
|
||||
|
||||
static void aesni_gcm_dec_avx(void *ctx, u8 *out,
|
||||
static void aesni_gcm_dec_avx(void *ctx,
|
||||
struct gcm_context_data *data, u8 *out,
|
||||
const u8 *in, unsigned long ciphertext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len)
|
||||
{
|
||||
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
|
||||
if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
|
||||
aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
|
||||
aad_len, auth_tag, auth_tag_len);
|
||||
aesni_gcm_dec(ctx, data, out, in,
|
||||
ciphertext_len, iv, hash_subkey, aad,
|
||||
aad_len, auth_tag, auth_tag_len);
|
||||
} else {
|
||||
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
|
||||
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
|
||||
@ -218,15 +257,17 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
|
||||
const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len);
|
||||
|
||||
static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
|
||||
static void aesni_gcm_enc_avx2(void *ctx,
|
||||
struct gcm_context_data *data, u8 *out,
|
||||
const u8 *in, unsigned long plaintext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len)
|
||||
{
|
||||
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
|
||||
if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
|
||||
aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
|
||||
aad_len, auth_tag, auth_tag_len);
|
||||
aesni_gcm_enc(ctx, data, out, in,
|
||||
plaintext_len, iv, hash_subkey, aad,
|
||||
aad_len, auth_tag, auth_tag_len);
|
||||
} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
|
||||
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
|
||||
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
|
||||
@ -238,15 +279,17 @@ static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
|
||||
}
|
||||
}
|
||||
|
||||
static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
|
||||
static void aesni_gcm_dec_avx2(void *ctx,
|
||||
struct gcm_context_data *data, u8 *out,
|
||||
const u8 *in, unsigned long ciphertext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len)
|
||||
{
|
||||
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
|
||||
if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
|
||||
aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
|
||||
aad, aad_len, auth_tag, auth_tag_len);
|
||||
aesni_gcm_dec(ctx, data, out, in,
|
||||
ciphertext_len, iv, hash_subkey,
|
||||
aad, aad_len, auth_tag, auth_tag_len);
|
||||
} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
|
||||
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
|
||||
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
|
||||
@ -259,15 +302,19 @@ static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
|
||||
}
|
||||
#endif
|
||||
|
||||
static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
|
||||
const u8 *in, unsigned long plaintext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len);
|
||||
static void (*aesni_gcm_enc_tfm)(void *ctx,
|
||||
struct gcm_context_data *data, u8 *out,
|
||||
const u8 *in, unsigned long plaintext_len,
|
||||
u8 *iv, u8 *hash_subkey, const u8 *aad,
|
||||
unsigned long aad_len, u8 *auth_tag,
|
||||
unsigned long auth_tag_len);
|
||||
|
||||
static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
|
||||
const u8 *in, unsigned long ciphertext_len, u8 *iv,
|
||||
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
|
||||
u8 *auth_tag, unsigned long auth_tag_len);
|
||||
static void (*aesni_gcm_dec_tfm)(void *ctx,
|
||||
struct gcm_context_data *data, u8 *out,
|
||||
const u8 *in, unsigned long ciphertext_len,
|
||||
u8 *iv, u8 *hash_subkey, const u8 *aad,
|
||||
unsigned long aad_len, u8 *auth_tag,
|
||||
unsigned long auth_tag_len);
|
||||
|
||||
static inline struct
|
||||
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
|
||||
@ -744,6 +791,127 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
|
||||
unsigned int assoclen, u8 *hash_subkey,
|
||||
u8 *iv, void *aes_ctx)
|
||||
{
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
|
||||
struct gcm_context_data data AESNI_ALIGN_ATTR;
|
||||
struct scatter_walk dst_sg_walk = {};
|
||||
unsigned long left = req->cryptlen;
|
||||
unsigned long len, srclen, dstlen;
|
||||
struct scatter_walk assoc_sg_walk;
|
||||
struct scatter_walk src_sg_walk;
|
||||
struct scatterlist src_start[2];
|
||||
struct scatterlist dst_start[2];
|
||||
struct scatterlist *src_sg;
|
||||
struct scatterlist *dst_sg;
|
||||
u8 *src, *dst, *assoc;
|
||||
u8 *assocmem = NULL;
|
||||
u8 authTag[16];
|
||||
|
||||
if (!enc)
|
||||
left -= auth_tag_len;
|
||||
|
||||
/* Linearize assoc, if not already linear */
|
||||
if (req->src->length >= assoclen && req->src->length &&
|
||||
(!PageHighMem(sg_page(req->src)) ||
|
||||
req->src->offset + req->src->length < PAGE_SIZE)) {
|
||||
scatterwalk_start(&assoc_sg_walk, req->src);
|
||||
assoc = scatterwalk_map(&assoc_sg_walk);
|
||||
} else {
|
||||
/* assoc can be any length, so must be on heap */
|
||||
assocmem = kmalloc(assoclen, GFP_ATOMIC);
|
||||
if (unlikely(!assocmem))
|
||||
return -ENOMEM;
|
||||
assoc = assocmem;
|
||||
|
||||
scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
|
||||
}
|
||||
|
||||
src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
|
||||
scatterwalk_start(&src_sg_walk, src_sg);
|
||||
if (req->src != req->dst) {
|
||||
dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
|
||||
scatterwalk_start(&dst_sg_walk, dst_sg);
|
||||
}
|
||||
|
||||
kernel_fpu_begin();
|
||||
aesni_gcm_init(aes_ctx, &data, iv,
|
||||
hash_subkey, assoc, assoclen);
|
||||
if (req->src != req->dst) {
|
||||
while (left) {
|
||||
src = scatterwalk_map(&src_sg_walk);
|
||||
dst = scatterwalk_map(&dst_sg_walk);
|
||||
srclen = scatterwalk_clamp(&src_sg_walk, left);
|
||||
dstlen = scatterwalk_clamp(&dst_sg_walk, left);
|
||||
len = min(srclen, dstlen);
|
||||
if (len) {
|
||||
if (enc)
|
||||
aesni_gcm_enc_update(aes_ctx, &data,
|
||||
dst, src, len);
|
||||
else
|
||||
aesni_gcm_dec_update(aes_ctx, &data,
|
||||
dst, src, len);
|
||||
}
|
||||
left -= len;
|
||||
|
||||
scatterwalk_unmap(src);
|
||||
scatterwalk_unmap(dst);
|
||||
scatterwalk_advance(&src_sg_walk, len);
|
||||
scatterwalk_advance(&dst_sg_walk, len);
|
||||
scatterwalk_done(&src_sg_walk, 0, left);
|
||||
scatterwalk_done(&dst_sg_walk, 1, left);
|
||||
}
|
||||
} else {
|
||||
while (left) {
|
||||
dst = src = scatterwalk_map(&src_sg_walk);
|
||||
len = scatterwalk_clamp(&src_sg_walk, left);
|
||||
if (len) {
|
||||
if (enc)
|
||||
aesni_gcm_enc_update(aes_ctx, &data,
|
||||
src, src, len);
|
||||
else
|
||||
aesni_gcm_dec_update(aes_ctx, &data,
|
||||
src, src, len);
|
||||
}
|
||||
left -= len;
|
||||
scatterwalk_unmap(src);
|
||||
scatterwalk_advance(&src_sg_walk, len);
|
||||
scatterwalk_done(&src_sg_walk, 1, left);
|
||||
}
|
||||
}
|
||||
aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
|
||||
kernel_fpu_end();
|
||||
|
||||
if (!assocmem)
|
||||
scatterwalk_unmap(assoc);
|
||||
else
|
||||
kfree(assocmem);
|
||||
|
||||
if (!enc) {
|
||||
u8 authTagMsg[16];
|
||||
|
||||
/* Copy out original authTag */
|
||||
scatterwalk_map_and_copy(authTagMsg, req->src,
|
||||
req->assoclen + req->cryptlen -
|
||||
auth_tag_len,
|
||||
auth_tag_len, 0);
|
||||
|
||||
/* Compare generated tag with passed in tag. */
|
||||
return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
|
||||
-EBADMSG : 0;
|
||||
}
|
||||
|
||||
/* Copy in the authTag */
|
||||
scatterwalk_map_and_copy(authTag, req->dst,
|
||||
req->assoclen + req->cryptlen,
|
||||
auth_tag_len, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
|
||||
u8 *hash_subkey, u8 *iv, void *aes_ctx)
|
||||
{
|
||||
@ -753,7 +921,14 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
|
||||
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
|
||||
struct scatter_walk src_sg_walk;
|
||||
struct scatter_walk dst_sg_walk = {};
|
||||
struct gcm_context_data data AESNI_ALIGN_ATTR;
|
||||
|
||||
if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
|
||||
aesni_gcm_enc_tfm == aesni_gcm_enc ||
|
||||
req->cryptlen < AVX_GEN2_OPTSIZE) {
|
||||
return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
|
||||
aes_ctx);
|
||||
}
|
||||
if (sg_is_last(req->src) &&
|
||||
(!PageHighMem(sg_page(req->src)) ||
|
||||
req->src->offset + req->src->length <= PAGE_SIZE) &&
|
||||
@ -782,7 +957,7 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
|
||||
}
|
||||
|
||||
kernel_fpu_begin();
|
||||
aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
|
||||
aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
|
||||
hash_subkey, assoc, assoclen,
|
||||
dst + req->cryptlen, auth_tag_len);
|
||||
kernel_fpu_end();
|
||||
@ -817,8 +992,15 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
|
||||
u8 authTag[16];
|
||||
struct scatter_walk src_sg_walk;
|
||||
struct scatter_walk dst_sg_walk = {};
|
||||
struct gcm_context_data data AESNI_ALIGN_ATTR;
|
||||
int retval = 0;
|
||||
|
||||
if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
|
||||
aesni_gcm_enc_tfm == aesni_gcm_enc ||
|
||||
req->cryptlen < AVX_GEN2_OPTSIZE) {
|
||||
return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
|
||||
aes_ctx);
|
||||
}
|
||||
tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
|
||||
|
||||
if (sg_is_last(req->src) &&
|
||||
@ -849,7 +1031,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
|
||||
|
||||
|
||||
kernel_fpu_begin();
|
||||
aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
|
||||
aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
|
||||
hash_subkey, assoc, assoclen,
|
||||
authTag, auth_tag_len);
|
||||
kernel_fpu_end();
|
||||
|
@ -25,13 +25,13 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/blowfish.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/algapi.h>
|
||||
|
||||
/* regular block cipher functions */
|
||||
asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
|
||||
@ -77,20 +77,28 @@ static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
|
||||
}
|
||||
|
||||
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
static int blowfish_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return blowfish_setkey(&tfm->base, key, keylen);
}
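blowfish_setkey_skcipher() is the usual bridge from the skcipher interface to a setkey routine still written against the legacy crypto_tfm: the skcipher handle embeds a crypto_tfm as its base member, so the old helper is reused unchanged. The same wrapper shape for a hypothetical cipher (the mycipher_* names are illustrative):

#include <linux/crypto.h>
#include <crypto/internal/skcipher.h>

/* Legacy setkey, written against struct crypto_tfm (body not shown). */
static int mycipher_setkey(struct crypto_tfm *tfm, const u8 *key,
                           unsigned int keylen);

/* skcipher wrapper: delegate through the embedded base tfm. */
static int mycipher_setkey_skcipher(struct crypto_skcipher *tfm,
                                    const u8 *key, unsigned int keylen)
{
        return mycipher_setkey(&tfm->base, key, keylen);
}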
|
||||
static int ecb_crypt(struct skcipher_request *req,
|
||||
void (*fn)(struct bf_ctx *, u8 *, const u8 *),
|
||||
void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
|
||||
{
|
||||
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = BF_BLOCK_SIZE;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
err = blkcipher_walk_virt(desc, walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk->nbytes)) {
|
||||
u8 *wsrc = walk->src.virt.addr;
|
||||
u8 *wdst = walk->dst.virt.addr;
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
u8 *wsrc = walk.src.virt.addr;
|
||||
u8 *wdst = walk.dst.virt.addr;
|
||||
|
||||
/* Process four block batch */
|
||||
if (nbytes >= bsize * 4) {
|
||||
@ -116,34 +124,25 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
|
||||
} while (nbytes >= bsize);
|
||||
|
||||
done:
|
||||
err = blkcipher_walk_done(desc, walk, nbytes);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
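The converted ecb_crypt() follows the standard skcipher_walk pattern: initialize the walk from the request, process walk.nbytes of virtually mapped data per iteration, and hand whatever is left back to skcipher_walk_done(). A stripped-down sketch of that loop for a hypothetical one-block-at-a-time cipher:

#include <crypto/internal/skcipher.h>

#define MYCIPHER_BLOCK_SIZE     8       /* illustrative block size */

/* Illustrative single-block primitive, supplied elsewhere. */
static void mycipher_encrypt_block(void *ctx, u8 *dst, const u8 *src);

static int mycipher_ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        void *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) != 0) {
                u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;

                /* Whole blocks only; the tail goes back to the walk. */
                while (nbytes >= MYCIPHER_BLOCK_SIZE) {
                        mycipher_encrypt_block(ctx, dst, src);
                        src += MYCIPHER_BLOCK_SIZE;
                        dst += MYCIPHER_BLOCK_SIZE;
                        nbytes -= MYCIPHER_BLOCK_SIZE;
                }

                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

The false passed to skcipher_walk_virt() lets the walk sleep, which matches the synchronous callers in this file.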
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way);
|
||||
return ecb_crypt(req, blowfish_enc_blk, blowfish_enc_blk_4way);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way);
|
||||
return ecb_crypt(req, blowfish_dec_blk, blowfish_dec_blk_4way);
|
||||
}
|
||||
|
||||
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __cbc_encrypt(struct bf_ctx *ctx,
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = BF_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
@ -164,27 +163,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
nbytes = __cbc_encrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = __cbc_encrypt(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
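__cbc_encrypt() is inherently serial: each plaintext block is XORed with the previous ciphertext block (the IV for the first block) before being encrypted, and the running IV is carried forward into the next chunk of the walk. A plain C sketch of that chaining over 8-byte blocks, with the block cipher stubbed out:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLK 8   /* Blowfish-sized block, for illustration */

/* Stand-in for the real single-block encryption primitive. */
static void encrypt_block(const void *key, uint8_t dst[BLK], const uint8_t src[BLK]);

static void cbc_encrypt_buf(const void *key, uint8_t *buf, size_t len,
                            uint8_t iv[BLK])
{
        size_t off, i;

        for (off = 0; off + BLK <= len; off += BLK) {
                /* XOR the running IV into the plaintext block ... */
                for (i = 0; i < BLK; i++)
                        buf[off + i] ^= iv[i];

                /* ... encrypt in place; the ciphertext becomes the next IV. */
                encrypt_block(key, &buf[off], &buf[off]);
                memcpy(iv, &buf[off], BLK);
        }
}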
|
||||
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __cbc_decrypt(struct bf_ctx *ctx,
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = BF_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
@ -245,24 +244,25 @@ done:
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
nbytes = __cbc_decrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = __cbc_decrypt(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
|
||||
static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk)
|
||||
{
|
||||
u8 *ctrblk = walk->iv;
|
||||
u8 keystream[BF_BLOCK_SIZE];
|
||||
@ -276,10 +276,8 @@ static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
|
||||
crypto_inc(ctrblk, BF_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk)
|
||||
{
|
||||
struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = BF_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
@ -332,29 +330,30 @@ done:
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
|
||||
nbytes = __ctr_crypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = __ctr_crypt(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
if (nbytes) {
|
||||
ctr_crypt_final(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
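ctr_crypt_final() covers the trailing partial block: CTR mode encrypts the counter block itself and XORs only as many keystream bytes as there is data left, so no padding is required, and crypto_inc() then bumps the big-endian counter. A self-contained sketch of that step:

#include <stddef.h>
#include <stdint.h>

#define BLK 8

/* Stand-in for the real single-block encryption primitive. */
static void encrypt_block(const void *key, uint8_t dst[BLK], const uint8_t src[BLK]);

/* Encrypt the last, possibly partial, chunk of a CTR stream. */
static void ctr_final(const void *key, uint8_t ctrblk[BLK],
                      uint8_t *dst, const uint8_t *src, size_t nbytes)
{
        uint8_t keystream[BLK];
        size_t i;

        encrypt_block(key, keystream, ctrblk);
        for (i = 0; i < nbytes; i++)
                dst[i] = src[i] ^ keystream[i];

        /* Big-endian increment, like crypto_inc(). */
        for (i = BLK; i-- > 0; )
                if (++ctrblk[i] != 0)
                        break;
}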
|
||||
static struct crypto_alg bf_algs[4] = { {
|
||||
static struct crypto_alg bf_cipher_alg = {
|
||||
.cra_name = "blowfish",
|
||||
.cra_driver_name = "blowfish-asm",
|
||||
.cra_priority = 200,
|
||||
@ -372,66 +371,50 @@ static struct crypto_alg bf_algs[4] = { {
|
||||
.cia_decrypt = blowfish_decrypt,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(blowfish)",
|
||||
.cra_driver_name = "ecb-blowfish-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = BF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct bf_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = BF_MIN_KEY_SIZE,
|
||||
.max_keysize = BF_MAX_KEY_SIZE,
|
||||
.setkey = blowfish_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
};
|
||||
|
||||
static struct skcipher_alg bf_skcipher_algs[] = {
|
||||
{
|
||||
.base.cra_name = "ecb(blowfish)",
|
||||
.base.cra_driver_name = "ecb-blowfish-asm",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = BF_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct bf_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = BF_MIN_KEY_SIZE,
|
||||
.max_keysize = BF_MAX_KEY_SIZE,
|
||||
.setkey = blowfish_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(blowfish)",
|
||||
.base.cra_driver_name = "cbc-blowfish-asm",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = BF_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct bf_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = BF_MIN_KEY_SIZE,
|
||||
.max_keysize = BF_MAX_KEY_SIZE,
|
||||
.ivsize = BF_BLOCK_SIZE,
|
||||
.setkey = blowfish_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "ctr(blowfish)",
|
||||
.base.cra_driver_name = "ctr-blowfish-asm",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct bf_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = BF_MIN_KEY_SIZE,
|
||||
.max_keysize = BF_MAX_KEY_SIZE,
|
||||
.ivsize = BF_BLOCK_SIZE,
|
||||
.chunksize = BF_BLOCK_SIZE,
|
||||
.setkey = blowfish_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(blowfish)",
|
||||
.cra_driver_name = "cbc-blowfish-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = BF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct bf_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = BF_MIN_KEY_SIZE,
|
||||
.max_keysize = BF_MAX_KEY_SIZE,
|
||||
.ivsize = BF_BLOCK_SIZE,
|
||||
.setkey = blowfish_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(blowfish)",
|
||||
.cra_driver_name = "ctr-blowfish-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct bf_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = BF_MIN_KEY_SIZE,
|
||||
.max_keysize = BF_MAX_KEY_SIZE,
|
||||
.ivsize = BF_BLOCK_SIZE,
|
||||
.setkey = blowfish_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
};
|
||||
|
||||
static bool is_blacklisted_cpu(void)
|
||||
{
|
||||
@ -456,6 +439,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (!force && is_blacklisted_cpu()) {
|
||||
printk(KERN_INFO
|
||||
"blowfish-x86_64: performance on this CPU "
|
||||
@ -464,12 +449,23 @@ static int __init init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(bf_algs, ARRAY_SIZE(bf_algs));
|
||||
err = crypto_register_alg(&bf_cipher_alg);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = crypto_register_skciphers(bf_skcipher_algs,
|
||||
ARRAY_SIZE(bf_skcipher_algs));
|
||||
if (err)
|
||||
crypto_unregister_alg(&bf_cipher_alg);
|
||||
|
||||
return err;
|
||||
}
|
||||
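Since the module now registers one plain cipher plus an array of skciphers, init() has to unwind the first registration when the second one fails, and fini() undoes both. The same pattern in a generic, hedged form (the my_* structures are placeholders):

#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>

static struct crypto_alg my_cipher_alg;         /* single-block cipher */
static struct skcipher_alg my_skcipher_algs[3]; /* ecb/cbc/ctr wrappers */

static int __init my_mod_init(void)
{
        int err;

        err = crypto_register_alg(&my_cipher_alg);
        if (err)
                return err;

        err = crypto_register_skciphers(my_skcipher_algs,
                                        ARRAY_SIZE(my_skcipher_algs));
        if (err)
                /* Roll back the first registration on failure. */
                crypto_unregister_alg(&my_cipher_alg);

        return err;
}

static void __exit my_mod_exit(void)
{
        crypto_unregister_skciphers(my_skcipher_algs,
                                    ARRAY_SIZE(my_skcipher_algs));
        crypto_unregister_alg(&my_cipher_alg);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");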
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_algs(bf_algs, ARRAY_SIZE(bf_algs));
|
||||
crypto_unregister_alg(&bf_cipher_alg);
|
||||
crypto_unregister_skciphers(bf_skcipher_algs,
|
||||
ARRAY_SIZE(bf_skcipher_algs));
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
|
@ -10,18 +10,15 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/crypto/camellia.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
|
||||
#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
|
||||
@ -150,413 +147,120 @@ static const struct common_glue_ctx camellia_dec_xts = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
|
||||
return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
|
||||
&tfm->base.crt_flags);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&camellia_enc, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&camellia_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
|
||||
req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
|
||||
}
|
||||
|
||||
static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
|
||||
CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
|
||||
nbytes);
|
||||
return glue_ctr_req_128bit(&camellia_ctr, req);
|
||||
}
|
||||
|
||||
static inline void camellia_fpu_end(bool fpu_enabled)
|
||||
static int xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
glue_fpu_end(fpu_enabled);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&camellia_enc_xts, req,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
static int xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
|
||||
&tfm->crt_flags);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&camellia_dec_xts, req,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
struct crypt_priv {
|
||||
struct camellia_ctx *ctx;
|
||||
bool fpu_enabled;
|
||||
static struct skcipher_alg camellia_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(camellia)",
|
||||
.base.cra_driver_name = "__ecb-camellia-aesni-avx2",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(camellia)",
|
||||
.base.cra_driver_name = "__cbc-camellia-aesni-avx2",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(camellia)",
|
||||
.base.cra_driver_name = "__ctr-camellia-aesni-avx2",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.chunksize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}, {
|
||||
.base.cra_name = "__xts(camellia)",
|
||||
.base.cra_driver_name = "__xts-camellia-aesni-avx2",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = xts_camellia_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
};
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_ecb_enc_32way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
camellia_enc_blk(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_ecb_dec_32way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
camellia_dec_blk(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
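encrypt_callback() and decrypt_callback() drain the buffer using the widest batch the assembly provides first (32-way, then 16-way, then 2-way, then single blocks), so most of the FPU section is spent in the wide routines. The control flow, reduced to a portable sketch with hypothetical batch helpers:

#include <stddef.h>
#include <stdint.h>

#define BLK 16  /* Camellia block size, for illustration */

/* Hypothetical batch primitives; each processes n*BLK bytes in place. */
static void crypt_32way(void *ctx, uint8_t *buf);
static void crypt_16way(void *ctx, uint8_t *buf);
static void crypt_2way(void *ctx, uint8_t *buf);
static void crypt_1way(void *ctx, uint8_t *buf);

static void crypt_tiered(void *ctx, uint8_t *buf, size_t nbytes)
{
        /* At most one pass of each wide routine, largest first. */
        if (nbytes >= 32 * BLK) {
                crypt_32way(ctx, buf);
                buf += 32 * BLK;
                nbytes -= 32 * BLK;
        }
        if (nbytes >= 16 * BLK) {
                crypt_16way(ctx, buf);
                buf += 16 * BLK;
                nbytes -= 16 * BLK;
        }
        while (nbytes >= 2 * BLK) {
                crypt_2way(ctx, buf);
                buf += 2 * BLK;
                nbytes -= 2 * BLK;
        }
        while (nbytes >= BLK) {
                crypt_1way(ctx, buf);
                buf += BLK;
                nbytes -= BLK;
        }
}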
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->camellia_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
camellia_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->camellia_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
camellia_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static struct crypto_alg cmll_algs[10] = { {
|
||||
.cra_name = "__ecb-camellia-aesni-avx2",
|
||||
.cra_driver_name = "__driver-ecb-camellia-aesni-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-camellia-aesni-avx2",
|
||||
.cra_driver_name = "__driver-cbc-camellia-aesni-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-camellia-aesni-avx2",
|
||||
.cra_driver_name = "__driver-ctr-camellia-aesni-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__lrw-camellia-aesni-avx2",
|
||||
.cra_driver_name = "__driver-lrw-camellia-aesni-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_camellia_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = lrw_camellia_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-camellia-aesni-avx2",
|
||||
.cra_driver_name = "__driver-xts-camellia-aesni-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = xts_camellia_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(camellia)",
|
||||
.cra_driver_name = "ecb-camellia-aesni-avx2",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(camellia)",
|
||||
.cra_driver_name = "cbc-camellia-aesni-avx2",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(camellia)",
|
||||
.cra_driver_name = "ctr-camellia-aesni-avx2",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(camellia)",
|
||||
.cra_driver_name = "lrw-camellia-aesni-avx2",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(camellia)",
|
||||
.cra_driver_name = "xts-camellia-aesni-avx2",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
|
||||
|
||||
static int __init camellia_aesni_init(void)
|
||||
{
|
||||
@ -576,12 +280,15 @@ static int __init camellia_aesni_init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
|
||||
return simd_register_skciphers_compat(camellia_algs,
|
||||
ARRAY_SIZE(camellia_algs),
|
||||
camellia_simd_algs);
|
||||
}
|
||||
|
||||
static void __exit camellia_aesni_fini(void)
|
||||
{
|
||||
crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
|
||||
simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
|
||||
camellia_simd_algs);
|
||||
}
|
||||
|
||||
module_init(camellia_aesni_init);
|
||||
|
@ -10,18 +10,15 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/crypto/camellia.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
|
||||
|
||||
@ -154,401 +151,142 @@ static const struct common_glue_ctx camellia_dec_xts = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
|
||||
return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
|
||||
&tfm->base.crt_flags);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&camellia_enc, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&camellia_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
|
||||
req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
|
||||
}
|
||||
|
||||
static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
|
||||
CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
|
||||
nbytes);
|
||||
return glue_ctr_req_128bit(&camellia_ctr, req);
|
||||
}
|
||||
|
||||
static inline void camellia_fpu_end(bool fpu_enabled)
|
||||
int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
glue_fpu_end(fpu_enabled);
|
||||
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
u32 *flags = &tfm->base.crt_flags;
|
||||
int err;
|
||||
|
||||
err = xts_verify_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* first half of xts-key is for crypt */
|
||||
err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* second half of xts-key is for tweak */
|
||||
return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
|
||||
flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xts_camellia_setkey);
|
||||
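xts_camellia_setkey() first runs the generic XTS key check (xts_verify_key) and then programs two independent key schedules: the first half of the key drives data encryption, the second half drives tweak encryption. The split, as a plain C sketch with a stubbed key schedule:

#include <stddef.h>
#include <stdint.h>

/* Stand-in for the real per-cipher key expansion. */
static int expand_key(void *sched, const uint8_t *key, size_t keylen);

/* Program an XTS context: first half crypt key, second half tweak key. */
static int xts_setkey_split(void *crypt_sched, void *tweak_sched,
                            const uint8_t *key, size_t keylen)
{
        int err;

        if (keylen % 2)         /* the two halves must be the same size */
                return -1;

        err = expand_key(crypt_sched, key, keylen / 2);
        if (err)
                return err;

        return expand_key(tweak_sched, key + keylen / 2, keylen / 2);
}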
|
||||
static int xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&camellia_enc_xts, req,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
static int xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
|
||||
&tfm->crt_flags);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&camellia_dec_xts, req,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
struct crypt_priv {
|
||||
struct camellia_ctx *ctx;
|
||||
bool fpu_enabled;
|
||||
static struct skcipher_alg camellia_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(camellia)",
|
||||
.base.cra_driver_name = "__ecb-camellia-aesni",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(camellia)",
|
||||
.base.cra_driver_name = "__cbc-camellia-aesni",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(camellia)",
|
||||
.base.cra_driver_name = "__ctr-camellia-aesni",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.chunksize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}, {
|
||||
.base.cra_name = "__xts(camellia)",
|
||||
.base.cra_driver_name = "__xts-camellia-aesni",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = xts_camellia_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
};
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
camellia_enc_blk(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
|
||||
camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
camellia_dec_blk(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->camellia_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
camellia_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->camellia_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
camellia_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static struct crypto_alg cmll_algs[10] = { {
|
||||
.cra_name = "__ecb-camellia-aesni",
|
||||
.cra_driver_name = "__driver-ecb-camellia-aesni",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-camellia-aesni",
|
||||
.cra_driver_name = "__driver-cbc-camellia-aesni",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-camellia-aesni",
|
||||
.cra_driver_name = "__driver-ctr-camellia-aesni",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__lrw-camellia-aesni",
|
||||
.cra_driver_name = "__driver-lrw-camellia-aesni",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_camellia_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = lrw_camellia_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-camellia-aesni",
|
||||
.cra_driver_name = "__driver-xts-camellia-aesni",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = xts_camellia_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(camellia)",
|
||||
.cra_driver_name = "ecb-camellia-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(camellia)",
|
||||
.cra_driver_name = "cbc-camellia-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(camellia)",
|
||||
.cra_driver_name = "ctr-camellia-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(camellia)",
|
||||
.cra_driver_name = "lrw-camellia-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(camellia)",
|
||||
.cra_driver_name = "xts-camellia-aesni",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
|
||||
|
||||
static int __init camellia_aesni_init(void)
|
||||
{
|
||||
@ -567,12 +305,15 @@ static int __init camellia_aesni_init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
|
||||
return simd_register_skciphers_compat(camellia_algs,
|
||||
ARRAY_SIZE(camellia_algs),
|
||||
camellia_simd_algs);
|
||||
}
|
||||
|
||||
static void __exit camellia_aesni_fini(void)
|
||||
{
|
||||
crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
|
||||
simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
|
||||
camellia_simd_algs);
|
||||
}
|
||||
|
||||
module_init(camellia_aesni_init);
|
||||
|
@ -23,15 +23,12 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/crypto/camellia.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
|
||||
@ -1272,13 +1269,19 @@ int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__camellia_setkey);
|
||||
|
||||
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
|
||||
return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len,
|
||||
&tfm->crt_flags);
|
||||
}
|
||||
|
||||
static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
return camellia_setkey(&tfm->base, key, key_len);
|
||||
}
|
||||
|
||||
void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
|
||||
{
|
||||
u128 iv = *src;
|
||||
@ -1373,188 +1376,33 @@ static const struct common_glue_ctx camellia_dec_cbc = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&camellia_enc, req);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&camellia_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
|
||||
req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
|
||||
return glue_ctr_req_128bit(&camellia_ctr, req);
|
||||
}
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
|
||||
struct camellia_ctx *ctx = priv;
|
||||
int i;
|
||||
|
||||
while (nbytes >= 2 * bsize) {
|
||||
camellia_enc_blk_2way(ctx, srcdst, srcdst);
|
||||
srcdst += bsize * 2;
|
||||
nbytes -= bsize * 2;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
camellia_enc_blk(ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
|
||||
struct camellia_ctx *ctx = priv;
|
||||
int i;
|
||||
|
||||
while (nbytes >= 2 * bsize) {
|
||||
camellia_dec_blk_2way(ctx, srcdst, srcdst);
|
||||
srcdst += bsize * 2;
|
||||
nbytes -= bsize * 2;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
camellia_dec_blk(ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = __camellia_setkey(&ctx->camellia_ctx, key,
|
||||
keylen - CAMELLIA_BLOCK_SIZE,
|
||||
&tfm->crt_flags);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return lrw_init_table(&ctx->lrw_table,
|
||||
key + keylen - CAMELLIA_BLOCK_SIZE);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_camellia_setkey);
|
||||
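lrw_camellia_setkey() carves the supplied key into two parts: everything before the final block feeds the Camellia key schedule, and the trailing CAMELLIA_BLOCK_SIZE bytes seed the LRW multiplication table. A sketch of that layout with stubbed helpers:

#include <stddef.h>
#include <stdint.h>

#define BLOCK_SIZE 16   /* Camellia block size */

/* Stand-ins for the real key schedule and LRW table setup. */
static int expand_cipher_key(void *sched, const uint8_t *key, size_t keylen);
static int init_lrw_table(void *table, const uint8_t tweak_key[BLOCK_SIZE]);

static int lrw_setkey_split(void *sched, void *table,
                            const uint8_t *key, size_t keylen)
{
        int err;

        if (keylen <= BLOCK_SIZE)
                return -1;

        /* Cipher key: everything before the trailing block. */
        err = expand_cipher_key(sched, key, keylen - BLOCK_SIZE);
        if (err)
                return err;

        /* Trailing block: the LRW tweak/table key. */
        return init_lrw_table(table, key + keylen - BLOCK_SIZE);
}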
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[2 * 4];
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &ctx->camellia_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
|
||||
return lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[2 * 4];
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &ctx->camellia_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
|
||||
return lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
void lrw_camellia_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
lrw_free_table(&ctx->lrw_table);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_camellia_exit_tfm);
|
||||
|
||||
int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct camellia_xts_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
int err;
|
||||
|
||||
err = xts_check_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* first half of xts-key is for crypt */
|
||||
err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* second half of xts-key is for tweak */
|
||||
return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
|
||||
flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xts_camellia_setkey);
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
le128 buf[2 * 4];
|
||||
struct xts_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.tweak_ctx = &ctx->tweak_ctx,
|
||||
.tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
.crypt_ctx = &ctx->crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
|
||||
return xts_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
le128 buf[2 * 4];
|
||||
struct xts_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.tweak_ctx = &ctx->tweak_ctx,
|
||||
.tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
|
||||
.crypt_ctx = &ctx->crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
|
||||
return xts_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
static struct crypto_alg camellia_algs[6] = { {
static struct crypto_alg camellia_cipher_alg = {
	.cra_name = "camellia",
	.cra_driver_name = "camellia-asm",
	.cra_priority = 200,
@@ -1572,109 +1420,50 @@ static struct crypto_alg camellia_algs[6] = { {
.cia_decrypt = camellia_decrypt
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(camellia)",
|
||||
.cra_driver_name = "ecb-camellia-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(camellia)",
|
||||
.cra_driver_name = "cbc-camellia-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(camellia)",
|
||||
.cra_driver_name = "ctr-camellia-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(camellia)",
|
||||
.cra_driver_name = "lrw-camellia-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_camellia_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE +
|
||||
CAMELLIA_BLOCK_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = lrw_camellia_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(camellia)",
|
||||
.cra_driver_name = "xts-camellia-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct camellia_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = xts_camellia_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
};
|
||||
|
||||
static struct skcipher_alg camellia_skcipher_algs[] = {
|
||||
{
|
||||
.base.cra_name = "ecb(camellia)",
|
||||
.base.cra_driver_name = "ecb-camellia-asm",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.setkey = camellia_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(camellia)",
|
||||
.base.cra_driver_name = "cbc-camellia-asm",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "ctr(camellia)",
|
||||
.base.cra_driver_name = "ctr-camellia-asm",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct camellia_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAMELLIA_MIN_KEY_SIZE,
|
||||
.max_keysize = CAMELLIA_MAX_KEY_SIZE,
|
||||
.ivsize = CAMELLIA_BLOCK_SIZE,
|
||||
.chunksize = CAMELLIA_BLOCK_SIZE,
|
||||
.setkey = camellia_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}
|
||||
};
|
||||
|
||||
static bool is_blacklisted_cpu(void)
{
@@ -1700,6 +1489,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");

static int __init init(void)
{
	int err;

	if (!force && is_blacklisted_cpu()) {
		printk(KERN_INFO
			"camellia-x86_64: performance on this CPU "
@@ -1708,12 +1499,23 @@ static int __init init(void)
		return -ENODEV;
	}

	return crypto_register_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
	err = crypto_register_alg(&camellia_cipher_alg);
	if (err)
		return err;

	err = crypto_register_skciphers(camellia_skcipher_algs,
					ARRAY_SIZE(camellia_skcipher_algs));
	if (err)
		crypto_unregister_alg(&camellia_cipher_alg);

	return err;
}

static void __exit fini(void)
{
	crypto_unregister_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
	crypto_unregister_alg(&camellia_cipher_alg);
	crypto_unregister_skciphers(camellia_skcipher_algs,
				    ARRAY_SIZE(camellia_skcipher_algs));
}

module_init(init);

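For context (this is our own illustration, not part of the patch): once a driver registers its modes as skciphers, in-kernel users drive them through the standard <crypto/skcipher.h> request API instead of the old blkcipher descriptor. A minimal sketch follows, assuming the converted "ecb(camellia)" algorithm above; the function name, buffer handling, and GFP choice are hypothetical and only show the usual allocate/setkey/request/wait sequence.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Encrypt 'len' bytes in place with ecb(camellia); len must be a multiple
 * of CAMELLIA_BLOCK_SIZE and keylen a valid camellia key size (16/24/32). */
static int camellia_ecb_demo(u8 *buf, unsigned int len,
			     const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ecb(camellia)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/* ECB takes no IV, hence the NULL. */
	skcipher_request_set_crypt(req, &sg, &sg, len, NULL);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
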
@@ -21,18 +21,14 @@
 *
 */

#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/ablk_helper.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <asm/fpu/api.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/internal/simd.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/types.h>

#define CAST5_PARALLEL_BLOCKS 16

@@ -45,10 +41,17 @@ asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src,
				__be64 *iv);

static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes)
static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int keylen)
{
	return cast5_setkey(&tfm->base, key, keylen);
}

static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk,
				   unsigned int nbytes)
{
	return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
			      NULL, fpu_enabled, nbytes);
			      walk, fpu_enabled, nbytes);
}

static inline void cast5_fpu_end(bool fpu_enabled)
@ -56,29 +59,28 @@ static inline void cast5_fpu_end(bool fpu_enabled)
|
||||
return glue_fpu_end(fpu_enabled);
|
||||
}
|
||||
|
||||
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
|
||||
bool enc)
|
||||
static int ecb_crypt(struct skcipher_request *req, bool enc)
|
||||
{
|
||||
bool fpu_enabled = false;
|
||||
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
const unsigned int bsize = CAST5_BLOCK_SIZE;
|
||||
unsigned int nbytes;
|
||||
void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
|
||||
int err;
|
||||
|
||||
fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
err = blkcipher_walk_virt(desc, walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
u8 *wsrc = walk.src.virt.addr;
|
||||
u8 *wdst = walk.dst.virt.addr;
|
||||
|
||||
while ((nbytes = walk->nbytes)) {
|
||||
u8 *wsrc = walk->src.virt.addr;
|
||||
u8 *wdst = walk->dst.virt.addr;
|
||||
|
||||
fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
|
||||
fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
|
||||
|
||||
/* Process multi-block batch */
|
||||
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
|
||||
fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
|
||||
do {
|
||||
fn(ctx, wdst, wsrc);
|
||||
|
||||
@ -103,76 +105,58 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
|
||||
} while (nbytes >= bsize);
|
||||
|
||||
done:
|
||||
err = blkcipher_walk_done(desc, walk, nbytes);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
cast5_fpu_end(fpu_enabled);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, true);
|
||||
return ecb_crypt(req, true);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, false);
|
||||
return ecb_crypt(req, false);
|
||||
}
|
||||
|
||||
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
const unsigned int bsize = CAST5_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
u64 *dst = (u64 *)walk->dst.virt.addr;
|
||||
u64 *iv = (u64 *)walk->iv;
|
||||
|
||||
do {
|
||||
*dst = *src ^ *iv;
|
||||
__cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
|
||||
iv = dst;
|
||||
|
||||
src += 1;
|
||||
dst += 1;
|
||||
nbytes -= bsize;
|
||||
} while (nbytes >= bsize);
|
||||
|
||||
*(u64 *)walk->iv = *iv;
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
nbytes = __cbc_encrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
u64 *src = (u64 *)walk.src.virt.addr;
|
||||
u64 *dst = (u64 *)walk.dst.virt.addr;
|
||||
u64 *iv = (u64 *)walk.iv;
|
||||
|
||||
do {
|
||||
*dst = *src ^ *iv;
|
||||
__cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
|
||||
iv = dst;
|
||||
src++;
|
||||
dst++;
|
||||
nbytes -= bsize;
|
||||
} while (nbytes >= bsize);
|
||||
|
||||
*(u64 *)walk.iv = *iv;
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __cbc_decrypt(struct cast5_ctx *ctx,
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
const unsigned int bsize = CAST5_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
@ -224,31 +208,29 @@ done:
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
bool fpu_enabled = false;
|
||||
struct blkcipher_walk walk;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
|
||||
nbytes = __cbc_decrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
|
||||
nbytes = __cbc_decrypt(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
cast5_fpu_end(fpu_enabled);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ctr_crypt_final(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static void ctr_crypt_final(struct skcipher_walk *walk, struct cast5_ctx *ctx)
|
||||
{
|
||||
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
u8 *ctrblk = walk->iv;
|
||||
u8 keystream[CAST5_BLOCK_SIZE];
|
||||
u8 *src = walk->src.virt.addr;
|
||||
@ -261,10 +243,9 @@ static void ctr_crypt_final(struct blkcipher_desc *desc,
|
||||
crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __ctr_crypt(struct skcipher_walk *walk,
|
||||
struct cast5_ctx *ctx)
|
||||
{
|
||||
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
const unsigned int bsize = CAST5_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
@ -307,162 +288,80 @@ done:
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
bool fpu_enabled = false;
|
||||
struct blkcipher_walk walk;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE);
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
|
||||
fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
|
||||
nbytes = __ctr_crypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
|
||||
nbytes = __ctr_crypt(&walk, ctx);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
cast5_fpu_end(fpu_enabled);
|
||||
|
||||
if (walk.nbytes) {
|
||||
ctr_crypt_final(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
ctr_crypt_final(&walk, ctx);
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct skcipher_alg cast5_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(cast5)",
|
||||
.base.cra_driver_name = "__ecb-cast5-avx",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAST5_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct cast5_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.setkey = cast5_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(cast5)",
|
||||
.base.cra_driver_name = "__cbc-cast5-avx",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAST5_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct cast5_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.ivsize = CAST5_BLOCK_SIZE,
|
||||
.setkey = cast5_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(cast5)",
|
||||
.base.cra_driver_name = "__ctr-cast5-avx",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct cast5_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.ivsize = CAST5_BLOCK_SIZE,
|
||||
.chunksize = CAST5_BLOCK_SIZE,
|
||||
.setkey = cast5_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}
|
||||
};
|
||||
|
||||
static struct crypto_alg cast5_algs[6] = { {
|
||||
.cra_name = "__ecb-cast5-avx",
|
||||
.cra_driver_name = "__driver-ecb-cast5-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAST5_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct cast5_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.setkey = cast5_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-cast5-avx",
|
||||
.cra_driver_name = "__driver-cbc-cast5-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAST5_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct cast5_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.setkey = cast5_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-cast5-avx",
|
||||
.cra_driver_name = "__driver-ctr-cast5-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct cast5_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.ivsize = CAST5_BLOCK_SIZE,
|
||||
.setkey = cast5_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(cast5)",
|
||||
.cra_driver_name = "ecb-cast5-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAST5_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(cast5)",
|
||||
.cra_driver_name = "cbc-cast5-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAST5_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.ivsize = CAST5_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(cast5)",
|
||||
.cra_driver_name = "ctr-cast5-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST5_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST5_MAX_KEY_SIZE,
|
||||
.ivsize = CAST5_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
} };
|
||||
static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
|
||||
|
||||
static int __init cast5_init(void)
|
||||
{
|
||||
@ -474,12 +373,15 @@ static int __init cast5_init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
|
||||
return simd_register_skciphers_compat(cast5_algs,
|
||||
ARRAY_SIZE(cast5_algs),
|
||||
cast5_simd_algs);
|
||||
}
|
||||
|
||||
static void __exit cast5_exit(void)
|
||||
{
|
||||
crypto_unregister_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
|
||||
simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
|
||||
cast5_simd_algs);
|
||||
}
|
||||
|
||||
module_init(cast5_init);
|
||||
|
@ -24,19 +24,13 @@
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/cast6.h>
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
|
||||
#define CAST6_PARALLEL_BLOCKS 8
|
||||
@ -56,6 +50,12 @@ asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
|
||||
asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
|
||||
const u8 *src, le128 *iv);
|
||||
|
||||
static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
return cast6_setkey(&tfm->base, key, keylen);
|
||||
}
|
||||
|
||||
static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
||||
{
|
||||
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
|
||||
@ -157,164 +157,30 @@ static const struct common_glue_ctx cast6_dec_xts = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&cast6_enc, req);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&cast6_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&cast6_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__cast6_encrypt), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
|
||||
req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&cast6_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&cast6_ctr, desc, dst, src, nbytes);
|
||||
}
|
||||
|
||||
static inline bool cast6_fpu_begin(bool fpu_enabled, unsigned int nbytes)
|
||||
{
|
||||
return glue_fpu_begin(CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS,
|
||||
NULL, fpu_enabled, nbytes);
|
||||
}
|
||||
|
||||
static inline void cast6_fpu_end(bool fpu_enabled)
|
||||
{
|
||||
glue_fpu_end(fpu_enabled);
|
||||
}
|
||||
|
||||
struct crypt_priv {
|
||||
struct cast6_ctx *ctx;
|
||||
bool fpu_enabled;
|
||||
};
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAST6_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
|
||||
cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__cast6_encrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = CAST6_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
|
||||
cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__cast6_decrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
struct cast6_lrw_ctx {
|
||||
struct lrw_table_ctx lrw_table;
|
||||
struct cast6_ctx cast6_ctx;
|
||||
};
|
||||
|
||||
static int lrw_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = __cast6_setkey(&ctx->cast6_ctx, key, keylen - CAST6_BLOCK_SIZE,
|
||||
&tfm->crt_flags);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return lrw_init_table(&ctx->lrw_table, key + keylen - CAST6_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[CAST6_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->cast6_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
cast6_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[CAST6_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->cast6_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
cast6_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void lrw_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
lrw_free_table(&ctx->lrw_table);
|
||||
return glue_ctr_req_128bit(&cast6_ctr, req);
|
||||
}
|
||||
|
||||
struct cast6_xts_ctx {
|
||||
@@ -322,14 +188,14 @@ struct cast6_xts_ctx {
	struct cast6_ctx crypt_ctx;
};

static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;
	struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int err;

	err = xts_check_key(tfm, key, keylen);
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

@ -343,245 +209,87 @@ static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
flags);
|
||||
}
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&cast6_enc_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(__cast6_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
return glue_xts_req_128bit(&cast6_enc_xts, req,
|
||||
XTS_TWEAK_CAST(__cast6_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&cast6_dec_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(__cast6_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
return glue_xts_req_128bit(&cast6_dec_xts, req,
|
||||
XTS_TWEAK_CAST(__cast6_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static struct crypto_alg cast6_algs[10] = { {
|
||||
.cra_name = "__ecb-cast6-avx",
|
||||
.cra_driver_name = "__driver-ecb-cast6-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct cast6_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.setkey = cast6_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
static struct skcipher_alg cast6_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(cast6)",
|
||||
.base.cra_driver_name = "__ecb-cast6-avx",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct cast6_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.setkey = cast6_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(cast6)",
|
||||
.base.cra_driver_name = "__cbc-cast6-avx",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct cast6_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = cast6_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(cast6)",
|
||||
.base.cra_driver_name = "__ctr-cast6-avx",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct cast6_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.chunksize = CAST6_BLOCK_SIZE,
|
||||
.setkey = cast6_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}, {
|
||||
.base.cra_name = "__xts(cast6)",
|
||||
.base.cra_driver_name = "__xts-cast6-avx",
|
||||
.base.cra_priority = 200,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct cast6_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * CAST6_MAX_KEY_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = xts_cast6_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-cast6-avx",
|
||||
.cra_driver_name = "__driver-cbc-cast6-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct cast6_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.setkey = cast6_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-cast6-avx",
|
||||
.cra_driver_name = "__driver-ctr-cast6-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct cast6_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = cast6_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__lrw-cast6-avx",
|
||||
.cra_driver_name = "__driver-lrw-cast6-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct cast6_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE +
|
||||
CAST6_BLOCK_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE +
|
||||
CAST6_BLOCK_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = lrw_cast6_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-cast6-avx",
|
||||
.cra_driver_name = "__driver-xts-cast6-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct cast6_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE * 2,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = xts_cast6_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(cast6)",
|
||||
.cra_driver_name = "ecb-cast6-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(cast6)",
|
||||
.cra_driver_name = "cbc-cast6-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(cast6)",
|
||||
.cra_driver_name = "ctr-cast6-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(cast6)",
|
||||
.cra_driver_name = "lrw-cast6-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE +
|
||||
CAST6_BLOCK_SIZE,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE +
|
||||
CAST6_BLOCK_SIZE,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(cast6)",
|
||||
.cra_driver_name = "xts-cast6-avx",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = CAST6_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = CAST6_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = CAST6_MAX_KEY_SIZE * 2,
|
||||
.ivsize = CAST6_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
};
|
||||
|
||||
static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
|
||||
|
||||
static int __init cast6_init(void)
|
||||
{
|
||||
@ -593,12 +301,15 @@ static int __init cast6_init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
|
||||
return simd_register_skciphers_compat(cast6_algs,
|
||||
ARRAY_SIZE(cast6_algs),
|
||||
cast6_simd_algs);
|
||||
}
|
||||
|
||||
static void __exit cast6_exit(void)
|
||||
{
|
||||
crypto_unregister_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
|
||||
simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
|
||||
cast6_simd_algs);
|
||||
}
|
||||
|
||||
module_init(cast6_init);
|
||||
|
@ -20,13 +20,13 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/des.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/algapi.h>
|
||||
|
||||
struct des3_ede_x86_ctx {
|
||||
u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
|
||||
@@ -83,18 +83,18 @@ static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
|
||||
}
|
||||
|
||||
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
|
||||
const u32 *expkey)
|
||||
static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
|
||||
{
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
const unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
err = blkcipher_walk_virt(desc, walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk->nbytes)) {
|
||||
u8 *wsrc = walk->src.virt.addr;
|
||||
u8 *wdst = walk->dst.virt.addr;
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
u8 *wsrc = walk.src.virt.addr;
|
||||
u8 *wdst = walk.dst.virt.addr;
|
||||
|
||||
/* Process four block batch */
|
||||
if (nbytes >= bsize * 3) {
|
||||
@ -121,36 +121,31 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
|
||||
} while (nbytes >= bsize);
|
||||
|
||||
done:
|
||||
err = blkcipher_walk_done(desc, walk, nbytes);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, ctx->enc_expkey);
|
||||
return ecb_crypt(req, ctx->enc_expkey);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, ctx->dec_expkey);
|
||||
return ecb_crypt(req, ctx->dec_expkey);
|
||||
}
|
||||
|
||||
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
@ -171,27 +166,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
nbytes = __cbc_encrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = __cbc_encrypt(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx,
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
@ -250,25 +245,26 @@ done:
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
nbytes = __cbc_decrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = __cbc_decrypt(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
|
||||
struct blkcipher_walk *walk)
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
u8 *ctrblk = walk->iv;
|
||||
u8 keystream[DES3_EDE_BLOCK_SIZE];
|
||||
@ -282,10 +278,9 @@ static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
|
||||
crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
static unsigned int __ctr_crypt(struct des3_ede_x86_ctx *ctx,
|
||||
struct skcipher_walk *walk)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
__be64 *src = (__be64 *)walk->src.virt.addr;
|
||||
@ -333,23 +328,24 @@ done:
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
|
||||
nbytes = __ctr_crypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = __ctr_crypt(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, nbytes);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
if (nbytes) {
|
||||
ctr_crypt_final(ctx, &walk);
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
@ -381,7 +377,14 @@ static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct crypto_alg des3_ede_algs[4] = { {
|
||||
static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return des3_ede_x86_setkey(&tfm->base, key, keylen);
|
||||
}
|
||||
|
||||
static struct crypto_alg des3_ede_cipher = {
|
||||
.cra_name = "des3_ede",
|
||||
.cra_driver_name = "des3_ede-asm",
|
||||
.cra_priority = 200,
|
||||
@ -399,66 +402,50 @@ static struct crypto_alg des3_ede_algs[4] = { {
|
||||
.cia_decrypt = des3_ede_x86_decrypt,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(des3_ede)",
|
||||
.cra_driver_name = "ecb-des3_ede-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.setkey = des3_ede_x86_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(des3_ede)",
|
||||
.cra_driver_name = "cbc-des3_ede-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setkey = des3_ede_x86_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(des3_ede)",
|
||||
.cra_driver_name = "ctr-des3_ede-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setkey = des3_ede_x86_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
};
|
||||
|
||||
static struct skcipher_alg des3_ede_skciphers[] = {
{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3_ede-asm",
.base.cra_priority = 300,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3_ede-asm",
.base.cra_priority = 300,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
}, {
.base.cra_name = "ctr(des3_ede)",
.base.cra_driver_name = "ctr-des3_ede-asm",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
.chunksize = DES3_EDE_BLOCK_SIZE,
.setkey = des3_ede_x86_setkey_skcipher,
.encrypt = ctr_crypt,
.decrypt = ctr_crypt,
}
};

static bool is_blacklisted_cpu(void)
{
@ -483,17 +470,30 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");

static int __init des3_ede_x86_init(void)
{
int err;

if (!force && is_blacklisted_cpu()) {
pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
return -ENODEV;
}

return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
err = crypto_register_alg(&des3_ede_cipher);
if (err)
return err;

err = crypto_register_skciphers(des3_ede_skciphers,
ARRAY_SIZE(des3_ede_skciphers));
if (err)
crypto_unregister_alg(&des3_ede_cipher);

return err;
}

static void __exit des3_ede_x86_fini(void)
{
crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
crypto_unregister_alg(&des3_ede_cipher);
crypto_unregister_skciphers(des3_ede_skciphers,
ARRAY_SIZE(des3_ede_skciphers));
}

module_init(des3_ede_x86_init);

@ -29,313 +29,212 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
unsigned int nbytes, i, func_bytes;
struct skcipher_walk walk;
bool fpu_enabled = false;
unsigned int nbytes;
int err;

err = blkcipher_walk_virt(desc, walk);
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk->nbytes)) {
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
while ((nbytes = walk.nbytes)) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
unsigned int func_bytes;
unsigned int i;

fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
desc, fpu_enabled, nbytes);

&walk, fpu_enabled, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;

if (nbytes < func_bytes)
continue;

/* Process multi-block batch */
if (nbytes >= func_bytes) {
do {
gctx->funcs[i].fn_u.ecb(ctx, wdst,
wsrc);
do {
gctx->funcs[i].fn_u.ecb(ctx, dst, src);
src += func_bytes;
dst += func_bytes;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);

wsrc += func_bytes;
wdst += func_bytes;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);

if (nbytes < bsize)
goto done;
}
if (nbytes < bsize)
break;
}

done:
err = blkcipher_walk_done(desc, walk, nbytes);
err = skcipher_walk_done(&walk, nbytes);
}

glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
struct skcipher_request *req)
{
struct blkcipher_walk walk;

blkcipher_walk_init(&walk, dst, src, nbytes);
return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);

static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 *iv = (u128 *)walk->iv;

do {
u128_xor(dst, src, iv);
fn(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;

src += 1;
dst += 1;
nbytes -= bsize;
} while (nbytes >= bsize);

*(u128 *)walk->iv = *iv;
return nbytes;
}

int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
struct skcipher_walk walk;
unsigned int nbytes;
int err;

blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes)) {
nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
const u128 *src = (u128 *)walk.src.virt.addr;
u128 *dst = (u128 *)walk.dst.virt.addr;
u128 *iv = (u128 *)walk.iv;

do {
u128_xor(dst, src, iv);
fn(ctx, (u8 *)dst, (u8 *)dst);
iv = dst;
src++;
dst++;
nbytes -= bsize;
} while (nbytes >= bsize);

*(u128 *)walk.iv = *iv;
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
u128 last_iv;
unsigned int num_blocks, func_bytes;
unsigned int i;
struct skcipher_walk walk;
bool fpu_enabled = false;
unsigned int nbytes;
int err;

/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;
err = skcipher_walk_virt(&walk, req, false);

last_iv = *src;
while ((nbytes = walk.nbytes)) {
const u128 *src = walk.src.virt.addr;
u128 *dst = walk.dst.virt.addr;
unsigned int func_bytes, num_blocks;
unsigned int i;
u128 last_iv;

for (i = 0; i < gctx->num_funcs; i++) {
num_blocks = gctx->funcs[i].num_blocks;
func_bytes = bsize * num_blocks;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
&walk, fpu_enabled, nbytes);
/* Start of the last block. */
src += nbytes / bsize - 1;
dst += nbytes / bsize - 1;

/* Process multi-block batch */
if (nbytes >= func_bytes) {
last_iv = *src;

for (i = 0; i < gctx->num_funcs; i++) {
num_blocks = gctx->funcs[i].num_blocks;
func_bytes = bsize * num_blocks;

if (nbytes < func_bytes)
continue;

/* Process multi-block batch */
do {
nbytes -= func_bytes - bsize;
src -= num_blocks - 1;
dst -= num_blocks - 1;

gctx->funcs[i].fn_u.cbc(ctx, dst, src);

nbytes -= bsize;
nbytes -= func_bytes;
if (nbytes < bsize)
goto done;

u128_xor(dst, dst, src - 1);
src -= 1;
dst -= 1;
u128_xor(dst, dst, --src);
dst--;
} while (nbytes >= func_bytes);
}
}

done:
u128_xor(dst, dst, (u128 *)walk->iv);
*(u128 *)walk->iv = last_iv;

return nbytes;
}

int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
const unsigned int bsize = 128 / 8;
bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;

blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);

while ((nbytes = walk.nbytes)) {
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
desc, fpu_enabled, nbytes);
nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
u128_xor(dst, dst, (u128 *)walk.iv);
*(u128 *)walk.iv = last_iv;
err = skcipher_walk_done(&walk, nbytes);
}

glue_fpu_end(fpu_enabled);
return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
void *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *src = (u8 *)walk->src.virt.addr;
u8 *dst = (u8 *)walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
le128 ctrblk;
u128 tmp;

be128_to_le128(&ctrblk, (be128 *)walk->iv);

memcpy(&tmp, src, nbytes);
fn_ctr(ctx, &tmp, &tmp, &ctrblk);
memcpy(dst, &tmp, nbytes);

le128_to_be128((be128 *)walk->iv, &ctrblk);
}

static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
const unsigned int bsize = 128 / 8;
void *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
le128 ctrblk;
unsigned int num_blocks, func_bytes;
unsigned int i;

be128_to_le128(&ctrblk, (be128 *)walk->iv);

/* Process multi-block batch */
for (i = 0; i < gctx->num_funcs; i++) {
num_blocks = gctx->funcs[i].num_blocks;
func_bytes = bsize * num_blocks;

if (nbytes >= func_bytes) {
do {
gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

src += num_blocks;
dst += num_blocks;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);

if (nbytes < bsize)
goto done;
}
}

done:
le128_to_be128((be128 *)walk->iv, &ctrblk);
return nbytes;
}

int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req)
{
void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
struct skcipher_walk walk;
bool fpu_enabled = false;
struct blkcipher_walk walk;
unsigned int nbytes;
int err;

blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, bsize);
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes) >= bsize) {
const u128 *src = walk.src.virt.addr;
u128 *dst = walk.dst.virt.addr;
unsigned int func_bytes, num_blocks;
unsigned int i;
le128 ctrblk;

fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
desc, fpu_enabled, nbytes);
nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
&walk, fpu_enabled, nbytes);

glue_fpu_end(fpu_enabled);
be128_to_le128(&ctrblk, (be128 *)walk.iv);

if (walk.nbytes) {
glue_ctr_crypt_final_128bit(
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
}
for (i = 0; i < gctx->num_funcs; i++) {
num_blocks = gctx->funcs[i].num_blocks;
func_bytes = bsize * num_blocks;

return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
if (nbytes < func_bytes)
continue;

static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
void *ctx,
struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
const unsigned int bsize = 128 / 8;
unsigned int nbytes = walk->nbytes;
u128 *src = (u128 *)walk->src.virt.addr;
u128 *dst = (u128 *)walk->dst.virt.addr;
unsigned int num_blocks, func_bytes;
unsigned int i;

/* Process multi-block batch */
for (i = 0; i < gctx->num_funcs; i++) {
num_blocks = gctx->funcs[i].num_blocks;
func_bytes = bsize * num_blocks;

if (nbytes >= func_bytes) {
/* Process multi-block batch */
do {
gctx->funcs[i].fn_u.xts(ctx, dst, src,
(le128 *)walk->iv);

gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
src += num_blocks;
dst += num_blocks;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);

if (nbytes < bsize)
goto done;
break;
}

le128_to_be128((be128 *)walk.iv, &ctrblk);
err = skcipher_walk_done(&walk, nbytes);
}

done:
return nbytes;
glue_fpu_end(fpu_enabled);

if (nbytes) {
le128 ctrblk;
u128 tmp;

be128_to_le128(&ctrblk, (be128 *)walk.iv);
memcpy(&tmp, walk.src.virt.addr, nbytes);
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
&ctrblk);
memcpy(walk.dst.virt.addr, &tmp, nbytes);
le128_to_be128((be128 *)walk.iv, &ctrblk);

err = skcipher_walk_done(&walk, 0);
}

return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
void *ctx,
@ -372,46 +271,6 @@ done:
return nbytes;
}

/* for implementations implementing faster XTS IV generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
void *tweak_ctx, void *crypt_ctx)
{
const unsigned int bsize = 128 / 8;
bool fpu_enabled = false;
struct blkcipher_walk walk;
int err;

blkcipher_walk_init(&walk, dst, src, nbytes);

err = blkcipher_walk_virt(desc, &walk);
nbytes = walk.nbytes;
if (!nbytes)
return err;

/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
desc, fpu_enabled,
nbytes < bsize ? bsize : nbytes);

/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);

while (nbytes) {
nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

err = blkcipher_walk_done(desc, &walk, nbytes);
nbytes = walk.nbytes;
}

glue_fpu_end(fpu_enabled);

return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);

int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req,
common_glue_func_t tweak_fn, void *tweak_ctx,
@ -429,9 +288,9 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
return err;

/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
&walk, fpu_enabled,
nbytes < bsize ? bsize : nbytes);
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
&walk, fpu_enabled,
nbytes < bsize ? bsize : nbytes);

/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);

@ -14,15 +14,12 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
#include <asm/fpu/api.h>
#include <asm/crypto/serpent-avx.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
#include <asm/crypto/serpent-avx.h>

#define SERPENT_AVX2_PARALLEL_BLOCKS 16

@ -40,6 +37,12 @@ asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);

static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}

static const struct common_glue_ctx serpent_enc = {
.num_funcs = 3,
.fpu_blocks_limit = 8,
@ -136,403 +139,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&serpent_enc, req);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&serpent_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
|
||||
req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
|
||||
return glue_ctr_req_128bit(&serpent_ctr, req);
|
||||
}
|
||||
|
||||
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
|
||||
static int xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
/* since reusing AVX functions, starts using FPU at 8 parallel blocks */
|
||||
return glue_fpu_begin(SERPENT_BLOCK_SIZE, 8, NULL, fpu_enabled, nbytes);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&serpent_enc_xts, req,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static inline void serpent_fpu_end(bool fpu_enabled)
|
||||
static int xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
glue_fpu_end(fpu_enabled);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&serpent_dec_xts, req,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
struct crypt_priv {
|
||||
struct serpent_ctx *ctx;
|
||||
bool fpu_enabled;
|
||||
static struct skcipher_alg serpent_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(serpent)",
|
||||
.base.cra_driver_name = "__ecb-serpent-avx2",
|
||||
.base.cra_priority = 600,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(serpent)",
|
||||
.base.cra_driver_name = "__cbc-serpent-avx2",
|
||||
.base.cra_priority = 600,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(serpent)",
|
||||
.base.cra_driver_name = "__ctr-serpent-avx2",
|
||||
.base.cra_priority = 600,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.chunksize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}, {
|
||||
.base.cra_name = "__xts(serpent)",
|
||||
.base.cra_driver_name = "__xts-serpent-avx2",
|
||||
.base.cra_priority = 600,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = xts_serpent_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
};
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = SERPENT_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) {
|
||||
serpent_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
|
||||
serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__serpent_encrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = SERPENT_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) {
|
||||
serpent_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
|
||||
serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
|
||||
srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
|
||||
nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__serpent_decrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->serpent_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->serpent_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static struct crypto_alg srp_algs[10] = { {
|
||||
.cra_name = "__ecb-serpent-avx2",
|
||||
.cra_driver_name = "__driver-ecb-serpent-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[0].cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-serpent-avx2",
|
||||
.cra_driver_name = "__driver-cbc-serpent-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[1].cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-serpent-avx2",
|
||||
.cra_driver_name = "__driver-ctr-serpent-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[2].cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__lrw-serpent-avx2",
|
||||
.cra_driver_name = "__driver-lrw-serpent-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[3].cra_list),
|
||||
.cra_exit = lrw_serpent_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = lrw_serpent_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-serpent-avx2",
|
||||
.cra_driver_name = "__driver-xts-serpent-avx2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[4].cra_list),
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = xts_serpent_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(serpent)",
|
||||
.cra_driver_name = "ecb-serpent-avx2",
|
||||
.cra_priority = 600,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[5].cra_list),
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(serpent)",
|
||||
.cra_driver_name = "cbc-serpent-avx2",
|
||||
.cra_priority = 600,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[6].cra_list),
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(serpent)",
|
||||
.cra_driver_name = "ctr-serpent-avx2",
|
||||
.cra_priority = 600,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[7].cra_list),
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(serpent)",
|
||||
.cra_driver_name = "lrw-serpent-avx2",
|
||||
.cra_priority = 600,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[8].cra_list),
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(serpent)",
|
||||
.cra_driver_name = "xts-serpent-avx2",
|
||||
.cra_priority = 600,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_list = LIST_HEAD_INIT(srp_algs[9].cra_list),
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
@ -548,12 +261,15 @@ static int __init init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(srp_algs, ARRAY_SIZE(srp_algs));
|
||||
return simd_register_skciphers_compat(serpent_algs,
|
||||
ARRAY_SIZE(serpent_algs),
|
||||
serpent_simd_algs);
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
|
||||
simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
|
||||
serpent_simd_algs);
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
|
@ -24,21 +24,15 @@
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/serpent.h>
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/crypto/serpent-avx.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
#include <asm/crypto/serpent-avx.h>
|
||||
|
||||
/* 8-way parallel cipher functions */
|
||||
asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
|
||||
@ -91,6 +85,31 @@ void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(serpent_xts_dec);
|
||||
|
||||
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
|
||||
}
|
||||
|
||||
int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_verify_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* first half of xts-key is for crypt */
|
||||
err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* second half of xts-key is for tweak */
|
||||
return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xts_serpent_setkey);
|
||||
|
||||
static const struct common_glue_ctx serpent_enc = {
|
||||
.num_funcs = 2,
|
||||
@ -170,423 +189,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&serpent_enc, req);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&serpent_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
|
||||
req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
|
||||
return glue_ctr_req_128bit(&serpent_ctr, req);
|
||||
}
|
||||
|
||||
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
|
||||
static int xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
|
||||
NULL, fpu_enabled, nbytes);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&serpent_enc_xts, req,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static inline void serpent_fpu_end(bool fpu_enabled)
|
||||
static int xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
glue_fpu_end(fpu_enabled);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&serpent_dec_xts, req,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
struct crypt_priv {
|
||||
struct serpent_ctx *ctx;
|
||||
bool fpu_enabled;
|
||||
static struct skcipher_alg serpent_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(serpent)",
|
||||
.base.cra_driver_name = "__ecb-serpent-avx",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(serpent)",
|
||||
.base.cra_driver_name = "__cbc-serpent-avx",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(serpent)",
|
||||
.base.cra_driver_name = "__ctr-serpent-avx",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.chunksize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}, {
|
||||
.base.cra_name = "__xts(serpent)",
|
||||
.base.cra_driver_name = "__xts-serpent-avx",
|
||||
.base.cra_priority = 500,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = xts_serpent_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
};
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = SERPENT_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
|
||||
serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__serpent_encrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = SERPENT_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
|
||||
serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__serpent_decrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
|
||||
SERPENT_BLOCK_SIZE);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return lrw_init_table(&ctx->lrw_table, key + keylen -
|
||||
SERPENT_BLOCK_SIZE);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_serpent_setkey);
|
||||
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->serpent_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->serpent_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void lrw_serpent_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
lrw_free_table(&ctx->lrw_table);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_serpent_exit_tfm);
|
||||
|
||||
int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_check_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* first half of xts-key is for crypt */
|
||||
err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* second half of xts-key is for tweak */
|
||||
return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xts_serpent_setkey);
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static struct crypto_alg serpent_algs[10] = { {
|
||||
.cra_name = "__ecb-serpent-avx",
|
||||
.cra_driver_name = "__driver-ecb-serpent-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-serpent-avx",
|
||||
.cra_driver_name = "__driver-cbc-serpent-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-serpent-avx",
|
||||
.cra_driver_name = "__driver-ctr-serpent-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__lrw-serpent-avx",
|
||||
.cra_driver_name = "__driver-lrw-serpent-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_serpent_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = lrw_serpent_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-serpent-avx",
|
||||
.cra_driver_name = "__driver-xts-serpent-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = xts_serpent_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(serpent)",
|
||||
.cra_driver_name = "ecb-serpent-avx",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(serpent)",
|
||||
.cra_driver_name = "cbc-serpent-avx",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(serpent)",
|
||||
.cra_driver_name = "ctr-serpent-avx",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(serpent)",
|
||||
.cra_driver_name = "lrw-serpent-avx",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(serpent)",
|
||||
.cra_driver_name = "xts-serpent-avx",
|
||||
.cra_priority = 500,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
|
||||
|
||||
static int __init serpent_init(void)
|
||||
{
|
||||
@ -598,12 +307,15 @@ static int __init serpent_init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
|
||||
return simd_register_skciphers_compat(serpent_algs,
|
||||
ARRAY_SIZE(serpent_algs),
|
||||
serpent_simd_algs);
|
||||
}
|
||||
|
||||
static void __exit serpent_exit(void)
|
||||
{
|
||||
crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
|
||||
simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
|
||||
serpent_simd_algs);
|
||||
}
|
||||
|
||||
module_init(serpent_init);
|
||||
|
@ -30,21 +30,22 @@
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/serpent.h>
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/serpent.h>
|
||||
#include <asm/crypto/serpent-sse2.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
|
||||
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
|
||||
}
|
||||
|
||||
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
|
||||
{
|
||||
u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
|
||||
@@ -139,464 +140,79 @@ static const struct common_glue_ctx serpent_dec_cbc = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&serpent_enc, req);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&serpent_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
|
||||
req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
|
||||
return glue_ctr_req_128bit(&serpent_ctr, req);
|
||||
}
|
||||
|
||||
static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
|
||||
{
|
||||
return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
|
||||
NULL, fpu_enabled, nbytes);
|
||||
}
|
||||
|
||||
static inline void serpent_fpu_end(bool fpu_enabled)
|
||||
{
|
||||
glue_fpu_end(fpu_enabled);
|
||||
}
|
||||
|
||||
struct crypt_priv {
|
||||
struct serpent_ctx *ctx;
|
||||
bool fpu_enabled;
|
||||
static struct skcipher_alg serpent_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(serpent)",
|
||||
.base.cra_driver_name = "__ecb-serpent-sse2",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(serpent)",
|
||||
.base.cra_driver_name = "__cbc-serpent-sse2",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(serpent)",
|
||||
.base.cra_driver_name = "__ctr-serpent-sse2",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.chunksize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
};
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = SERPENT_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
|
||||
serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__serpent_encrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = SERPENT_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
|
||||
serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
__serpent_decrypt(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
struct serpent_lrw_ctx {
|
||||
struct lrw_table_ctx lrw_table;
|
||||
struct serpent_ctx serpent_ctx;
|
||||
};
|
||||
|
||||
static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
|
||||
SERPENT_BLOCK_SIZE);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return lrw_init_table(&ctx->lrw_table, key + keylen -
|
||||
SERPENT_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->serpent_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->serpent_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void lrw_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
lrw_free_table(&ctx->lrw_table);
|
||||
}
|
||||
|
||||
struct serpent_xts_ctx {
|
||||
struct serpent_ctx tweak_ctx;
|
||||
struct serpent_ctx crypt_ctx;
|
||||
};
|
||||
|
||||
static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = xts_check_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* first half of xts-key is for crypt */
|
||||
err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* second half of xts-key is for tweak */
|
||||
return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
|
||||
}
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
le128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->crypt_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct xts_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.tweak_ctx = &ctx->tweak_ctx,
|
||||
.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = xts_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
le128 buf[SERPENT_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->crypt_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct xts_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.tweak_ctx = &ctx->tweak_ctx,
|
||||
.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = xts_crypt(desc, dst, src, nbytes, &req);
|
||||
serpent_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct crypto_alg serpent_algs[10] = { {
|
||||
.cra_name = "__ecb-serpent-sse2",
|
||||
.cra_driver_name = "__driver-ecb-serpent-sse2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-serpent-sse2",
|
||||
.cra_driver_name = "__driver-cbc-serpent-sse2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-serpent-sse2",
|
||||
.cra_driver_name = "__driver-ctr-serpent-sse2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct serpent_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = serpent_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__lrw-serpent-sse2",
|
||||
.cra_driver_name = "__driver-lrw-serpent-sse2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = lrw_serpent_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-serpent-sse2",
|
||||
.cra_driver_name = "__driver-xts-serpent-sse2",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct serpent_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = xts_serpent_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(serpent)",
|
||||
.cra_driver_name = "ecb-serpent-sse2",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(serpent)",
|
||||
.cra_driver_name = "cbc-serpent-sse2",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(serpent)",
|
||||
.cra_driver_name = "ctr-serpent-sse2",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(serpent)",
|
||||
.cra_driver_name = "lrw-serpent-sse2",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE +
|
||||
SERPENT_BLOCK_SIZE,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(serpent)",
|
||||
.cra_driver_name = "xts-serpent-sse2",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = SERPENT_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = SERPENT_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = SERPENT_MAX_KEY_SIZE * 2,
|
||||
.ivsize = SERPENT_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
} };
static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];

static int __init serpent_sse2_init(void)
{
@@ -605,12 +221,15 @@ static int __init serpent_sse2_init(void)
return -ENODEV;
}

return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
return simd_register_skciphers_compat(serpent_algs,
ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}

static void __exit serpent_sse2_exit(void)
{
crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
serpent_simd_algs);
}

module_init(serpent_sse2_init);
@@ -106,13 +106,6 @@ static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
|
||||
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
|
||||
(struct sha1_mb_mgr *state);
|
||||
|
||||
static inline void sha1_init_digest(uint32_t *digest)
|
||||
{
|
||||
static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
|
||||
SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
|
||||
memcpy(digest, initial_digest, sizeof(initial_digest));
|
||||
}
|
||||
|
||||
static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
|
||||
uint64_t total_len)
|
||||
{
|
||||
@@ -244,11 +237,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
|
||||
uint32_t len,
|
||||
int flags)
|
||||
{
|
||||
if (flags & (~HASH_ENTIRE)) {
|
||||
/*
|
||||
* User should not pass anything other than FIRST, UPDATE, or
|
||||
* LAST
|
||||
*/
|
||||
if (flags & ~(HASH_UPDATE | HASH_LAST)) {
|
||||
/* User should not pass anything other than UPDATE or LAST */
|
||||
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
|
||||
return ctx;
|
||||
}
|
||||
@@ -259,24 +249,12 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
|
||||
return ctx;
|
||||
}
|
||||
|
||||
if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
|
||||
if (ctx->status & HASH_CTX_STS_COMPLETE) {
|
||||
/* Cannot update a finished job. */
|
||||
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
|
||||
return ctx;
|
||||
}
|
||||
|
||||
|
||||
if (flags & HASH_FIRST) {
|
||||
/* Init digest */
|
||||
sha1_init_digest(ctx->job.result_digest);
|
||||
|
||||
/* Reset byte counter */
|
||||
ctx->total_length = 0;
|
||||
|
||||
/* Clear extra blocks */
|
||||
ctx->partial_block_buffer_length = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we made it here, there were no errors during this call to
|
||||
* submit
|
||||
|
@@ -57,11 +57,9 @@
#include "sha1_mb_mgr.h"

#define HASH_UPDATE 0x00
#define HASH_FIRST 0x01
#define HASH_LAST 0x02
#define HASH_ENTIRE 0x03
#define HASH_DONE 0x04
#define HASH_FINAL 0x08
#define HASH_LAST 0x01
#define HASH_DONE 0x02
#define HASH_FINAL 0x04

#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
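The multibuffer rework removes HASH_FIRST and HASH_ENTIRE, so a submit call may only combine HASH_UPDATE and HASH_LAST; the first submission is now implied by the context state rather than a flag. A small standalone sketch of the resulting flag check (constants copied from the new header, the demo function itself is made up for illustration):

#include <stdio.h>

#define HASH_UPDATE 0x00
#define HASH_LAST   0x01
#define HASH_DONE   0x02
#define HASH_FINAL  0x04

/* Mirrors the new validation in the *_ctx_mgr_submit() paths above:
 * anything outside HASH_UPDATE | HASH_LAST is an invalid-flags error. */
static int submit_flags_ok(int flags)
{
	return !(flags & ~(HASH_UPDATE | HASH_LAST));
}

int main(void)
{
	printf("%d\n", submit_flags_ok(HASH_UPDATE)); /* 1: plain update */
	printf("%d\n", submit_flags_ok(HASH_LAST));   /* 1: final block */
	printf("%d\n", submit_flags_ok(HASH_FINAL));  /* 0: not a submit flag, rejected */
	return 0;
}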
@@ -106,14 +106,6 @@ static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
|
||||
static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
|
||||
(struct sha256_mb_mgr *state);
|
||||
|
||||
inline void sha256_init_digest(uint32_t *digest)
|
||||
{
|
||||
static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
|
||||
SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
|
||||
SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
|
||||
memcpy(digest, initial_digest, sizeof(initial_digest));
|
||||
}
|
||||
|
||||
inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
|
||||
uint64_t total_len)
|
||||
{
|
||||
@@ -245,10 +237,8 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
|
||||
uint32_t len,
|
||||
int flags)
|
||||
{
|
||||
if (flags & (~HASH_ENTIRE)) {
|
||||
/* User should not pass anything other than FIRST, UPDATE
|
||||
* or LAST
|
||||
*/
|
||||
if (flags & ~(HASH_UPDATE | HASH_LAST)) {
|
||||
/* User should not pass anything other than UPDATE or LAST */
|
||||
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
|
||||
return ctx;
|
||||
}
|
||||
@@ -259,23 +249,12 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
|
||||
return ctx;
|
||||
}
|
||||
|
||||
if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
|
||||
if (ctx->status & HASH_CTX_STS_COMPLETE) {
|
||||
/* Cannot update a finished job. */
|
||||
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
|
||||
return ctx;
|
||||
}
|
||||
|
||||
if (flags & HASH_FIRST) {
|
||||
/* Init digest */
|
||||
sha256_init_digest(ctx->job.result_digest);
|
||||
|
||||
/* Reset byte counter */
|
||||
ctx->total_length = 0;
|
||||
|
||||
/* Clear extra blocks */
|
||||
ctx->partial_block_buffer_length = 0;
|
||||
}
|
||||
|
||||
/* If we made it here, there was no error during this call to submit */
|
||||
ctx->error = HASH_CTX_ERROR_NONE;
|
||||
|
||||
|
@@ -57,11 +57,9 @@
#include "sha256_mb_mgr.h"

#define HASH_UPDATE 0x00
#define HASH_FIRST 0x01
#define HASH_LAST 0x02
#define HASH_ENTIRE 0x03
#define HASH_DONE 0x04
#define HASH_FINAL 0x08
#define HASH_LAST 0x01
#define HASH_DONE 0x02
#define HASH_FINAL 0x04

#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
@@ -107,15 +107,6 @@ static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
|
||||
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
|
||||
(struct sha512_mb_mgr *state);
|
||||
|
||||
inline void sha512_init_digest(uint64_t *digest)
|
||||
{
|
||||
static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
|
||||
SHA512_H0, SHA512_H1, SHA512_H2,
|
||||
SHA512_H3, SHA512_H4, SHA512_H5,
|
||||
SHA512_H6, SHA512_H7 };
|
||||
memcpy(digest, initial_digest, sizeof(initial_digest));
|
||||
}
|
||||
|
||||
inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
|
||||
uint64_t total_len)
|
||||
{
|
||||
@@ -263,11 +254,8 @@ static struct sha512_hash_ctx
|
||||
|
||||
mgr = cstate->mgr;
|
||||
spin_lock_irqsave(&cstate->work_lock, irqflags);
|
||||
if (flags & (~HASH_ENTIRE)) {
|
||||
/*
|
||||
* User should not pass anything other than FIRST, UPDATE, or
|
||||
* LAST
|
||||
*/
|
||||
if (flags & ~(HASH_UPDATE | HASH_LAST)) {
|
||||
/* User should not pass anything other than UPDATE or LAST */
|
||||
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
|
||||
goto unlock;
|
||||
}
|
||||
@@ -278,24 +266,12 @@ static struct sha512_hash_ctx
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
|
||||
if (ctx->status & HASH_CTX_STS_COMPLETE) {
|
||||
/* Cannot update a finished job. */
|
||||
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
|
||||
if (flags & HASH_FIRST) {
|
||||
/* Init digest */
|
||||
sha512_init_digest(ctx->job.result_digest);
|
||||
|
||||
/* Reset byte counter */
|
||||
ctx->total_length = 0;
|
||||
|
||||
/* Clear extra blocks */
|
||||
ctx->partial_block_buffer_length = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we made it here, there were no errors during this call to
|
||||
* submit
|
||||
|
@@ -57,11 +57,9 @@
#include "sha512_mb_mgr.h"

#define HASH_UPDATE 0x00
#define HASH_FIRST 0x01
#define HASH_LAST 0x02
#define HASH_ENTIRE 0x03
#define HASH_DONE 0x04
#define HASH_FINAL 0x08
#define HASH_LAST 0x01
#define HASH_DONE 0x02
#define HASH_FINAL 0x04

#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
@@ -24,24 +24,15 @@
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/err.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/simd.h>
|
||||
#include <crypto/twofish.h>
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/ctr.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/xts.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include <asm/crypto/twofish.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/crypto/twofish.h>
|
||||
|
||||
#define TWOFISH_PARALLEL_BLOCKS 8
|
||||
|
||||
@@ -61,6 +52,12 @@ asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
|
||||
asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
|
||||
const u8 *src, le128 *iv);
|
||||
|
||||
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
return twofish_setkey(&tfm->base, key, keylen);
|
||||
}
|
||||
|
||||
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
|
||||
const u8 *src)
|
||||
{
|
||||
@@ -79,6 +76,31 @@ static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
GLUE_FUNC_CAST(twofish_dec_blk));
}

struct twofish_xts_ctx {
struct twofish_ctx tweak_ctx;
struct twofish_ctx crypt_ctx;
};

static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 *flags = &tfm->base.crt_flags;
int err;

err = xts_verify_key(tfm, key, keylen);
if (err)
return err;

/* first half of xts-key is for crypt */
err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
if (err)
return err;

/* second half of xts-key is for tweak */
return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
flags);
}
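Worth noting for the setkey above: an XTS key is two full Twofish keys back to back, so once xts_verify_key() accepts the length the routine programs the first half into crypt_ctx and the second into tweak_ctx. A hedged sketch of feeding it a 2 x 32-byte key through the public API (demo function name and key bytes are placeholders):

#include <crypto/skcipher.h>
#include <linux/err.h>

static int xts_twofish_setkey_demo(void)
{
	/* Placeholder key material: first 32 bytes become the crypt key,
	 * last 32 bytes the tweak key. */
	static const u8 key[64] = { 0x01, 0x02, 0x03, 0x04 };
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher("xts(twofish)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 64 = 2 * TF_MAX_KEY_SIZE; odd lengths are rejected by
	 * xts_verify_key(), and identical halves are rejected in FIPS mode. */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));

	crypto_free_skcipher(tfm);
	return err;
}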
static const struct common_glue_ctx twofish_enc = {
|
||||
.num_funcs = 3,
|
||||
@@ -170,389 +192,113 @@ static const struct common_glue_ctx twofish_dec_xts = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&twofish_enc, req);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&twofish_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
|
||||
req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
|
||||
return glue_ctr_req_128bit(&twofish_ctr, req);
|
||||
}
|
||||
|
||||
static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
|
||||
static int xts_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL,
|
||||
fpu_enabled, nbytes);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&twofish_enc_xts, req,
|
||||
XTS_TWEAK_CAST(twofish_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static inline void twofish_fpu_end(bool fpu_enabled)
|
||||
static int xts_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
glue_fpu_end(fpu_enabled);
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
return glue_xts_req_128bit(&twofish_dec_xts, req,
|
||||
XTS_TWEAK_CAST(twofish_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
struct crypt_priv {
|
||||
struct twofish_ctx *ctx;
|
||||
bool fpu_enabled;
|
||||
static struct skcipher_alg twofish_algs[] = {
|
||||
{
|
||||
.base.cra_name = "__ecb(twofish)",
|
||||
.base.cra_driver_name = "__ecb-twofish-avx",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.setkey = twofish_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__cbc(twofish)",
|
||||
.base.cra_driver_name = "__cbc-twofish-avx",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = twofish_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "__ctr(twofish)",
|
||||
.base.cra_driver_name = "__ctr-twofish-avx",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.chunksize = TF_BLOCK_SIZE,
|
||||
.setkey = twofish_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
}, {
|
||||
.base.cra_name = "__xts(twofish)",
|
||||
.base.cra_driver_name = "__xts-twofish-avx",
|
||||
.base.cra_priority = 400,
|
||||
.base.cra_flags = CRYPTO_ALG_INTERNAL,
|
||||
.base.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct twofish_xts_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = 2 * TF_MIN_KEY_SIZE,
|
||||
.max_keysize = 2 * TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = xts_twofish_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
};
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = TF_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
|
||||
twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
|
||||
twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
|
||||
|
||||
nbytes %= bsize * 3;
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
twofish_enc_blk(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = TF_BLOCK_SIZE;
|
||||
struct crypt_priv *ctx = priv;
|
||||
int i;
|
||||
|
||||
ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
|
||||
|
||||
if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
|
||||
twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
|
||||
twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
|
||||
|
||||
nbytes %= bsize * 3;
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
twofish_dec_blk(ctx->ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[TWOFISH_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->twofish_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
twofish_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[TWOFISH_PARALLEL_BLOCKS];
|
||||
struct crypt_priv crypt_ctx = {
|
||||
.ctx = &ctx->twofish_ctx,
|
||||
.fpu_enabled = false,
|
||||
};
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
int ret;
|
||||
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
ret = lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
twofish_fpu_end(crypt_ctx.fpu_enabled);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&twofish_enc_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(twofish_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
|
||||
return glue_xts_crypt_128bit(&twofish_dec_xts, desc, dst, src, nbytes,
|
||||
XTS_TWEAK_CAST(twofish_enc_blk),
|
||||
&ctx->tweak_ctx, &ctx->crypt_ctx);
|
||||
}
|
||||
|
||||
static struct crypto_alg twofish_algs[10] = { {
|
||||
.cra_name = "__ecb-twofish-avx",
|
||||
.cra_driver_name = "__driver-ecb-twofish-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.setkey = twofish_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__cbc-twofish-avx",
|
||||
.cra_driver_name = "__driver-cbc-twofish-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.setkey = twofish_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__ctr-twofish-avx",
|
||||
.cra_driver_name = "__driver-ctr-twofish-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = twofish_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__lrw-twofish-avx",
|
||||
.cra_driver_name = "__driver-lrw-twofish-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_twofish_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE +
|
||||
TF_BLOCK_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE +
|
||||
TF_BLOCK_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = lrw_twofish_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "__xts-twofish-avx",
|
||||
.cra_driver_name = "__driver-xts-twofish-avx",
|
||||
.cra_priority = 0,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
|
||||
CRYPTO_ALG_INTERNAL,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = TF_MAX_KEY_SIZE * 2,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = xts_twofish_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ecb(twofish)",
|
||||
.cra_driver_name = "ecb-twofish-avx",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(twofish)",
|
||||
.cra_driver_name = "cbc-twofish-avx",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = __ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(twofish)",
|
||||
.cra_driver_name = "ctr-twofish-avx",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_encrypt,
|
||||
.geniv = "chainiv",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(twofish)",
|
||||
.cra_driver_name = "lrw-twofish-avx",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE +
|
||||
TF_BLOCK_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE +
|
||||
TF_BLOCK_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(twofish)",
|
||||
.cra_driver_name = "xts-twofish-avx",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct async_helper_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_ablkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = ablk_init,
|
||||
.cra_exit = ablk_exit,
|
||||
.cra_u = {
|
||||
.ablkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = TF_MAX_KEY_SIZE * 2,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = ablk_set_key,
|
||||
.encrypt = ablk_encrypt,
|
||||
.decrypt = ablk_decrypt,
|
||||
},
|
||||
},
|
||||
} };
static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];

static int __init twofish_init(void)
{
@@ -563,12 +309,15 @@ static int __init twofish_init(void)
return -ENODEV;
}

return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
return simd_register_skciphers_compat(twofish_algs,
ARRAY_SIZE(twofish_algs),
twofish_simd_algs);
}

static void __exit twofish_exit(void)
{
crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
twofish_simd_algs);
}

module_init(twofish_init);
@@ -20,22 +20,26 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
#include <asm/crypto/twofish.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/twofish.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/twofish.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <asm/crypto/twofish.h>
|
||||
#include <asm/crypto/glue_helper.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/xts.h>
|
||||
|
||||
EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way);
|
||||
EXPORT_SYMBOL_GPL(twofish_dec_blk_3way);
|
||||
|
||||
static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
return twofish_setkey(&tfm->base, key, keylen);
|
||||
}
|
||||
|
||||
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
|
||||
const u8 *src)
|
||||
{
|
||||
@@ -151,284 +155,74 @@ static const struct common_glue_ctx twofish_dec_cbc = {
|
||||
} }
|
||||
};
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&twofish_enc, req);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ecb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
|
||||
return glue_ecb_req_128bit(&twofish_dec, req);
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
|
||||
dst, src, nbytes);
|
||||
return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
|
||||
req);
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int cbc_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
|
||||
nbytes);
|
||||
return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
static int ctr_crypt(struct skcipher_request *req)
|
||||
{
|
||||
return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
|
||||
return glue_ctr_req_128bit(&twofish_ctr, req);
|
||||
}
|
||||
|
||||
static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = TF_BLOCK_SIZE;
|
||||
struct twofish_ctx *ctx = priv;
|
||||
int i;
|
||||
|
||||
if (nbytes == 3 * bsize) {
|
||||
twofish_enc_blk_3way(ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
twofish_enc_blk(ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
|
||||
{
|
||||
const unsigned int bsize = TF_BLOCK_SIZE;
|
||||
struct twofish_ctx *ctx = priv;
|
||||
int i;
|
||||
|
||||
if (nbytes == 3 * bsize) {
|
||||
twofish_dec_blk_3way(ctx, srcdst, srcdst);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
|
||||
twofish_dec_blk(ctx, srcdst, srcdst);
|
||||
}
|
||||
|
||||
int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
int err;
|
||||
|
||||
err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
|
||||
&tfm->crt_flags);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_twofish_setkey);
|
||||
|
||||
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[3];
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &ctx->twofish_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
|
||||
return lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
be128 buf[3];
|
||||
struct lrw_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.table_ctx = &ctx->lrw_table,
|
||||
.crypt_ctx = &ctx->twofish_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
|
||||
return lrw_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
void lrw_twofish_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
lrw_free_table(&ctx->lrw_table);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_twofish_exit_tfm);
|
||||
|
||||
int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
int err;
|
||||
|
||||
err = xts_check_key(tfm, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* first half of xts-key is for crypt */
|
||||
err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* second half of xts-key is for tweak */
|
||||
return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
|
||||
flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xts_twofish_setkey);
|
||||
|
||||
static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
le128 buf[3];
|
||||
struct xts_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.tweak_ctx = &ctx->tweak_ctx,
|
||||
.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
|
||||
.crypt_ctx = &ctx->crypt_ctx,
|
||||
.crypt_fn = encrypt_callback,
|
||||
};
|
||||
|
||||
return xts_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
le128 buf[3];
|
||||
struct xts_crypt_req req = {
|
||||
.tbuf = buf,
|
||||
.tbuflen = sizeof(buf),
|
||||
|
||||
.tweak_ctx = &ctx->tweak_ctx,
|
||||
.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
|
||||
.crypt_ctx = &ctx->crypt_ctx,
|
||||
.crypt_fn = decrypt_callback,
|
||||
};
|
||||
|
||||
return xts_crypt(desc, dst, src, nbytes, &req);
|
||||
}
|
||||
|
||||
static struct crypto_alg tf_algs[5] = { {
|
||||
.cra_name = "ecb(twofish)",
|
||||
.cra_driver_name = "ecb-twofish-3way",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.setkey = twofish_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
static struct skcipher_alg tf_skciphers[] = {
|
||||
{
|
||||
.base.cra_name = "ecb(twofish)",
|
||||
.base.cra_driver_name = "ecb-twofish-3way",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.setkey = twofish_setkey_skcipher,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "cbc(twofish)",
|
||||
.base.cra_driver_name = "cbc-twofish-3way",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.base.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = twofish_setkey_skcipher,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
}, {
|
||||
.base.cra_name = "ctr(twofish)",
|
||||
.base.cra_driver_name = "ctr-twofish-3way",
|
||||
.base.cra_priority = 300,
|
||||
.base.cra_blocksize = 1,
|
||||
.base.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.base.cra_module = THIS_MODULE,
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.chunksize = TF_BLOCK_SIZE,
|
||||
.setkey = twofish_setkey_skcipher,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(twofish)",
|
||||
.cra_driver_name = "cbc-twofish-3way",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = twofish_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(twofish)",
|
||||
.cra_driver_name = "ctr-twofish-3way",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct twofish_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = twofish_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "lrw(twofish)",
|
||||
.cra_driver_name = "lrw-twofish-3way",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_lrw_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_exit = lrw_twofish_exit_tfm,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
|
||||
.max_keysize = TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = lrw_twofish_setkey,
|
||||
.encrypt = lrw_encrypt,
|
||||
.decrypt = lrw_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "xts(twofish)",
|
||||
.cra_driver_name = "xts-twofish-3way",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = TF_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct twofish_xts_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = TF_MIN_KEY_SIZE * 2,
|
||||
.max_keysize = TF_MAX_KEY_SIZE * 2,
|
||||
.ivsize = TF_BLOCK_SIZE,
|
||||
.setkey = xts_twofish_setkey,
|
||||
.encrypt = xts_encrypt,
|
||||
.decrypt = xts_decrypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
};
|
||||
|
||||
static bool is_blacklisted_cpu(void)
|
||||
{
|
||||
@ -478,12 +272,13 @@ static int __init init(void)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(tf_algs, ARRAY_SIZE(tf_algs));
|
||||
return crypto_register_skciphers(tf_skciphers,
|
||||
ARRAY_SIZE(tf_skciphers));
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_algs(tf_algs, ARRAY_SIZE(tf_algs));
|
||||
crypto_unregister_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers));
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
|
@ -2,8 +2,9 @@
|
||||
#ifndef ASM_X86_CAMELLIA_H
|
||||
#define ASM_X86_CAMELLIA_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#define CAMELLIA_MIN_KEY_SIZE 16
|
||||
#define CAMELLIA_MAX_KEY_SIZE 32
|
||||
@ -11,16 +12,13 @@
|
||||
#define CAMELLIA_TABLE_BYTE_LEN 272
|
||||
#define CAMELLIA_PARALLEL_BLOCKS 2
|
||||
|
||||
struct crypto_skcipher;
|
||||
|
||||
struct camellia_ctx {
|
||||
u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
|
||||
u32 key_length;
|
||||
};
|
||||
|
||||
struct camellia_lrw_ctx {
|
||||
struct lrw_table_ctx lrw_table;
|
||||
struct camellia_ctx camellia_ctx;
|
||||
};
|
||||
|
||||
struct camellia_xts_ctx {
|
||||
struct camellia_ctx tweak_ctx;
|
||||
struct camellia_ctx crypt_ctx;
|
||||
@ -30,11 +28,7 @@ extern int __camellia_setkey(struct camellia_ctx *cctx,
|
||||
const unsigned char *key,
|
||||
unsigned int key_len, u32 *flags);
|
||||
|
||||
extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
|
||||
|
||||
extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
|
||||
/* regular block cipher functions */
|
||||
|
@ -45,7 +45,7 @@ struct common_glue_ctx {
|
||||
};
|
||||
|
||||
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
|
||||
struct blkcipher_desc *desc,
|
||||
struct skcipher_walk *walk,
|
||||
bool fpu_enabled, unsigned int nbytes)
|
||||
{
|
||||
if (likely(fpu_blocks_limit < 0))
|
||||
@ -61,33 +61,6 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
|
||||
if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
|
||||
return false;
|
||||
|
||||
if (desc) {
|
||||
/* prevent sleeping if FPU is in use */
|
||||
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
}
|
||||
|
||||
kernel_fpu_begin();
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
|
||||
int fpu_blocks_limit,
|
||||
struct skcipher_walk *walk,
|
||||
bool fpu_enabled, unsigned int nbytes)
|
||||
{
|
||||
if (likely(fpu_blocks_limit < 0))
|
||||
return false;
|
||||
|
||||
if (fpu_enabled)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* Vector-registers are only used when chunk to be processed is large
|
||||
* enough, so do not enable FPU until it is necessary.
|
||||
*/
|
||||
if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
|
||||
return false;
|
||||
|
||||
/* prevent sleeping if FPU is in use */
|
||||
skcipher_walk_atomise(walk);
|
||||
|
||||
@ -126,41 +99,17 @@ static inline void le128_inc(le128 *i)
|
||||
i->b = cpu_to_le64(b);
|
||||
}
|
||||
|
||||
extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
|
||||
struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes);
|
||||
extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
|
||||
struct skcipher_request *req);
|
||||
|
||||
extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
|
||||
struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src,
|
||||
unsigned int nbytes);
|
||||
extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
|
||||
struct skcipher_request *req);
|
||||
|
||||
extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
|
||||
struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src,
|
||||
unsigned int nbytes);
|
||||
extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
|
||||
struct skcipher_request *req);
|
||||
|
||||
extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
|
||||
struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes);
|
||||
|
||||
extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
|
||||
struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes,
|
||||
common_glue_func_t tweak_fn, void *tweak_ctx,
|
||||
void *crypt_ctx);
|
||||
|
||||
extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
|
||||
struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes,
|
||||
common_glue_func_t tweak_fn, void *tweak_ctx,
|
||||
void *crypt_ctx);
|
||||
extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
|
||||
struct skcipher_request *req);
|
||||
|
||||
extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
|
||||
struct skcipher_request *req,
|
||||
|
@ -2,16 +2,14 @@
|
||||
#ifndef ASM_X86_SERPENT_AVX_H
|
||||
#define ASM_X86_SERPENT_AVX_H
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/serpent.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct crypto_skcipher;
|
||||
|
||||
#define SERPENT_PARALLEL_BLOCKS 8
|
||||
|
||||
struct serpent_lrw_ctx {
|
||||
struct lrw_table_ctx lrw_table;
|
||||
struct serpent_ctx serpent_ctx;
|
||||
};
|
||||
|
||||
struct serpent_xts_ctx {
|
||||
struct serpent_ctx tweak_ctx;
|
||||
struct serpent_ctx crypt_ctx;
|
||||
@ -38,12 +36,7 @@ extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
|
||||
extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
|
||||
extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
|
||||
|
||||
extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
|
||||
extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm);
|
||||
|
||||
extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
|
||||
#endif
|
||||
|
@ -4,19 +4,8 @@
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <crypto/twofish.h>
|
||||
#include <crypto/lrw.h>
|
||||
#include <crypto/b128ops.h>
|
||||
|
||||
struct twofish_lrw_ctx {
|
||||
struct lrw_table_ctx lrw_table;
|
||||
struct twofish_ctx twofish_ctx;
|
||||
};
|
||||
|
||||
struct twofish_xts_ctx {
|
||||
struct twofish_ctx tweak_ctx;
|
||||
struct twofish_ctx crypt_ctx;
|
||||
};
|
||||
|
||||
/* regular block cipher functions from twofish_x86_64 module */
|
||||
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
|
||||
const u8 *src);
|
||||
@ -36,12 +25,4 @@ extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
|
||||
extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
|
||||
le128 *iv);
|
||||
|
||||
extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
|
||||
extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
|
||||
|
||||
extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen);
|
||||
|
||||
#endif /* ASM_X86_TWOFISH_H */
|
||||
|
129
crypto/Kconfig
@ -245,10 +245,6 @@ config CRYPTO_TEST
|
||||
help
|
||||
Quick & dirty crypto test module.
|
||||
|
||||
config CRYPTO_ABLK_HELPER
|
||||
tristate
|
||||
select CRYPTO_CRYPTD
|
||||
|
||||
config CRYPTO_SIMD
|
||||
tristate
|
||||
select CRYPTO_CRYPTD
|
||||
@ -324,6 +320,14 @@ config CRYPTO_CBC
|
||||
CBC: Cipher Block Chaining mode
|
||||
This block cipher algorithm is required for IPSec.
|
||||
|
||||
config CRYPTO_CFB
|
||||
tristate "CFB support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
help
|
||||
CFB: Cipher FeedBack mode
|
||||
This block cipher algorithm is required for TPM2 Cryptography.
|
||||
|
||||
config CRYPTO_CTR
|
||||
tristate "CTR support"
|
||||
select CRYPTO_BLKCIPHER
|
||||
@ -1114,7 +1118,7 @@ config CRYPTO_BLOWFISH_COMMON
|
||||
config CRYPTO_BLOWFISH_X86_64
|
||||
tristate "Blowfish cipher algorithm (x86_64)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_BLOWFISH_COMMON
|
||||
help
|
||||
Blowfish cipher algorithm (x86_64), by Bruce Schneier.
|
||||
@ -1145,10 +1149,8 @@ config CRYPTO_CAMELLIA_X86_64
|
||||
tristate "Camellia cipher algorithm (x86_64)"
|
||||
depends on X86 && 64BIT
|
||||
depends on CRYPTO
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Camellia cipher algorithm module (x86_64).
|
||||
|
||||
@ -1164,12 +1166,10 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
|
||||
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
depends on CRYPTO
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_CAMELLIA_X86_64
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SIMD
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Camellia cipher algorithm module (x86_64/AES-NI/AVX).
|
||||
@ -1186,14 +1186,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
|
||||
tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
|
||||
depends on X86 && 64BIT
|
||||
depends on CRYPTO
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_CAMELLIA_X86_64
|
||||
select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
|
||||
|
||||
@ -1238,11 +1231,10 @@ config CRYPTO_CAST5
|
||||
config CRYPTO_CAST5_AVX_X86_64
|
||||
tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_CAST_COMMON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_CAST5
|
||||
select CRYPTO_CAST_COMMON
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
The CAST5 encryption algorithm (synonymous with CAST-128) is
|
||||
described in RFC2144.
|
||||
@ -1261,13 +1253,11 @@ config CRYPTO_CAST6
|
||||
config CRYPTO_CAST6_AVX_X86_64
|
||||
tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_CAST_COMMON
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_CAST6
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_CAST_COMMON
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SIMD
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
The CAST6 encryption algorithm (synonymous with CAST-256) is
|
||||
@ -1294,7 +1284,7 @@ config CRYPTO_DES_SPARC64
|
||||
config CRYPTO_DES3_EDE_X86_64
|
||||
tristate "Triple DES EDE cipher algorithm (x86-64)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_DES
|
||||
help
|
||||
Triple DES EDE (FIPS 46-3) algorithm.
|
||||
@ -1422,13 +1412,10 @@ config CRYPTO_SERPENT
|
||||
config CRYPTO_SERPENT_SSE2_X86_64
|
||||
tristate "Serpent cipher algorithm (x86_64/SSE2)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SERPENT
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_XTS
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
|
||||
|
||||
@ -1444,13 +1431,10 @@ config CRYPTO_SERPENT_SSE2_X86_64
|
||||
config CRYPTO_SERPENT_SSE2_586
|
||||
tristate "Serpent cipher algorithm (i586/SSE2)"
|
||||
depends on X86 && !64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SERPENT
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_XTS
|
||||
select CRYPTO_SIMD
|
||||
help
|
||||
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
|
||||
|
||||
@ -1466,12 +1450,10 @@ config CRYPTO_SERPENT_SSE2_586
|
||||
config CRYPTO_SERPENT_AVX_X86_64
|
||||
tristate "Serpent cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SERPENT
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_SIMD
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
|
||||
@ -1488,14 +1470,7 @@ config CRYPTO_SERPENT_AVX_X86_64
|
||||
config CRYPTO_SERPENT_AVX2_X86_64
|
||||
tristate "Serpent cipher algorithm (x86_64/AVX2)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SERPENT
|
||||
select CRYPTO_SERPENT_AVX_X86_64
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
|
||||
|
||||
@ -1508,6 +1483,45 @@ config CRYPTO_SERPENT_AVX2_X86_64
|
||||
See also:
|
||||
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
|
||||
|
||||
config CRYPTO_SM4
	tristate "SM4 cipher algorithm"
	select CRYPTO_ALGAPI
	help
	  SM4 cipher algorithms (OSCCA GB/T 32907-2016).

	  SM4 (GBT.32907-2016) is a cryptographic standard issued by the
	  Organization of State Commercial Administration of China (OSCCA)
	  as an authorized cryptographic algorithm for use within China.

	  SMS4 was originally created for use in protecting wireless
	  networks, and is mandated in the Chinese National Standard for
	  Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure)
	  (GB.15629.11-2003).

	  The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and
	  standardized through TC 260 of the Standardization Administration
	  of the People's Republic of China (SAC).

	  The input, output, and key of SMS4 are each 128 bits.

	  See also: <https://eprint.iacr.org/2008/329.pdf>

	  If unsure, say N.

config CRYPTO_SPECK
	tristate "Speck cipher algorithm"
	select CRYPTO_ALGAPI
	help
	  Speck is a lightweight block cipher that is tuned for optimal
	  performance in software (rather than hardware).

	  Speck may not be as secure as AES, and should only be used on systems
	  where AES is not fast enough.

	  See also: <https://eprint.iacr.org/2013/404.pdf>

	  If unsure, say N.
|
||||
|
||||
config CRYPTO_TEA
|
||||
tristate "TEA, XTEA and XETA cipher algorithms"
|
||||
select CRYPTO_ALGAPI
|
||||
@ -1581,12 +1595,10 @@ config CRYPTO_TWOFISH_X86_64
|
||||
config CRYPTO_TWOFISH_X86_64_3WAY
|
||||
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_TWOFISH_COMMON
|
||||
select CRYPTO_TWOFISH_X86_64
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Twofish cipher algorithm (x86_64, 3-way parallel).
|
||||
|
||||
@ -1604,15 +1616,12 @@ config CRYPTO_TWOFISH_X86_64_3WAY
|
||||
config CRYPTO_TWOFISH_AVX_X86_64
|
||||
tristate "Twofish cipher algorithm (x86_64/AVX)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_CRYPTD
|
||||
select CRYPTO_ABLK_HELPER
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_GLUE_HELPER_X86
|
||||
select CRYPTO_SIMD
|
||||
select CRYPTO_TWOFISH_COMMON
|
||||
select CRYPTO_TWOFISH_X86_64
|
||||
select CRYPTO_TWOFISH_X86_64_3WAY
|
||||
select CRYPTO_LRW
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Twofish cipher algorithm (x86_64/AVX).
|
||||
|
||||
|
@ -78,6 +78,7 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
|
||||
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
|
||||
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
|
||||
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
|
||||
obj-$(CONFIG_CRYPTO_CFB) += cfb.o
|
||||
obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
|
||||
obj-$(CONFIG_CRYPTO_CTS) += cts.o
|
||||
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
|
||||
@ -100,6 +101,7 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
|
||||
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
|
||||
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
|
||||
CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
|
||||
obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o
|
||||
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
|
||||
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
|
||||
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
|
||||
@ -110,6 +112,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
|
||||
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
|
||||
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
|
||||
obj-$(CONFIG_CRYPTO_SEED) += seed.o
|
||||
obj-$(CONFIG_CRYPTO_SPECK) += speck.o
|
||||
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
|
||||
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
|
||||
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
|
||||
@ -149,6 +152,5 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o
|
||||
obj-$(CONFIG_ASYNC_CORE) += async_tx/
|
||||
obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
|
||||
obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
|
||||
obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
|
||||
crypto_simd-y := simd.o
|
||||
obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
|
||||
|
@ -1,150 +0,0 @@
|
||||
/*
|
||||
* Shared async block cipher helpers
|
||||
*
|
||||
* Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
|
||||
*
|
||||
* Based on aesni-intel_glue.c by:
|
||||
* Copyright (C) 2008, Intel Corp.
|
||||
* Author: Huang Ying <ying.huang@intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/cryptd.h>
|
||||
#include <crypto/ablk_helper.h>
|
||||
#include <asm/simd.h>
|
||||
|
||||
int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
||||
struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
|
||||
int err;
|
||||
|
||||
crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
|
||||
& CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_ablkcipher_setkey(child, key, key_len);
|
||||
crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
|
||||
& CRYPTO_TFM_RES_MASK);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablk_set_key);
|
||||
|
||||
int __ablk_encrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
|
||||
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
||||
struct blkcipher_desc desc;
|
||||
|
||||
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
|
||||
desc.info = req->info;
|
||||
desc.flags = 0;
|
||||
|
||||
return crypto_blkcipher_crt(desc.tfm)->encrypt(
|
||||
&desc, req->dst, req->src, req->nbytes);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__ablk_encrypt);
|
||||
|
||||
int ablk_encrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
|
||||
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
||||
|
||||
if (!may_use_simd() ||
|
||||
(in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
|
||||
struct ablkcipher_request *cryptd_req =
|
||||
ablkcipher_request_ctx(req);
|
||||
|
||||
*cryptd_req = *req;
|
||||
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
|
||||
|
||||
return crypto_ablkcipher_encrypt(cryptd_req);
|
||||
} else {
|
||||
return __ablk_encrypt(req);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablk_encrypt);
|
||||
|
||||
int ablk_decrypt(struct ablkcipher_request *req)
|
||||
{
|
||||
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
|
||||
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
|
||||
|
||||
if (!may_use_simd() ||
|
||||
(in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
|
||||
struct ablkcipher_request *cryptd_req =
|
||||
ablkcipher_request_ctx(req);
|
||||
|
||||
*cryptd_req = *req;
|
||||
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
|
||||
|
||||
return crypto_ablkcipher_decrypt(cryptd_req);
|
||||
} else {
|
||||
struct blkcipher_desc desc;
|
||||
|
||||
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
|
||||
desc.info = req->info;
|
||||
desc.flags = 0;
|
||||
|
||||
return crypto_blkcipher_crt(desc.tfm)->decrypt(
|
||||
&desc, req->dst, req->src, req->nbytes);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablk_decrypt);
|
||||
|
||||
void ablk_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
cryptd_free_ablkcipher(ctx->cryptd_tfm);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablk_exit);
|
||||
|
||||
int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
|
||||
{
|
||||
struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct cryptd_ablkcipher *cryptd_tfm;
|
||||
|
||||
cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, CRYPTO_ALG_INTERNAL,
|
||||
CRYPTO_ALG_INTERNAL);
|
||||
if (IS_ERR(cryptd_tfm))
|
||||
return PTR_ERR(cryptd_tfm);
|
||||
|
||||
ctx->cryptd_tfm = cryptd_tfm;
|
||||
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
|
||||
crypto_ablkcipher_reqsize(&cryptd_tfm->base);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablk_init_common);
|
||||
|
||||
int ablk_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
char drv_name[CRYPTO_MAX_ALG_NAME];
|
||||
|
||||
snprintf(drv_name, sizeof(drv_name), "__driver-%s",
|
||||
crypto_tfm_alg_driver_name(tfm));
|
||||
|
||||
return ablk_init_common(tfm, drv_name);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ablk_init);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
@ -92,13 +92,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
|
||||
|
||||
if (nbytes && walk->offset & alignmask && !err) {
|
||||
walk->offset = ALIGN(walk->offset, alignmask + 1);
|
||||
walk->data += walk->offset;
|
||||
|
||||
nbytes = min(nbytes,
|
||||
((unsigned int)(PAGE_SIZE)) - walk->offset);
|
||||
walk->entrylen -= nbytes;
|
||||
|
||||
return nbytes;
|
||||
if (nbytes) {
|
||||
walk->data += walk->offset;
|
||||
return nbytes;
|
||||
}
|
||||
}
|
||||
|
||||
if (walk->flags & CRYPTO_ALG_ASYNC)
|
||||
@ -446,24 +447,12 @@ static int ahash_def_finup(struct ahash_request *req)
|
||||
return ahash_def_finup_finish1(req, err);
|
||||
}
|
||||
|
||||
static int ahash_no_export(struct ahash_request *req, void *out)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int ahash_no_import(struct ahash_request *req, const void *in)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
|
||||
struct ahash_alg *alg = crypto_ahash_alg(hash);
|
||||
|
||||
hash->setkey = ahash_nosetkey;
|
||||
hash->export = ahash_no_export;
|
||||
hash->import = ahash_no_import;
|
||||
|
||||
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
|
||||
return crypto_init_shash_ops_async(tfm);
|
||||
@ -473,16 +462,14 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
|
||||
hash->final = alg->final;
|
||||
hash->finup = alg->finup ?: ahash_def_finup;
|
||||
hash->digest = alg->digest;
|
||||
hash->export = alg->export;
|
||||
hash->import = alg->import;
|
||||
|
||||
if (alg->setkey) {
|
||||
hash->setkey = alg->setkey;
|
||||
if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
|
||||
crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
|
||||
}
|
||||
if (alg->export)
|
||||
hash->export = alg->export;
|
||||
if (alg->import)
|
||||
hash->import = alg->import;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -543,9 +543,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
|
||||
inst->alg.cra_module = tmpl->module;
|
||||
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
|
||||
|
||||
if (unlikely(!crypto_mod_get(&inst->alg)))
|
||||
return -EAGAIN;
|
||||
|
||||
down_write(&crypto_alg_sem);
|
||||
|
||||
larval = __crypto_register_alg(&inst->alg);
|
||||
@ -563,14 +560,9 @@ unlock:
|
||||
goto err;
|
||||
|
||||
crypto_wait_for_test(larval);
|
||||
|
||||
/* Remove instance if test failed */
|
||||
if (!(inst->alg.cra_flags & CRYPTO_ALG_TESTED))
|
||||
crypto_unregister_instance(inst);
|
||||
err = 0;
|
||||
|
||||
err:
|
||||
crypto_mod_put(&inst->alg);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_register_instance);
|
||||
|
34
crypto/api.c
@ -193,17 +193,24 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
|
||||
return alg;
|
||||
}
|
||||
|
||||
struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
|
||||
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
|
||||
u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
u32 test = 0;
|
||||
|
||||
if (!((type | mask) & CRYPTO_ALG_TESTED))
|
||||
test |= CRYPTO_ALG_TESTED;
|
||||
|
||||
down_read(&crypto_alg_sem);
|
||||
alg = __crypto_alg_lookup(name, type, mask);
|
||||
alg = __crypto_alg_lookup(name, type | test, mask | test);
|
||||
if (!alg && test)
|
||||
alg = __crypto_alg_lookup(name, type, mask) ?
|
||||
ERR_PTR(-ELIBBAD) : NULL;
|
||||
up_read(&crypto_alg_sem);
|
||||
|
||||
return alg;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_alg_lookup);
|
||||
|
||||
static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
|
||||
u32 mask)
|
||||
@ -227,10 +234,12 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
|
||||
alg = crypto_alg_lookup(name, type, mask);
|
||||
}
|
||||
|
||||
if (alg)
|
||||
return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
|
||||
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
|
||||
alg = crypto_larval_wait(alg);
|
||||
else if (!alg)
|
||||
alg = crypto_larval_add(name, type, mask);
|
||||
|
||||
return crypto_larval_add(name, type, mask);
|
||||
return alg;
|
||||
}
|
||||
|
||||
int crypto_probing_notify(unsigned long val, void *v)
|
||||
@ -253,11 +262,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
|
||||
struct crypto_alg *larval;
|
||||
int ok;
|
||||
|
||||
if (!((type | mask) & CRYPTO_ALG_TESTED)) {
|
||||
type |= CRYPTO_ALG_TESTED;
|
||||
mask |= CRYPTO_ALG_TESTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the internal flag is set for a cipher, require a caller to
|
||||
* to invoke the cipher with the internal flag to use that cipher.
|
||||
@ -485,20 +489,14 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
|
||||
const struct crypto_type *frontend,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
|
||||
crypto_alg_mod_lookup;
|
||||
|
||||
if (frontend) {
|
||||
type &= frontend->maskclear;
|
||||
mask &= frontend->maskclear;
|
||||
type |= frontend->type;
|
||||
mask |= frontend->maskset;
|
||||
|
||||
if (frontend->lookup)
|
||||
lookup = frontend->lookup;
|
||||
}
|
||||
|
||||
return lookup(alg_name, type, mask);
|
||||
return crypto_alg_mod_lookup(alg_name, type, mask);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_find_alg);
|
||||
|
||||
|
353
crypto/cfb.c
Normal file
@ -0,0 +1,353 @@
|
||||
//SPDX-License-Identifier: GPL-2.0
/*
 * CFB: Cipher FeedBack mode
 *
 * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
 *
 * CFB is a stream cipher mode which is layered on to a block
 * encryption scheme. It works very much like a one time pad where
 * the pad is generated initially from the encrypted IV and then
 * subsequently from the encrypted previous block of ciphertext. The
 * pad is XOR'd into the plain text to get the final ciphertext.
 *
 * The scheme of CFB is best described by wikipedia:
 *
 * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
 *
 * Note that since the pad for both encryption and decryption is
 * generated by an encryption operation, CFB never uses the block
 * decryption function.
 */
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct crypto_cfb_ctx {
|
||||
struct crypto_cipher *child;
|
||||
};
|
||||
|
||||
static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
|
||||
return crypto_cipher_blocksize(child);
|
||||
}
|
||||
|
||||
static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
|
||||
const u8 *src, u8 *dst)
|
||||
{
|
||||
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
crypto_cipher_encrypt_one(ctx->child, dst, src);
|
||||
}
|
||||
|
||||
/* final encrypt and decrypt is the same */
|
||||
static void crypto_cfb_final(struct skcipher_walk *walk,
|
||||
struct crypto_skcipher *tfm)
|
||||
{
|
||||
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
|
||||
u8 tmp[bsize + alignmask];
|
||||
u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
|
||||
crypto_cfb_encrypt_one(tfm, iv, stream);
|
||||
crypto_xor_cpy(dst, stream, src, nbytes);
|
||||
}
|
||||
|
||||
static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
|
||||
struct crypto_skcipher *tfm)
|
||||
{
|
||||
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
|
||||
do {
|
||||
crypto_cfb_encrypt_one(tfm, iv, dst);
|
||||
crypto_xor(dst, src, bsize);
|
||||
memcpy(iv, dst, bsize);
|
||||
|
||||
src += bsize;
|
||||
dst += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
|
||||
struct crypto_skcipher *tfm)
|
||||
{
|
||||
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
u8 tmp[bsize];
|
||||
|
||||
do {
|
||||
crypto_cfb_encrypt_one(tfm, iv, tmp);
|
||||
crypto_xor(src, tmp, bsize);
|
||||
iv = src;
|
||||
|
||||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int crypto_cfb_encrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct skcipher_walk walk;
|
||||
unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while (walk.nbytes >= bsize) {
|
||||
if (walk.src.virt.addr == walk.dst.virt.addr)
|
||||
err = crypto_cfb_encrypt_inplace(&walk, tfm);
|
||||
else
|
||||
err = crypto_cfb_encrypt_segment(&walk, tfm);
|
||||
err = skcipher_walk_done(&walk, err);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
crypto_cfb_final(&walk, tfm);
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
|
||||
struct crypto_skcipher *tfm)
|
||||
{
|
||||
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
|
||||
do {
|
||||
crypto_cfb_encrypt_one(tfm, iv, dst);
|
||||
crypto_xor(dst, iv, bsize);
|
||||
iv = src;
|
||||
|
||||
src += bsize;
|
||||
dst += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
|
||||
struct crypto_skcipher *tfm)
|
||||
{
|
||||
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *iv = walk->iv;
|
||||
u8 tmp[bsize];
|
||||
|
||||
do {
|
||||
crypto_cfb_encrypt_one(tfm, iv, tmp);
|
||||
memcpy(iv, src, bsize);
|
||||
crypto_xor(src, tmp, bsize);
|
||||
src += bsize;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
memcpy(walk->iv, iv, bsize);
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
|
||||
struct crypto_skcipher *tfm)
|
||||
{
|
||||
if (walk->src.virt.addr == walk->dst.virt.addr)
|
||||
return crypto_cfb_decrypt_inplace(walk, tfm);
|
||||
else
|
||||
return crypto_cfb_decrypt_segment(walk, tfm);
|
||||
}
|
||||
|
||||
static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
|
||||
struct crypto_cipher *child = ctx->child;
|
||||
int err;
|
||||
|
||||
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_cipher_setkey(child, key, keylen);
|
||||
crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_cfb_decrypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct skcipher_walk walk;
|
||||
const unsigned int bsize = crypto_cfb_bsize(tfm);
|
||||
int err;
|
||||
|
||||
err = skcipher_walk_virt(&walk, req, false);
|
||||
|
||||
while (walk.nbytes >= bsize) {
|
||||
err = crypto_cfb_decrypt_blocks(&walk, tfm);
|
||||
err = skcipher_walk_done(&walk, err);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
crypto_cfb_final(&walk, tfm);
|
||||
err = skcipher_walk_done(&walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
|
||||
struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
|
||||
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct crypto_cipher *cipher;
|
||||
|
||||
cipher = crypto_spawn_cipher(spawn);
|
||||
if (IS_ERR(cipher))
|
||||
return PTR_ERR(cipher);
|
||||
|
||||
ctx->child = cipher;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
crypto_free_cipher(ctx->child);
|
||||
}
|
||||
|
||||
static void crypto_cfb_free(struct skcipher_instance *inst)
|
||||
{
|
||||
crypto_drop_skcipher(skcipher_instance_ctx(inst));
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
|
||||
{
|
||||
struct skcipher_instance *inst;
|
||||
struct crypto_attr_type *algt;
|
||||
struct crypto_spawn *spawn;
|
||||
struct crypto_alg *alg;
|
||||
u32 mask;
|
||||
int err;
|
||||
|
||||
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
|
||||
algt = crypto_get_attr_type(tb);
|
||||
err = PTR_ERR(algt);
|
||||
if (IS_ERR(algt))
|
||||
goto err_free_inst;
|
||||
|
||||
mask = CRYPTO_ALG_TYPE_MASK |
|
||||
crypto_requires_off(algt->type, algt->mask,
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
|
||||
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
|
||||
err = PTR_ERR(alg);
|
||||
if (IS_ERR(alg))
|
||||
goto err_free_inst;
|
||||
|
||||
spawn = skcipher_instance_ctx(inst);
|
||||
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
|
||||
CRYPTO_ALG_TYPE_MASK);
|
||||
crypto_mod_put(alg);
|
||||
if (err)
|
||||
goto err_free_inst;
|
||||
|
||||
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
|
||||
inst->alg.base.cra_priority = alg->cra_priority;
|
||||
/* we're a stream cipher independent of the crypto cra_blocksize */
|
||||
inst->alg.base.cra_blocksize = 1;
|
||||
inst->alg.base.cra_alignmask = alg->cra_alignmask;
|
||||
|
||||
inst->alg.ivsize = alg->cra_blocksize;
|
||||
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
|
||||
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
|
||||
|
||||
inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);
|
||||
|
||||
inst->alg.init = crypto_cfb_init_tfm;
|
||||
inst->alg.exit = crypto_cfb_exit_tfm;
|
||||
|
||||
inst->alg.setkey = crypto_cfb_setkey;
|
||||
inst->alg.encrypt = crypto_cfb_encrypt;
|
||||
inst->alg.decrypt = crypto_cfb_decrypt;
|
||||
|
||||
inst->free = crypto_cfb_free;
|
||||
|
||||
err = skcipher_register_instance(tmpl, inst);
|
||||
if (err)
|
||||
goto err_drop_spawn;
|
||||
|
||||
out:
|
||||
return err;
|
||||
|
||||
err_drop_spawn:
|
||||
crypto_drop_spawn(spawn);
|
||||
err_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
}
|
||||
|
||||
static struct crypto_template crypto_cfb_tmpl = {
|
||||
.name = "cfb",
|
||||
.create = crypto_cfb_create,
|
||||
.module = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init crypto_cfb_module_init(void)
|
||||
{
|
||||
return crypto_register_template(&crypto_cfb_tmpl);
|
||||
}
|
||||
|
||||
static void __exit crypto_cfb_module_exit(void)
|
||||
{
|
||||
crypto_unregister_template(&crypto_cfb_tmpl);
|
||||
}
|
||||
|
||||
module_init(crypto_cfb_module_init);
|
||||
module_exit(crypto_cfb_module_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("CFB block cipher algorithm");
|
||||
MODULE_ALIAS_CRYPTO("cfb");
|
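(Editorial aside, not part of the commit: the new "cfb" template above is instantiated by name like any other skcipher. The sketch below shows roughly how a caller might exercise it once CRYPTO_CFB is enabled; the function name cfb_selftest_example and the choice of "cfb(aes)" with a zeroed 16-byte key, IV and buffer are illustrative assumptions, not something this commit adds.)

/* Illustrative only: allocate the cfb template around AES, key it, and
 * run one synchronous encryption over a single 16-byte buffer. */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int cfb_selftest_example(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[16] = {}, iv[16] = {}, buf[16] = {};
	int err;

	tfm = crypto_alloc_skcipher("cfb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* crypto_wait_req() turns the async completion into a blocking call */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}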
@ -15,12 +15,49 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/delay.h>
|
||||
#include <crypto/engine.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <uapi/linux/sched/types.h>
|
||||
#include "internal.h"
|
||||
|
||||
#define CRYPTO_ENGINE_MAX_QLEN 10
|
||||
|
||||
/**
|
||||
* crypto_finalize_request - finalize one request if the request is done
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be finalized
|
||||
* @err: error number
|
||||
*/
|
||||
static void crypto_finalize_request(struct crypto_engine *engine,
|
||||
struct crypto_async_request *req, int err)
|
||||
{
|
||||
unsigned long flags;
|
||||
bool finalize_cur_req = false;
|
||||
int ret;
|
||||
struct crypto_engine_ctx *enginectx;
|
||||
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
if (engine->cur_req == req)
|
||||
finalize_cur_req = true;
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
|
||||
if (finalize_cur_req) {
|
||||
enginectx = crypto_tfm_ctx(req->tfm);
|
||||
if (engine->cur_req_prepared &&
|
||||
enginectx->op.unprepare_request) {
|
||||
ret = enginectx->op.unprepare_request(engine, req);
|
||||
if (ret)
|
||||
dev_err(engine->dev, "failed to unprepare request\n");
|
||||
}
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
engine->cur_req = NULL;
|
||||
engine->cur_req_prepared = false;
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
}
|
||||
|
||||
req->complete(req, err);
|
||||
|
||||
kthread_queue_work(engine->kworker, &engine->pump_requests);
|
||||
}
|
||||
|
||||
/**
|
||||
* crypto_pump_requests - dequeue one request from engine queue to process
|
||||
* @engine: the hardware engine
|
||||
@ -34,11 +71,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
|
||||
bool in_kthread)
|
||||
{
|
||||
struct crypto_async_request *async_req, *backlog;
|
||||
struct ahash_request *hreq;
|
||||
struct ablkcipher_request *breq;
|
||||
unsigned long flags;
|
||||
bool was_busy = false;
|
||||
int ret, rtype;
|
||||
int ret;
|
||||
struct crypto_engine_ctx *enginectx;
|
||||
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
|
||||
@ -94,7 +130,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
|
||||
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
|
||||
rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
|
||||
/* Until here we get the request need to be encrypted successfully */
|
||||
if (!was_busy && engine->prepare_crypt_hardware) {
|
||||
ret = engine->prepare_crypt_hardware(engine);
|
||||
@ -104,57 +139,31 @@ static void crypto_pump_requests(struct crypto_engine *engine,
|
||||
}
|
||||
}
|
||||
|
||||
switch (rtype) {
|
||||
case CRYPTO_ALG_TYPE_AHASH:
|
||||
hreq = ahash_request_cast(engine->cur_req);
|
||||
if (engine->prepare_hash_request) {
|
||||
ret = engine->prepare_hash_request(engine, hreq);
|
||||
if (ret) {
|
||||
dev_err(engine->dev, "failed to prepare request: %d\n",
|
||||
ret);
|
||||
goto req_err;
|
||||
}
|
||||
engine->cur_req_prepared = true;
|
||||
}
|
||||
ret = engine->hash_one_request(engine, hreq);
|
||||
enginectx = crypto_tfm_ctx(async_req->tfm);
|
||||
|
||||
if (enginectx->op.prepare_request) {
|
||||
ret = enginectx->op.prepare_request(engine, async_req);
|
||||
if (ret) {
|
||||
dev_err(engine->dev, "failed to hash one request from queue\n");
|
||||
dev_err(engine->dev, "failed to prepare request: %d\n",
|
||||
ret);
|
||||
goto req_err;
|
||||
}
|
||||
return;
|
||||
case CRYPTO_ALG_TYPE_ABLKCIPHER:
|
||||
breq = ablkcipher_request_cast(engine->cur_req);
|
||||
if (engine->prepare_cipher_request) {
|
||||
ret = engine->prepare_cipher_request(engine, breq);
|
||||
if (ret) {
|
||||
dev_err(engine->dev, "failed to prepare request: %d\n",
|
||||
ret);
|
||||
goto req_err;
|
||||
}
|
||||
engine->cur_req_prepared = true;
|
||||
}
|
||||
ret = engine->cipher_one_request(engine, breq);
|
||||
if (ret) {
|
||||
dev_err(engine->dev, "failed to cipher one request from queue\n");
|
||||
goto req_err;
|
||||
}
|
||||
return;
|
||||
default:
|
||||
dev_err(engine->dev, "failed to prepare request of unknown type\n");
|
||||
return;
|
||||
engine->cur_req_prepared = true;
|
||||
}
|
||||
if (!enginectx->op.do_one_request) {
|
||||
dev_err(engine->dev, "failed to do request\n");
|
||||
ret = -EINVAL;
|
||||
goto req_err;
|
||||
}
|
||||
ret = enginectx->op.do_one_request(engine, async_req);
|
||||
if (ret) {
|
||||
dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
|
||||
goto req_err;
|
||||
}
|
||||
return;
|
||||
|
||||
req_err:
|
||||
switch (rtype) {
|
||||
case CRYPTO_ALG_TYPE_AHASH:
|
||||
hreq = ahash_request_cast(engine->cur_req);
|
||||
crypto_finalize_hash_request(engine, hreq, ret);
|
||||
break;
|
||||
case CRYPTO_ALG_TYPE_ABLKCIPHER:
|
||||
breq = ablkcipher_request_cast(engine->cur_req);
|
||||
crypto_finalize_cipher_request(engine, breq, ret);
|
||||
break;
|
||||
}
|
||||
crypto_finalize_request(engine, async_req, ret);
|
||||
return;
|
||||
|
||||
out:
|
||||
@ -170,13 +179,12 @@ static void crypto_pump_work(struct kthread_work *work)
|
||||
}
|
||||
|
||||
/**
|
||||
* crypto_transfer_cipher_request - transfer the new request into the
|
||||
* enginequeue
|
||||
* crypto_transfer_request - transfer the new request into the engine queue
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be listed into the engine queue
|
||||
*/
|
||||
int crypto_transfer_cipher_request(struct crypto_engine *engine,
|
||||
struct ablkcipher_request *req,
|
||||
static int crypto_transfer_request(struct crypto_engine *engine,
|
||||
struct crypto_async_request *req,
|
||||
bool need_pump)
|
||||
{
|
||||
unsigned long flags;
|
||||
@ -189,7 +197,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
|
||||
return -ESHUTDOWN;
|
||||
}
|
||||
|
||||
ret = ablkcipher_enqueue_request(&engine->queue, req);
|
||||
ret = crypto_enqueue_request(&engine->queue, req);
|
||||
|
||||
if (!engine->busy && need_pump)
|
||||
kthread_queue_work(engine->kworker, &engine->pump_requests);
|
||||
@ -197,102 +205,131 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
|
||||
|
||||
/**
|
||||
* crypto_transfer_cipher_request_to_engine - transfer one request to list
|
||||
* crypto_transfer_request_to_engine - transfer one request to list
|
||||
* into the engine queue
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be listed into the engine queue
|
||||
*/
|
||||
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct ablkcipher_request *req)
|
||||
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
|
||||
struct crypto_async_request *req)
|
||||
{
|
||||
return crypto_transfer_cipher_request(engine, req, true);
|
||||
return crypto_transfer_request(engine, req, true);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
|
||||
|
||||
/**
|
||||
* crypto_transfer_hash_request - transfer the new request into the
|
||||
* enginequeue
|
||||
* crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
|
||||
* to list into the engine queue
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be listed into the engine queue
|
||||
* TODO: Remove this function when skcipher conversion is finished
|
||||
*/
|
||||
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct ablkcipher_request *req)
|
||||
{
|
||||
return crypto_transfer_request_to_engine(engine, &req->base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
|
||||
|
||||
/**
|
||||
* crypto_transfer_aead_request_to_engine - transfer one aead_request
|
||||
* to list into the engine queue
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be listed into the engine queue
|
||||
*/
|
||||
int crypto_transfer_hash_request(struct crypto_engine *engine,
|
||||
struct ahash_request *req, bool need_pump)
|
||||
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
|
||||
struct aead_request *req)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
|
||||
if (!engine->running) {
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
return -ESHUTDOWN;
|
||||
}
|
||||
|
||||
ret = ahash_enqueue_request(&engine->queue, req);
|
||||
|
||||
if (!engine->busy && need_pump)
|
||||
kthread_queue_work(engine->kworker, &engine->pump_requests);
|
||||
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
return ret;
|
||||
return crypto_transfer_request_to_engine(engine, &req->base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
|
||||
|
||||
/**
|
||||
* crypto_transfer_hash_request_to_engine - transfer one request to list
|
||||
* into the engine queue
|
||||
* crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
|
||||
* to list into the engine queue
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be listed into the engine queue
|
||||
*/
|
||||
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct akcipher_request *req)
|
||||
{
|
||||
return crypto_transfer_request_to_engine(engine, &req->base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
|
||||
|
||||
/**
|
||||
* crypto_transfer_hash_request_to_engine - transfer one ahash_request
|
||||
* to list into the engine queue
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be listed into the engine queue
|
||||
*/
|
||||
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
|
||||
struct ahash_request *req)
|
||||
{
|
||||
return crypto_transfer_hash_request(engine, req, true);
|
||||
return crypto_transfer_request_to_engine(engine, &req->base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
|
||||
|
||||
/**
|
||||
* crypto_finalize_cipher_request - finalize one request if the request is done
|
||||
* crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
|
||||
* to list into the engine queue
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be listed into the engine queue
|
||||
*/
|
||||
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
|
||||
struct skcipher_request *req)
|
||||
{
|
||||
return crypto_transfer_request_to_engine(engine, &req->base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
|
||||
|
||||
/**
|
||||
* crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
|
||||
* the request is done
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be finalized
|
||||
* @err: error number
|
||||
* TODO: Remove this function when skcipher conversion is finished
|
||||
*/
|
||||
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
|
||||
struct ablkcipher_request *req, int err)
|
||||
{
|
||||
return crypto_finalize_request(engine, &req->base, err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
|
||||
|
||||
/**
|
||||
* crypto_finalize_aead_request - finalize one aead_request if
|
||||
* the request is done
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be finalized
|
||||
* @err: error number
|
||||
*/
|
||||
void crypto_finalize_cipher_request(struct crypto_engine *engine,
|
||||
struct ablkcipher_request *req, int err)
|
||||
void crypto_finalize_aead_request(struct crypto_engine *engine,
|
||||
struct aead_request *req, int err)
|
||||
{
|
||||
unsigned long flags;
|
||||
bool finalize_cur_req = false;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
if (engine->cur_req == &req->base)
|
||||
finalize_cur_req = true;
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
|
||||
if (finalize_cur_req) {
|
||||
if (engine->cur_req_prepared &&
|
||||
engine->unprepare_cipher_request) {
|
||||
ret = engine->unprepare_cipher_request(engine, req);
|
||||
if (ret)
|
||||
dev_err(engine->dev, "failed to unprepare request\n");
|
||||
}
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
engine->cur_req = NULL;
|
||||
engine->cur_req_prepared = false;
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
}
|
||||
|
||||
req->base.complete(&req->base, err);
|
||||
|
||||
kthread_queue_work(engine->kworker, &engine->pump_requests);
|
||||
return crypto_finalize_request(engine, &req->base, err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
|
||||
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
|
||||
|
||||
/**
|
||||
* crypto_finalize_hash_request - finalize one request if the request is done
|
||||
* crypto_finalize_akcipher_request - finalize one akcipher_request if
|
||||
* the request is done
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be finalized
|
||||
* @err: error number
|
||||
*/
|
||||
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
|
||||
struct akcipher_request *req, int err)
|
||||
{
|
||||
return crypto_finalize_request(engine, &req->base, err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
|
||||
|
||||
/**
|
||||
* crypto_finalize_hash_request - finalize one ahash_request if
|
||||
* the request is done
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be finalized
|
||||
* @err: error number
|
||||
@ -300,34 +337,24 @@ EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
|
||||
void crypto_finalize_hash_request(struct crypto_engine *engine,
|
||||
struct ahash_request *req, int err)
|
||||
{
|
||||
unsigned long flags;
|
||||
bool finalize_cur_req = false;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
if (engine->cur_req == &req->base)
|
||||
finalize_cur_req = true;
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
|
||||
if (finalize_cur_req) {
|
||||
if (engine->cur_req_prepared &&
|
||||
engine->unprepare_hash_request) {
|
||||
ret = engine->unprepare_hash_request(engine, req);
|
||||
if (ret)
|
||||
dev_err(engine->dev, "failed to unprepare request\n");
|
||||
}
|
||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||
engine->cur_req = NULL;
|
||||
engine->cur_req_prepared = false;
|
||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||
}
|
||||
|
||||
req->base.complete(&req->base, err);
|
||||
|
||||
kthread_queue_work(engine->kworker, &engine->pump_requests);
|
||||
return crypto_finalize_request(engine, &req->base, err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
|
||||
|
||||
/**
|
||||
* crypto_finalize_skcipher_request - finalize one skcipher_request if
|
||||
* the request is done
|
||||
* @engine: the hardware engine
|
||||
* @req: the request need to be finalized
|
||||
* @err: error number
|
||||
*/
|
||||
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
|
||||
struct skcipher_request *req, int err)
|
||||
{
|
||||
return crypto_finalize_request(engine, &req->base, err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
|
||||
|
||||
/**
|
||||
* crypto_engine_start - start the hardware engine
|
||||
* @engine: the hardware engine need to be started
|
||||
|
@ -271,7 +271,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
|
||||
return -ENOENT;
|
||||
|
||||
err = -ENOMEM;
|
||||
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
|
||||
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
||||
if (!skb)
|
||||
goto drop_alg;
|
||||
|
||||
|
23
crypto/ecc.c
@@ -1025,9 +1025,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
{
	int ret = 0;
	struct ecc_point *product, *pk;
	u64 priv[ndigits];
	u64 rand_z[ndigits];
	unsigned int nbytes;
	u64 *priv, *rand_z;
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	if (!private_key || !public_key || !curve) {
@@ -1035,14 +1033,22 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
		goto out;
	}

	nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
	priv = kmalloc_array(ndigits, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out;
	}

	get_random_bytes(rand_z, nbytes);
	rand_z = kmalloc_array(ndigits, sizeof(*rand_z), GFP_KERNEL);
	if (!rand_z) {
		ret = -ENOMEM;
		goto kfree_out;
	}

	pk = ecc_alloc_point(ndigits);
	if (!pk) {
		ret = -ENOMEM;
		goto out;
		goto kfree_out;
	}

	product = ecc_alloc_point(ndigits);
@@ -1051,6 +1057,8 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
		goto err_alloc_product;
	}

	get_random_bytes(rand_z, ndigits << ECC_DIGITS_TO_BYTES_SHIFT);

	ecc_swap_digits(public_key, pk->x, ndigits);
	ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
	ecc_swap_digits(private_key, priv, ndigits);
@@ -1065,6 +1073,9 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
	ecc_free_point(product);
err_alloc_product:
	ecc_free_point(pk);
kfree_out:
	kzfree(priv);
	kzfree(rand_z);
out:
	return ret;
}
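The hunk above trades variable-length arrays for heap buffers that are scrubbed on free; a standalone sketch of that pattern, with hypothetical names and assuming kernel context:

#include <linux/slab.h>

/* Sketch: allocate a digit buffer for key material, use it, then release
 * it with kzfree(), which zeroes the memory before kfree() so the secret
 * does not linger.
 */
static int example_use_digits(unsigned int ndigits)
{
	u64 *buf = kmalloc_array(ndigits, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... fill and use buf ... */

	kzfree(buf);
	return 0;
}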
@ -89,12 +89,19 @@ static int ecdh_compute_value(struct kpp_request *req)
|
||||
if (!shared_secret)
|
||||
goto free_pubkey;
|
||||
|
||||
copied = sg_copy_to_buffer(req->src, 1, public_key,
|
||||
public_key_sz);
|
||||
if (copied != public_key_sz) {
|
||||
ret = -EINVAL;
|
||||
/* from here on it's invalid parameters */
|
||||
ret = -EINVAL;
|
||||
|
||||
/* must have exactly two points to be on the curve */
|
||||
if (public_key_sz != req->src_len)
|
||||
goto free_all;
|
||||
|
||||
copied = sg_copy_to_buffer(req->src,
|
||||
sg_nents_for_len(req->src,
|
||||
public_key_sz),
|
||||
public_key, public_key_sz);
|
||||
if (copied != public_key_sz)
|
||||
goto free_all;
|
||||
}
|
||||
|
||||
ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
|
||||
ctx->private_key, public_key,
|
||||
@ -111,7 +118,11 @@ static int ecdh_compute_value(struct kpp_request *req)
|
||||
if (ret < 0)
|
||||
goto free_all;
|
||||
|
||||
copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
|
||||
/* might want less than we've got */
|
||||
nbytes = min_t(size_t, nbytes, req->dst_len);
|
||||
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
|
||||
nbytes),
|
||||
buf, nbytes);
|
||||
if (copied != nbytes)
|
||||
ret = -EINVAL;
|
||||
|
||||
|
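The ecdh change above stops assuming the source scatterlist is a single entry and checks the copied length; a small sketch of the copy pattern it adopts (function and buffer names are illustrative):

#include <linux/scatterlist.h>

/* Copy exactly 'len' bytes out of a scatterlist that may span several
 * entries; sg_nents_for_len() reports how many entries cover that length.
 */
static int copy_from_sg(struct scatterlist *src, void *buf, size_t len)
{
	int nents = sg_nents_for_len(src, len);

	if (nents < 0)
		return nents;	/* scatterlist shorter than len */

	if (sg_copy_to_buffer(src, nents, buf, len) != len)
		return -EINVAL;

	return 0;
}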
@ -67,7 +67,6 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
|
||||
}
|
||||
|
||||
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
|
||||
struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
|
||||
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
|
||||
|
||||
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
|
||||
|
crypto/lrw.c (154 lines changed)
@ -28,13 +28,31 @@
|
||||
|
||||
#include <crypto/b128ops.h>
|
||||
#include <crypto/gf128mul.h>
|
||||
#include <crypto/lrw.h>
|
||||
|
||||
#define LRW_BUFFER_SIZE 128u
|
||||
|
||||
#define LRW_BLOCK_SIZE 16
|
||||
|
||||
struct priv {
|
||||
struct crypto_skcipher *child;
|
||||
struct lrw_table_ctx table;
|
||||
|
||||
/*
|
||||
* optimizes multiplying a random (non incrementing, as at the
|
||||
* start of a new sector) value with key2, we could also have
|
||||
* used 4k optimization tables or no optimization at all. In the
|
||||
* latter case we would have to store key2 here
|
||||
*/
|
||||
struct gf128mul_64k *table;
|
||||
|
||||
/*
|
||||
* stores:
|
||||
* key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
|
||||
* key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
|
||||
* key2*{ 0,0,...1,1,1,1,1 }, etc
|
||||
* needed for optimized multiplication of incrementing values
|
||||
* with key2
|
||||
*/
|
||||
be128 mulinc[128];
|
||||
};
|
||||
|
||||
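A worked note on the mulinc[] table above, since the in-code comment is terse; this reading assumes get_index128() returns the number of trailing one-bits of the counter:

/*
 * Multiplication by key2 in GF(2^128) is linear over XOR, and adding one
 * to the sector counter I flips exactly its trailing run of 1-bits plus
 * the next 0-bit.  So if I ends in t one-bits:
 *
 *     (I + 1) * key2 = I * key2 XOR (mask of the low t + 1 bits) * key2
 *                    = T        XOR mulinc[t]
 *
 * Example: I = ...0111 -> I + 1 = ...1000; the flipped mask is 1111
 * (bits 0..3), get_index128() returns 3, and mulinc[3] = key2 * {...,1,1,1,1}.
 * This is the cheap per-block update applied instead of a full multiply.
 */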
struct rctx {
|
||||
@ -65,11 +83,25 @@ static inline void setbit128_bbe(void *b, int bit)
|
||||
), b);
|
||||
}
|
||||
|
||||
int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
|
||||
static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(parent);
|
||||
struct crypto_skcipher *child = ctx->child;
|
||||
int err, bsize = LRW_BLOCK_SIZE;
|
||||
const u8 *tweak = key + keylen - bsize;
|
||||
be128 tmp = { 0 };
|
||||
int i;
|
||||
|
||||
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_skcipher_setkey(child, key, keylen - bsize);
|
||||
crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (ctx->table)
|
||||
gf128mul_free_64k(ctx->table);
|
||||
|
||||
@ -87,34 +119,6 @@ int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_init_table);
|
||||
|
||||
void lrw_free_table(struct lrw_table_ctx *ctx)
|
||||
{
|
||||
if (ctx->table)
|
||||
gf128mul_free_64k(ctx->table);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_free_table);
|
||||
|
||||
static int setkey(struct crypto_skcipher *parent, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(parent);
|
||||
struct crypto_skcipher *child = ctx->child;
|
||||
int err, bsize = LRW_BLOCK_SIZE;
|
||||
const u8 *tweak = key + keylen - bsize;
|
||||
|
||||
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
||||
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
|
||||
CRYPTO_TFM_REQ_MASK);
|
||||
err = crypto_skcipher_setkey(child, key, keylen - bsize);
|
||||
crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
|
||||
CRYPTO_TFM_RES_MASK);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return lrw_init_table(&ctx->table, tweak);
|
||||
}
|
||||
|
||||
static inline void inc(be128 *iv)
|
||||
{
|
||||
@ -238,7 +242,7 @@ static int pre_crypt(struct skcipher_request *req)
|
||||
/* T <- I*Key2, using the optimization
|
||||
* discussed in the specification */
|
||||
be128_xor(&rctx->t, &rctx->t,
|
||||
&ctx->table.mulinc[get_index128(iv)]);
|
||||
&ctx->mulinc[get_index128(iv)]);
|
||||
inc(iv);
|
||||
} while ((avail -= bs) >= bs);
|
||||
|
||||
@ -301,7 +305,7 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
|
||||
memcpy(&rctx->t, req->iv, sizeof(rctx->t));
|
||||
|
||||
/* T <- I*Key2 */
|
||||
gf128mul_64k_bbe(&rctx->t, ctx->table.table);
|
||||
gf128mul_64k_bbe(&rctx->t, ctx->table);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -313,7 +317,7 @@ static void exit_crypt(struct skcipher_request *req)
|
||||
rctx->left = 0;
|
||||
|
||||
if (rctx->ext)
|
||||
kfree(rctx->ext);
|
||||
kzfree(rctx->ext);
|
||||
}
|
||||
|
||||
static int do_encrypt(struct skcipher_request *req, int err)
|
||||
@ -416,85 +420,6 @@ static int decrypt(struct skcipher_request *req)
|
||||
return do_decrypt(req, init_crypt(req, decrypt_done));
|
||||
}
|
||||
|
||||
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
|
||||
struct scatterlist *ssrc, unsigned int nbytes,
|
||||
struct lrw_crypt_req *req)
|
||||
{
|
||||
const unsigned int bsize = LRW_BLOCK_SIZE;
|
||||
const unsigned int max_blks = req->tbuflen / bsize;
|
||||
struct lrw_table_ctx *ctx = req->table_ctx;
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int nblocks;
|
||||
be128 *iv, *src, *dst, *t;
|
||||
be128 *t_buf = req->tbuf;
|
||||
int err, i;
|
||||
|
||||
BUG_ON(max_blks < 1);
|
||||
|
||||
blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
|
||||
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
nbytes = walk.nbytes;
|
||||
if (!nbytes)
|
||||
return err;
|
||||
|
||||
nblocks = min(walk.nbytes / bsize, max_blks);
|
||||
src = (be128 *)walk.src.virt.addr;
|
||||
dst = (be128 *)walk.dst.virt.addr;
|
||||
|
||||
/* calculate first value of T */
|
||||
iv = (be128 *)walk.iv;
|
||||
t_buf[0] = *iv;
|
||||
|
||||
/* T <- I*Key2 */
|
||||
gf128mul_64k_bbe(&t_buf[0], ctx->table);
|
||||
|
||||
i = 0;
|
||||
goto first;
|
||||
|
||||
for (;;) {
|
||||
do {
|
||||
for (i = 0; i < nblocks; i++) {
|
||||
/* T <- I*Key2, using the optimization
|
||||
* discussed in the specification */
|
||||
be128_xor(&t_buf[i], t,
|
||||
&ctx->mulinc[get_index128(iv)]);
|
||||
inc(iv);
|
||||
first:
|
||||
t = &t_buf[i];
|
||||
|
||||
/* PP <- T xor P */
|
||||
be128_xor(dst + i, t, src + i);
|
||||
}
|
||||
|
||||
/* CC <- E(Key2,PP) */
|
||||
req->crypt_fn(req->crypt_ctx, (u8 *)dst,
|
||||
nblocks * bsize);
|
||||
|
||||
/* C <- T xor CC */
|
||||
for (i = 0; i < nblocks; i++)
|
||||
be128_xor(dst + i, dst + i, &t_buf[i]);
|
||||
|
||||
src += nblocks;
|
||||
dst += nblocks;
|
||||
nbytes -= nblocks * bsize;
|
||||
nblocks = min(nbytes / bsize, max_blks);
|
||||
} while (nblocks > 0);
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = walk.nbytes;
|
||||
if (!nbytes)
|
||||
break;
|
||||
|
||||
nblocks = min(nbytes / bsize, max_blks);
|
||||
src = (be128 *)walk.src.virt.addr;
|
||||
dst = (be128 *)walk.dst.virt.addr;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(lrw_crypt);
|
||||
|
||||
static int init_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
|
||||
@ -518,7 +443,8 @@ static void exit_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct priv *ctx = crypto_skcipher_ctx(tfm);
|
||||
|
||||
lrw_free_table(&ctx->table);
|
||||
if (ctx->table)
|
||||
gf128mul_free_64k(ctx->table);
|
||||
crypto_free_skcipher(ctx->child);
|
||||
}
|
||||
|
||||
|
@ -367,7 +367,7 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
|
||||
goto out;
|
||||
|
||||
rctx->out = req->result;
|
||||
err = ahash_mcryptd_update(&rctx->areq);
|
||||
err = crypto_ahash_update(&rctx->areq);
|
||||
if (err) {
|
||||
req->base.complete = rctx->complete;
|
||||
goto out;
|
||||
@ -394,7 +394,7 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
|
||||
goto out;
|
||||
|
||||
rctx->out = req->result;
|
||||
err = ahash_mcryptd_final(&rctx->areq);
|
||||
err = crypto_ahash_final(&rctx->areq);
|
||||
if (err) {
|
||||
req->base.complete = rctx->complete;
|
||||
goto out;
|
||||
@ -420,7 +420,7 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
|
||||
if (unlikely(err == -EINPROGRESS))
|
||||
goto out;
|
||||
rctx->out = req->result;
|
||||
err = ahash_mcryptd_finup(&rctx->areq);
|
||||
err = crypto_ahash_finup(&rctx->areq);
|
||||
|
||||
if (err) {
|
||||
req->base.complete = rctx->complete;
|
||||
@ -455,7 +455,7 @@ static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
|
||||
rctx->complete, req_async);
|
||||
|
||||
rctx->out = req->result;
|
||||
err = ahash_mcryptd_digest(desc);
|
||||
err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
|
||||
|
||||
out:
|
||||
local_bh_disable();
|
||||
@ -612,32 +612,6 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
|
||||
|
||||
int ahash_mcryptd_digest(struct ahash_request *desc)
|
||||
{
|
||||
return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
|
||||
}
|
||||
|
||||
int ahash_mcryptd_update(struct ahash_request *desc)
|
||||
{
|
||||
/* alignment is to be done by multi-buffer crypto algorithm if needed */
|
||||
|
||||
return crypto_ahash_update(desc);
|
||||
}
|
||||
|
||||
int ahash_mcryptd_finup(struct ahash_request *desc)
|
||||
{
|
||||
/* alignment is to be done by multi-buffer crypto algorithm if needed */
|
||||
|
||||
return crypto_ahash_finup(desc);
|
||||
}
|
||||
|
||||
int ahash_mcryptd_final(struct ahash_request *desc)
|
||||
{
|
||||
/* alignment is to be done by multi-buffer crypto algorithm if needed */
|
||||
|
||||
return crypto_ahash_final(desc);
|
||||
}
|
||||
|
||||
struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
|
||||
{
|
||||
struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
|
||||
|
crypto/md4.c (17 lines changed)
@ -64,23 +64,6 @@ static inline u32 H(u32 x, u32 y, u32 z)
|
||||
#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
|
||||
#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
|
||||
|
||||
/* XXX: this stuff can be optimized */
|
||||
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
|
||||
{
|
||||
while (words--) {
|
||||
__le32_to_cpus(buf);
|
||||
buf++;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
|
||||
{
|
||||
while (words--) {
|
||||
__cpu_to_le32s(buf);
|
||||
buf++;
|
||||
}
|
||||
}
|
||||
|
||||
static void md4_transform(u32 *hash, u32 const *in)
|
||||
{
|
||||
u32 a, b, c, d;
|
||||
|
crypto/md5.c (17 lines changed)
@ -32,23 +32,6 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
|
||||
|
||||
/* XXX: this stuff can be optimized */
|
||||
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
|
||||
{
|
||||
while (words--) {
|
||||
__le32_to_cpus(buf);
|
||||
buf++;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
|
||||
{
|
||||
while (words--) {
|
||||
__cpu_to_le32s(buf);
|
||||
buf++;
|
||||
}
|
||||
}
|
||||
|
||||
#define F1(x, y, z) (z ^ (x & (y ^ z)))
|
||||
#define F2(x, y, z) F1(z, x, y)
|
||||
#define F3(x, y, z) (x ^ y ^ z)
|
||||
|
@ -192,7 +192,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
|
||||
if (likely(!pad_len))
|
||||
goto out;
|
||||
|
||||
out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
|
||||
out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
|
||||
err = -ENOMEM;
|
||||
if (!out_buf)
|
||||
goto out;
|
||||
|
@ -221,4 +221,54 @@ void simd_skcipher_free(struct simd_skcipher_alg *salg)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(simd_skcipher_free);
|
||||
|
||||
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
|
||||
struct simd_skcipher_alg **simd_algs)
|
||||
{
|
||||
int err;
|
||||
int i;
|
||||
const char *algname;
|
||||
const char *drvname;
|
||||
const char *basename;
|
||||
struct simd_skcipher_alg *simd;
|
||||
|
||||
err = crypto_register_skciphers(algs, count);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
|
||||
WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
|
||||
algname = algs[i].base.cra_name + 2;
|
||||
drvname = algs[i].base.cra_driver_name + 2;
|
||||
basename = algs[i].base.cra_driver_name;
|
||||
simd = simd_skcipher_create_compat(algname, drvname, basename);
|
||||
err = PTR_ERR(simd);
|
||||
if (IS_ERR(simd))
|
||||
goto err_unregister;
|
||||
simd_algs[i] = simd;
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_unregister:
|
||||
simd_unregister_skciphers(algs, count, simd_algs);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
|
||||
|
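A hedged sketch of how an arch module might use the batch helper above at init time; the algorithm array, its size and all "my_" names are placeholders, and the entries are assumed to carry the "__" prefix the helper checks for:

#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static struct skcipher_alg my_algs[2];		/* "__"-prefixed internal algs */
static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];

static int __init my_mod_init(void)
{
	/* Registers my_algs[] and creates one simd wrapper per entry. */
	return simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
					      my_simd_algs);
}

static void __exit my_mod_exit(void)
{
	simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs), my_simd_algs);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");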
||||
void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
|
||||
struct simd_skcipher_alg **simd_algs)
|
||||
{
|
||||
int i;
|
||||
|
||||
crypto_unregister_skciphers(algs, count);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
if (simd_algs[i]) {
|
||||
simd_skcipher_free(simd_algs[i]);
|
||||
simd_algs[i] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
crypto/sm4_generic.c (new file, 244 lines)
@ -0,0 +1,244 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
/*
|
||||
* SM4 Cipher Algorithm.
|
||||
*
|
||||
* Copyright (C) 2018 ARM Limited or its affiliates.
|
||||
* All rights reserved.
|
||||
*/
|
||||
|
||||
#include <crypto/sm4.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
static const u32 fk[4] = {
|
||||
0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
|
||||
};
|
||||
|
||||
static const u8 sbox[256] = {
|
||||
0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
|
||||
0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
|
||||
0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
|
||||
0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
|
||||
0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
|
||||
0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
|
||||
0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
|
||||
0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
|
||||
0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
|
||||
0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
|
||||
0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
|
||||
0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
|
||||
0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
|
||||
0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
|
||||
0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
|
||||
0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
|
||||
0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
|
||||
0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
|
||||
0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
|
||||
0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
|
||||
0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
|
||||
0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
|
||||
0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
|
||||
0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
|
||||
0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
|
||||
0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
|
||||
0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
|
||||
0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
|
||||
0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
|
||||
0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
|
||||
0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
|
||||
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
|
||||
};
|
||||
|
||||
static const u32 ck[] = {
|
||||
0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
|
||||
0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
|
||||
0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
|
||||
0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
|
||||
0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
|
||||
0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
|
||||
0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
|
||||
0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
|
||||
};
|
||||
|
||||
static u32 sm4_t_non_lin_sub(u32 x)
|
||||
{
|
||||
int i;
|
||||
u8 *b = (u8 *)&x;
|
||||
|
||||
for (i = 0; i < 4; ++i)
|
||||
b[i] = sbox[b[i]];
|
||||
|
||||
return x;
|
||||
}
|
||||
|
||||
static u32 sm4_key_lin_sub(u32 x)
|
||||
{
|
||||
return x ^ rol32(x, 13) ^ rol32(x, 23);
|
||||
|
||||
}
|
||||
|
||||
static u32 sm4_enc_lin_sub(u32 x)
|
||||
{
|
||||
return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
|
||||
}
|
||||
|
||||
static u32 sm4_key_sub(u32 x)
|
||||
{
|
||||
return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
|
||||
}
|
||||
|
||||
static u32 sm4_enc_sub(u32 x)
|
||||
{
|
||||
return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
|
||||
}
|
||||
|
||||
static u32 sm4_round(const u32 *x, const u32 rk)
|
||||
{
|
||||
return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016
|
||||
* @ctx: The location where the computed key will be stored.
|
||||
* @in_key: The supplied key.
|
||||
* @key_len: The length of the supplied key.
|
||||
*
|
||||
* Returns 0 on success. The function fails only if an invalid key size (or
|
||||
* pointer) is supplied.
|
||||
*/
|
||||
int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
u32 rk[4], t;
|
||||
const u32 *key = (u32 *)in_key;
|
||||
int i;
|
||||
|
||||
if (key_len != SM4_KEY_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < 4; ++i)
|
||||
rk[i] = get_unaligned_be32(&key[i]) ^ fk[i];
|
||||
|
||||
for (i = 0; i < 32; ++i) {
|
||||
t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
|
||||
ctx->rkey_enc[i] = t;
|
||||
rk[0] = rk[1];
|
||||
rk[1] = rk[2];
|
||||
rk[2] = rk[3];
|
||||
rk[3] = t;
|
||||
}
|
||||
|
||||
for (i = 0; i < 32; ++i)
|
||||
ctx->rkey_dec[i] = ctx->rkey_enc[31 - i];
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
|
||||
|
||||
/**
|
||||
* crypto_sm4_set_key - Set the AES key.
|
||||
* @tfm: The %crypto_tfm that is used in the context.
|
||||
* @in_key: The input key.
|
||||
* @key_len: The size of the key.
|
||||
*
|
||||
* Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm
|
||||
* is set. The function uses crypto_sm4_expand_key() to expand the key.
|
||||
* &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
|
||||
* retrieved with crypto_tfm_ctx().
|
||||
*/
|
||||
int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
|
||||
unsigned int key_len)
|
||||
{
|
||||
struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
int ret;
|
||||
|
||||
ret = crypto_sm4_expand_key(ctx, in_key, key_len);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
|
||||
return -EINVAL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
|
||||
|
||||
static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
|
||||
{
|
||||
u32 x[4], i, t;
|
||||
|
||||
for (i = 0; i < 4; ++i)
|
||||
x[i] = get_unaligned_be32(&in[i]);
|
||||
|
||||
for (i = 0; i < 32; ++i) {
|
||||
t = sm4_round(x, rk[i]);
|
||||
x[0] = x[1];
|
||||
x[1] = x[2];
|
||||
x[2] = x[3];
|
||||
x[3] = t;
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; ++i)
|
||||
put_unaligned_be32(x[3 - i], &out[i]);
|
||||
}
|
||||
|
||||
/* encrypt a block of text */
|
||||
|
||||
static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
|
||||
}
|
||||
|
||||
/* decrypt a block of text */
|
||||
|
||||
static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
|
||||
}
|
||||
|
||||
static struct crypto_alg sm4_alg = {
|
||||
.cra_name = "sm4",
|
||||
.cra_driver_name = "sm4-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = SM4_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct crypto_sm4_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = SM4_KEY_SIZE,
|
||||
.cia_max_keysize = SM4_KEY_SIZE,
|
||||
.cia_setkey = crypto_sm4_set_key,
|
||||
.cia_encrypt = sm4_encrypt,
|
||||
.cia_decrypt = sm4_decrypt
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init sm4_init(void)
|
||||
{
|
||||
return crypto_register_alg(&sm4_alg);
|
||||
}
|
||||
|
||||
static void __exit sm4_fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&sm4_alg);
|
||||
}
|
||||
|
||||
module_init(sm4_init);
|
||||
module_exit(sm4_fini);
|
||||
|
||||
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS_CRYPTO("sm4");
|
||||
MODULE_ALIAS_CRYPTO("sm4-generic");
|
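For orientation, a minimal sketch of exercising the new cipher through the generic single-block API; the key bytes and buffers are placeholders and error handling is trimmed:

#include <linux/crypto.h>
#include <crypto/sm4.h>

static int sm4_demo(void)
{
	static const u8 key[SM4_KEY_SIZE];	/* 16-byte key, all zero here */
	u8 in[SM4_BLOCK_SIZE] = { }, out[SM4_BLOCK_SIZE];
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("sm4", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);	/* one 16-byte block */

	crypto_free_cipher(tfm);
	return err;
}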
crypto/speck.c (new file, 307 lines)
@ -0,0 +1,307 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Speck: a lightweight block cipher
|
||||
*
|
||||
* Copyright (c) 2018 Google, Inc
|
||||
*
|
||||
* Speck has 10 variants, including 5 block sizes. For now we only implement
|
||||
* the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
|
||||
* Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
|
||||
* and a key size of K bits. The Speck128 variants are believed to be the most
|
||||
* secure variants, and they use the same block size and key sizes as AES. The
|
||||
* Speck64 variants are less secure, but on 32-bit processors are usually
|
||||
* faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
|
||||
* secure and/or not as well suited for implementation on either 32-bit or
|
||||
* 64-bit processors, so are omitted.
|
||||
*
|
||||
* Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
|
||||
* https://eprint.iacr.org/2013/404.pdf
|
||||
*
|
||||
* In a correspondence, the Speck designers have also clarified that the words
|
||||
* should be interpreted in little-endian format, and the words should be
|
||||
* ordered such that the first word of each block is 'y' rather than 'x', and
|
||||
* the first key word (rather than the last) becomes the first round key.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include <crypto/speck.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/* Speck128 */
|
||||
|
||||
static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
|
||||
{
|
||||
*x = ror64(*x, 8);
|
||||
*x += *y;
|
||||
*x ^= k;
|
||||
*y = rol64(*y, 3);
|
||||
*y ^= *x;
|
||||
}
|
||||
|
||||
static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
|
||||
{
|
||||
*y ^= *x;
|
||||
*y = ror64(*y, 3);
|
||||
*x ^= k;
|
||||
*x -= *y;
|
||||
*x = rol64(*x, 8);
|
||||
}
|
||||
|
||||
void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
|
||||
u8 *out, const u8 *in)
|
||||
{
|
||||
u64 y = get_unaligned_le64(in);
|
||||
u64 x = get_unaligned_le64(in + 8);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ctx->nrounds; i++)
|
||||
speck128_round(&x, &y, ctx->round_keys[i]);
|
||||
|
||||
put_unaligned_le64(y, out);
|
||||
put_unaligned_le64(x, out + 8);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
|
||||
|
||||
static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
|
||||
}
|
||||
|
||||
void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
|
||||
u8 *out, const u8 *in)
|
||||
{
|
||||
u64 y = get_unaligned_le64(in);
|
||||
u64 x = get_unaligned_le64(in + 8);
|
||||
int i;
|
||||
|
||||
for (i = ctx->nrounds - 1; i >= 0; i--)
|
||||
speck128_unround(&x, &y, ctx->round_keys[i]);
|
||||
|
||||
put_unaligned_le64(y, out);
|
||||
put_unaligned_le64(x, out + 8);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
|
||||
|
||||
static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
|
||||
}
|
||||
|
||||
int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
u64 l[3];
|
||||
u64 k;
|
||||
int i;
|
||||
|
||||
switch (keylen) {
|
||||
case SPECK128_128_KEY_SIZE:
|
||||
k = get_unaligned_le64(key);
|
||||
l[0] = get_unaligned_le64(key + 8);
|
||||
ctx->nrounds = SPECK128_128_NROUNDS;
|
||||
for (i = 0; i < ctx->nrounds; i++) {
|
||||
ctx->round_keys[i] = k;
|
||||
speck128_round(&l[0], &k, i);
|
||||
}
|
||||
break;
|
||||
case SPECK128_192_KEY_SIZE:
|
||||
k = get_unaligned_le64(key);
|
||||
l[0] = get_unaligned_le64(key + 8);
|
||||
l[1] = get_unaligned_le64(key + 16);
|
||||
ctx->nrounds = SPECK128_192_NROUNDS;
|
||||
for (i = 0; i < ctx->nrounds; i++) {
|
||||
ctx->round_keys[i] = k;
|
||||
speck128_round(&l[i % 2], &k, i);
|
||||
}
|
||||
break;
|
||||
case SPECK128_256_KEY_SIZE:
|
||||
k = get_unaligned_le64(key);
|
||||
l[0] = get_unaligned_le64(key + 8);
|
||||
l[1] = get_unaligned_le64(key + 16);
|
||||
l[2] = get_unaligned_le64(key + 24);
|
||||
ctx->nrounds = SPECK128_256_NROUNDS;
|
||||
for (i = 0; i < ctx->nrounds; i++) {
|
||||
ctx->round_keys[i] = k;
|
||||
speck128_round(&l[i % 3], &k, i);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
|
||||
|
||||
static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
|
||||
}
|
||||
|
||||
/* Speck64 */
|
||||
|
||||
static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
|
||||
{
|
||||
*x = ror32(*x, 8);
|
||||
*x += *y;
|
||||
*x ^= k;
|
||||
*y = rol32(*y, 3);
|
||||
*y ^= *x;
|
||||
}
|
||||
|
||||
static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
|
||||
{
|
||||
*y ^= *x;
|
||||
*y = ror32(*y, 3);
|
||||
*x ^= k;
|
||||
*x -= *y;
|
||||
*x = rol32(*x, 8);
|
||||
}
|
||||
|
||||
void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
|
||||
u8 *out, const u8 *in)
|
||||
{
|
||||
u32 y = get_unaligned_le32(in);
|
||||
u32 x = get_unaligned_le32(in + 4);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ctx->nrounds; i++)
|
||||
speck64_round(&x, &y, ctx->round_keys[i]);
|
||||
|
||||
put_unaligned_le32(y, out);
|
||||
put_unaligned_le32(x, out + 4);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
|
||||
|
||||
static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
|
||||
}
|
||||
|
||||
void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
|
||||
u8 *out, const u8 *in)
|
||||
{
|
||||
u32 y = get_unaligned_le32(in);
|
||||
u32 x = get_unaligned_le32(in + 4);
|
||||
int i;
|
||||
|
||||
for (i = ctx->nrounds - 1; i >= 0; i--)
|
||||
speck64_unround(&x, &y, ctx->round_keys[i]);
|
||||
|
||||
put_unaligned_le32(y, out);
|
||||
put_unaligned_le32(x, out + 4);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
|
||||
|
||||
static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
|
||||
{
|
||||
crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
|
||||
}
|
||||
|
||||
int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
u32 l[3];
|
||||
u32 k;
|
||||
int i;
|
||||
|
||||
switch (keylen) {
|
||||
case SPECK64_96_KEY_SIZE:
|
||||
k = get_unaligned_le32(key);
|
||||
l[0] = get_unaligned_le32(key + 4);
|
||||
l[1] = get_unaligned_le32(key + 8);
|
||||
ctx->nrounds = SPECK64_96_NROUNDS;
|
||||
for (i = 0; i < ctx->nrounds; i++) {
|
||||
ctx->round_keys[i] = k;
|
||||
speck64_round(&l[i % 2], &k, i);
|
||||
}
|
||||
break;
|
||||
case SPECK64_128_KEY_SIZE:
|
||||
k = get_unaligned_le32(key);
|
||||
l[0] = get_unaligned_le32(key + 4);
|
||||
l[1] = get_unaligned_le32(key + 8);
|
||||
l[2] = get_unaligned_le32(key + 12);
|
||||
ctx->nrounds = SPECK64_128_NROUNDS;
|
||||
for (i = 0; i < ctx->nrounds; i++) {
|
||||
ctx->round_keys[i] = k;
|
||||
speck64_round(&l[i % 3], &k, i);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
|
||||
|
||||
static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
|
||||
}
|
||||
|
||||
/* Algorithm definitions */
|
||||
|
||||
static struct crypto_alg speck_algs[] = {
|
||||
{
|
||||
.cra_name = "speck128",
|
||||
.cra_driver_name = "speck128-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = SPECK128_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct speck128_tfm_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = SPECK128_128_KEY_SIZE,
|
||||
.cia_max_keysize = SPECK128_256_KEY_SIZE,
|
||||
.cia_setkey = speck128_setkey,
|
||||
.cia_encrypt = speck128_encrypt,
|
||||
.cia_decrypt = speck128_decrypt
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "speck64",
|
||||
.cra_driver_name = "speck64-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = SPECK64_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct speck64_tfm_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = SPECK64_96_KEY_SIZE,
|
||||
.cia_max_keysize = SPECK64_128_KEY_SIZE,
|
||||
.cia_setkey = speck64_setkey,
|
||||
.cia_encrypt = speck64_encrypt,
|
||||
.cia_decrypt = speck64_decrypt
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init speck_module_init(void)
|
||||
{
|
||||
return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
|
||||
}
|
||||
|
||||
static void __exit speck_module_exit(void)
|
||||
{
|
||||
crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
|
||||
}
|
||||
|
||||
module_init(speck_module_init);
|
||||
module_exit(speck_module_exit);
|
||||
|
||||
MODULE_DESCRIPTION("Speck block cipher (generic)");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
|
||||
MODULE_ALIAS_CRYPTO("speck128");
|
||||
MODULE_ALIAS_CRYPTO("speck128-generic");
|
||||
MODULE_ALIAS_CRYPTO("speck64");
|
||||
MODULE_ALIAS_CRYPTO("speck64-generic");
|
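Similarly, a small illustrative sketch of driving Speck128 through the library routines exported above; the key bytes are placeholders:

#include <crypto/speck.h>

static void speck128_demo(void)
{
	struct speck128_tfm_ctx ctx;
	static const u8 key[SPECK128_128_KEY_SIZE];	/* 16-byte key */
	u8 pt[SPECK128_BLOCK_SIZE] = { }, ct[SPECK128_BLOCK_SIZE];

	if (crypto_speck128_setkey(&ctx, key, sizeof(key)))
		return;

	crypto_speck128_encrypt(&ctx, ct, pt);
	crypto_speck128_decrypt(&ctx, pt, ct);	/* round-trips back to pt */
}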
@ -1983,6 +1983,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
|
||||
case 190:
|
||||
ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
|
||||
break;
|
||||
case 191:
|
||||
ret += tcrypt_test("ecb(sm4)");
|
||||
break;
|
||||
case 200:
|
||||
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
|
||||
speed_template_16_24_32);
|
||||
|
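With case 191 wired up, the SM4 ECB self-test can be triggered the usual tcrypt way, e.g. by loading the module with mode=191 (modprobe tcrypt mode=191); that invocation style is long-standing tcrypt behaviour rather than something introduced here.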
@ -3000,6 +3000,33 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
.dec = __VECS(serpent_dec_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "ecb(sm4)",
|
||||
.test = alg_test_skcipher,
|
||||
.suite = {
|
||||
.cipher = {
|
||||
.enc = __VECS(sm4_enc_tv_template),
|
||||
.dec = __VECS(sm4_dec_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "ecb(speck128)",
|
||||
.test = alg_test_skcipher,
|
||||
.suite = {
|
||||
.cipher = {
|
||||
.enc = __VECS(speck128_enc_tv_template),
|
||||
.dec = __VECS(speck128_dec_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "ecb(speck64)",
|
||||
.test = alg_test_skcipher,
|
||||
.suite = {
|
||||
.cipher = {
|
||||
.enc = __VECS(speck64_enc_tv_template),
|
||||
.dec = __VECS(speck64_dec_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "ecb(tea)",
|
||||
.test = alg_test_skcipher,
|
||||
@ -3557,6 +3584,24 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
.dec = __VECS(serpent_xts_dec_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "xts(speck128)",
|
||||
.test = alg_test_skcipher,
|
||||
.suite = {
|
||||
.cipher = {
|
||||
.enc = __VECS(speck128_xts_enc_tv_template),
|
||||
.dec = __VECS(speck128_xts_dec_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "xts(speck64)",
|
||||
.test = alg_test_skcipher,
|
||||
.suite = {
|
||||
.cipher = {
|
||||
.enc = __VECS(speck64_xts_enc_tv_template),
|
||||
.dec = __VECS(speck64_xts_dec_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "xts(twofish)",
|
||||
.test = alg_test_skcipher,
|
||||
|
crypto/testmgr.h (1882 lines changed; diff suppressed because it is too large)
crypto/xts.c (72 lines changed)
@ -357,78 +357,6 @@ static int decrypt(struct skcipher_request *req)
|
||||
return do_decrypt(req, init_crypt(req, decrypt_done));
|
||||
}
|
||||
|
||||
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
|
||||
struct scatterlist *ssrc, unsigned int nbytes,
|
||||
struct xts_crypt_req *req)
|
||||
{
|
||||
const unsigned int bsize = XTS_BLOCK_SIZE;
|
||||
const unsigned int max_blks = req->tbuflen / bsize;
|
||||
struct blkcipher_walk walk;
|
||||
unsigned int nblocks;
|
||||
le128 *src, *dst, *t;
|
||||
le128 *t_buf = req->tbuf;
|
||||
int err, i;
|
||||
|
||||
BUG_ON(max_blks < 1);
|
||||
|
||||
blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
|
||||
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
nbytes = walk.nbytes;
|
||||
if (!nbytes)
|
||||
return err;
|
||||
|
||||
nblocks = min(nbytes / bsize, max_blks);
|
||||
src = (le128 *)walk.src.virt.addr;
|
||||
dst = (le128 *)walk.dst.virt.addr;
|
||||
|
||||
/* calculate first value of T */
|
||||
req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
|
||||
|
||||
i = 0;
|
||||
goto first;
|
||||
|
||||
for (;;) {
|
||||
do {
|
||||
for (i = 0; i < nblocks; i++) {
|
||||
gf128mul_x_ble(&t_buf[i], t);
|
||||
first:
|
||||
t = &t_buf[i];
|
||||
|
||||
/* PP <- T xor P */
|
||||
le128_xor(dst + i, t, src + i);
|
||||
}
|
||||
|
||||
/* CC <- E(Key2,PP) */
|
||||
req->crypt_fn(req->crypt_ctx, (u8 *)dst,
|
||||
nblocks * bsize);
|
||||
|
||||
/* C <- T xor CC */
|
||||
for (i = 0; i < nblocks; i++)
|
||||
le128_xor(dst + i, dst + i, &t_buf[i]);
|
||||
|
||||
src += nblocks;
|
||||
dst += nblocks;
|
||||
nbytes -= nblocks * bsize;
|
||||
nblocks = min(nbytes / bsize, max_blks);
|
||||
} while (nblocks > 0);
|
||||
|
||||
*(le128 *)walk.iv = *t;
|
||||
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
nbytes = walk.nbytes;
|
||||
if (!nbytes)
|
||||
break;
|
||||
|
||||
nblocks = min(nbytes / bsize, max_blks);
|
||||
src = (le128 *)walk.src.virt.addr;
|
||||
dst = (le128 *)walk.dst.virt.addr;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xts_crypt);
|
||||
|
||||
static int init_tfm(struct crypto_skcipher *tfm)
|
||||
{
|
||||
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
|
||||
|
@ -452,3 +452,10 @@ config UML_RANDOM
|
||||
(check your distro, or download from
|
||||
http://sourceforge.net/projects/gkernel/). rngd periodically reads
|
||||
/dev/hwrng and injects the entropy into /dev/random.
|
||||
|
||||
config HW_RANDOM_KEYSTONE
|
||||
depends on ARCH_KEYSTONE
|
||||
default HW_RANDOM
|
||||
tristate "TI Keystone NETCP SA Hardware random number generator"
|
||||
help
|
||||
This option enables Keystone's hardware random generator.
|
||||
|
@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
|
||||
obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
|
||||
obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
|
||||
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
|
||||
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
|
||||
|
@ -163,6 +163,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
|
||||
|
||||
/* Clock is optional on most platforms */
|
||||
priv->clk = devm_clk_get(dev, NULL);
|
||||
if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
priv->rng.name = pdev->name;
|
||||
priv->rng.init = bcm2835_rng_init;
|
||||
|
@ -77,7 +77,7 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
|
||||
}
|
||||
|
||||
/* Remove the VF */
|
||||
void cavium_rng_remove_vf(struct pci_dev *pdev)
|
||||
static void cavium_rng_remove_vf(struct pci_dev *pdev)
|
||||
{
|
||||
struct cavium_rng *rng;
|
||||
|
||||
|
@ -62,7 +62,7 @@ static int cavium_rng_probe(struct pci_dev *pdev,
|
||||
}
|
||||
|
||||
/* Disable VF and RNG Hardware */
|
||||
void cavium_rng_remove(struct pci_dev *pdev)
|
||||
static void cavium_rng_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct cavium_rng_pf *rng;
|
||||
|
||||
|
@ -300,7 +300,7 @@ static int __maybe_unused imx_rngc_resume(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
|
||||
static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
|
||||
|
||||
static const struct of_device_id imx_rngc_dt_ids[] = {
|
||||
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
|
||||
|
drivers/char/hw_random/ks-sa-rng.c (new file, 257 lines)
@ -0,0 +1,257 @@
|
||||
/*
|
||||
* Random Number Generator driver for the Keystone SOC
|
||||
*
|
||||
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
|
||||
*
|
||||
* Authors: Sandeep Nair
|
||||
* Vitaly Andrianov
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#define SA_CMD_STATUS_OFS 0x8
|
||||
|
||||
/* TRNG enable control in SA System module*/
|
||||
#define SA_CMD_STATUS_REG_TRNG_ENABLE BIT(3)
|
||||
|
||||
/* TRNG start control in TRNG module */
|
||||
#define TRNG_CNTL_REG_TRNG_ENABLE BIT(10)
|
||||
|
||||
/* Data ready indicator in STATUS register */
|
||||
#define TRNG_STATUS_REG_READY BIT(0)
|
||||
|
||||
/* Data ready clear control in INTACK register */
|
||||
#define TRNG_INTACK_REG_READY BIT(0)
|
||||
|
||||
/*
|
||||
* Number of samples taken to gather entropy during startup.
|
||||
* If value is 0, the number of samples is 2^24 else
|
||||
* equals value times 2^8.
|
||||
*/
|
||||
#define TRNG_DEF_STARTUP_CYCLES 0
|
||||
#define TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT 16
|
||||
|
||||
/*
|
||||
* Minimum number of samples taken to regenerate entropy
|
||||
* If value is 0, the number of samples is 2^24 else
|
||||
* equals value times 2^6.
|
||||
*/
|
||||
#define TRNG_DEF_MIN_REFILL_CYCLES 1
|
||||
#define TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT 0
|
||||
|
||||
/*
|
||||
* Maximum number of samples taken to regenerate entropy
|
||||
* If value is 0, the number of samples is 2^24 else
|
||||
* equals value times 2^8.
|
||||
*/
|
||||
#define TRNG_DEF_MAX_REFILL_CYCLES 0
|
||||
#define TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT 16
|
||||
|
||||
/* Number of CLK input cycles between samples */
|
||||
#define TRNG_DEF_CLK_DIV_CYCLES 0
|
||||
#define TRNG_CFG_REG_SAMPLE_DIV_SHIFT 8
|
||||
|
||||
/* Maximum retries to get rng data */
|
||||
#define SA_MAX_RNG_DATA_RETRIES 5
|
||||
/* Delay between retries (in usecs) */
|
||||
#define SA_RNG_DATA_RETRY_DELAY 5
|
||||
|
||||
struct trng_regs {
|
||||
u32 output_l;
|
||||
u32 output_h;
|
||||
u32 status;
|
||||
u32 intmask;
|
||||
u32 intack;
|
||||
u32 control;
|
||||
u32 config;
|
||||
};
|
||||
|
||||
struct ks_sa_rng {
|
||||
struct device *dev;
|
||||
struct hwrng rng;
|
||||
struct clk *clk;
|
||||
struct regmap *regmap_cfg;
|
||||
struct trng_regs *reg_rng;
|
||||
};
|
||||
|
||||
static int ks_sa_rng_init(struct hwrng *rng)
|
||||
{
|
||||
u32 value;
|
||||
struct device *dev = (struct device *)rng->priv;
|
||||
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
|
||||
|
||||
/* Enable RNG module */
|
||||
regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
|
||||
SA_CMD_STATUS_REG_TRNG_ENABLE,
|
||||
SA_CMD_STATUS_REG_TRNG_ENABLE);
|
||||
|
||||
/* Configure RNG module */
|
||||
writel(0, &ks_sa_rng->reg_rng->control);
|
||||
value = TRNG_DEF_STARTUP_CYCLES << TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT;
|
||||
writel(value, &ks_sa_rng->reg_rng->control);
|
||||
|
||||
value = (TRNG_DEF_MIN_REFILL_CYCLES <<
|
||||
TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT) |
|
||||
(TRNG_DEF_MAX_REFILL_CYCLES <<
|
||||
TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT) |
|
||||
(TRNG_DEF_CLK_DIV_CYCLES <<
|
||||
TRNG_CFG_REG_SAMPLE_DIV_SHIFT);
|
||||
|
||||
writel(value, &ks_sa_rng->reg_rng->config);
|
||||
|
||||
/* Disable all interrupts from TRNG */
|
||||
writel(0, &ks_sa_rng->reg_rng->intmask);
|
||||
|
||||
/* Enable RNG */
|
||||
value = readl(&ks_sa_rng->reg_rng->control);
|
||||
value |= TRNG_CNTL_REG_TRNG_ENABLE;
|
||||
writel(value, &ks_sa_rng->reg_rng->control);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ks_sa_rng_cleanup(struct hwrng *rng)
|
||||
{
|
||||
struct device *dev = (struct device *)rng->priv;
|
||||
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
|
||||
|
||||
/* Disable RNG */
|
||||
writel(0, &ks_sa_rng->reg_rng->control);
|
||||
regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
|
||||
SA_CMD_STATUS_REG_TRNG_ENABLE, 0);
|
||||
}
|
||||
|
||||
static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
|
||||
{
|
||||
struct device *dev = (struct device *)rng->priv;
|
||||
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
|
||||
|
||||
/* Read random data */
|
||||
data[0] = readl(&ks_sa_rng->reg_rng->output_l);
|
||||
data[1] = readl(&ks_sa_rng->reg_rng->output_h);
|
||||
|
||||
writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
|
||||
|
||||
return sizeof(u32) * 2;
|
||||
}
|
||||
|
||||
static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
|
||||
{
|
||||
struct device *dev = (struct device *)rng->priv;
|
||||
struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
|
||||
|
||||
u32 ready;
|
||||
int j;
|
||||
|
||||
for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
|
||||
ready = readl(&ks_sa_rng->reg_rng->status);
|
||||
ready &= TRNG_STATUS_REG_READY;
|
||||
|
||||
if (ready || !wait)
|
||||
break;
|
||||
|
||||
udelay(SA_RNG_DATA_RETRY_DELAY);
|
||||
}
|
||||
|
||||
return ready;
|
||||
}
|
||||
|
||||
static int ks_sa_rng_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct ks_sa_rng *ks_sa_rng;
|
||||
struct device *dev = &pdev->dev;
|
||||
int ret;
|
||||
struct resource *mem;
|
||||
|
||||
ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
|
||||
if (!ks_sa_rng)
|
||||
return -ENOMEM;
|
||||
|
||||
ks_sa_rng->dev = dev;
|
||||
ks_sa_rng->rng = (struct hwrng) {
|
||||
.name = "ks_sa_hwrng",
|
||||
.init = ks_sa_rng_init,
|
||||
.data_read = ks_sa_rng_data_read,
|
||||
.data_present = ks_sa_rng_data_present,
|
||||
.cleanup = ks_sa_rng_cleanup,
|
||||
};
|
||||
ks_sa_rng->rng.priv = (unsigned long)dev;
|
||||
|
||||
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
ks_sa_rng->reg_rng = devm_ioremap_resource(dev, mem);
|
||||
if (IS_ERR(ks_sa_rng->reg_rng))
|
||||
return PTR_ERR(ks_sa_rng->reg_rng);
|
||||
|
||||
ks_sa_rng->regmap_cfg =
|
||||
syscon_regmap_lookup_by_phandle(dev->of_node,
|
||||
"ti,syscon-sa-cfg");
|
||||
|
||||
if (IS_ERR(ks_sa_rng->regmap_cfg)) {
|
||||
dev_err(dev, "syscon_node_to_regmap failed\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to enable SA power-domain\n");
|
||||
pm_runtime_disable(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, ks_sa_rng);
|
||||
|
||||
return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
|
||||
}
|
||||
|
||||
static int ks_sa_rng_remove(struct platform_device *pdev)
|
||||
{
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id ks_sa_rng_dt_match[] = {
|
||||
{
|
||||
.compatible = "ti,keystone-rng",
|
||||
},
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, ks_sa_rng_dt_match);
|
||||
|
||||
static struct platform_driver ks_sa_rng_driver = {
|
||||
.driver = {
|
||||
.name = "ks-sa-rng",
|
||||
.of_match_table = ks_sa_rng_dt_match,
|
||||
},
|
||||
.probe = ks_sa_rng_probe,
|
||||
.remove = ks_sa_rng_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(ks_sa_rng_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Keystone NETCP SA H/W Random Number Generator driver");
|
||||
MODULE_AUTHOR("Vitaly Andrianov <vitalya@ti.com>");
|
||||
MODULE_LICENSE("GPL");
|
@ -16,16 +16,13 @@
|
||||
* This driver is based on other RNG drivers.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
/* RNGA Registers */
|
||||
#define RNGA_CONTROL 0x00
|
||||
@ -197,10 +194,18 @@ static int __exit mxc_rnga_remove(struct platform_device *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id mxc_rnga_of_match[] = {
|
||||
{ .compatible = "fsl,imx21-rnga", },
|
||||
{ .compatible = "fsl,imx31-rnga", },
|
||||
{ /* sentinel */ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, mxc_rnga_of_match);
|
||||
|
||||
static struct platform_driver mxc_rnga_driver = {
|
||||
.driver = {
|
||||
.name = "mxc_rnga",
|
||||
},
|
||||
.name = "mxc_rnga",
|
||||
.of_match_table = mxc_rnga_of_match,
|
||||
},
|
||||
.remove = __exit_p(mxc_rnga_remove),
|
||||
};
|
||||
|
||||
|
@ -150,6 +150,7 @@ struct omap_rng_dev {
|
||||
const struct omap_rng_pdata *pdata;
|
||||
struct hwrng rng;
|
||||
struct clk *clk;
|
||||
struct clk *clk_reg;
|
||||
};
|
||||
|
||||
static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
|
||||
@ -480,6 +481,19 @@ static int omap_rng_probe(struct platform_device *pdev)
|
||||
}
|
||||
}
|
||||
|
||||
priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
|
||||
if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
|
||||
return -EPROBE_DEFER;
|
||||
if (!IS_ERR(priv->clk_reg)) {
|
||||
ret = clk_prepare_enable(priv->clk_reg);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev,
|
||||
"Unable to enable the register clk: %d\n",
|
||||
ret);
|
||||
goto err_register;
|
||||
}
|
||||
}
|
||||
|
||||
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
|
||||
get_omap_rng_device_details(priv);
|
||||
if (ret)
|
||||
@ -499,8 +513,8 @@ err_register:
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
if (!IS_ERR(priv->clk))
|
||||
clk_disable_unprepare(priv->clk);
|
||||
clk_disable_unprepare(priv->clk_reg);
|
||||
clk_disable_unprepare(priv->clk);
|
||||
err_ioremap:
|
||||
dev_err(dev, "initialization failed.\n");
|
||||
return ret;
|
||||
@ -517,8 +531,8 @@ static int omap_rng_remove(struct platform_device *pdev)
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
if (!IS_ERR(priv->clk))
|
||||
clk_disable_unprepare(priv->clk);
|
||||
clk_disable_unprepare(priv->clk);
|
||||
clk_disable_unprepare(priv->clk_reg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -16,15 +16,18 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#define RNG_CR 0x00
|
||||
#define RNG_CR_RNGEN BIT(2)
|
||||
#define RNG_CR_CED BIT(5)
|
||||
|
||||
#define RNG_SR 0x04
|
||||
#define RNG_SR_SEIS BIT(6)
|
||||
@ -33,19 +36,12 @@
|
||||
|
||||
#define RNG_DR 0x08
|
||||
|
||||
/*
|
||||
* It takes 40 cycles @ 48MHz to generate each random number (e.g. <1us).
|
||||
* At the time of writing STM32 parts max out at ~200MHz meaning a timeout
|
||||
* of 500 leaves us a very comfortable margin for error. The loop to which
|
||||
* the timeout applies takes at least 4 instructions per iteration so the
|
||||
* timeout is enough to take us up to multi-GHz parts!
|
||||
*/
|
||||
#define RNG_TIMEOUT 500
|
||||
|
||||
struct stm32_rng_private {
|
||||
struct hwrng rng;
|
||||
void __iomem *base;
|
||||
struct clk *clk;
|
||||
struct reset_control *rst;
|
||||
bool ced;
|
||||
};
|
||||
|
||||
static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
|
||||
@ -59,13 +55,16 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
|
||||
|
||||
while (max > sizeof(u32)) {
|
||||
sr = readl_relaxed(priv->base + RNG_SR);
|
||||
/* Manage timeout which is based on timer and take */
|
||||
/* care of initial delay time when enabling rng */
|
||||
if (!sr && wait) {
|
||||
unsigned int timeout = RNG_TIMEOUT;
|
||||
|
||||
do {
|
||||
cpu_relax();
|
||||
sr = readl_relaxed(priv->base + RNG_SR);
|
||||
} while (!sr && --timeout);
|
||||
retval = readl_relaxed_poll_timeout_atomic(priv->base
|
||||
+ RNG_SR,
|
||||
sr, sr,
|
||||
10, 50000);
|
||||
if (retval)
|
||||
dev_err((struct device *)priv->rng.priv,
|
||||
"%s: timeout %x!\n", __func__, sr);
|
||||
}
|
||||
|
||||
/* If error detected or data not ready... */
|
||||
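For readers unfamiliar with the iopoll helper replacing the open-coded loop above, a brief summary of its contract (per include/linux/iopoll.h at the time; treat the details as approximate):

/*
 * readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us)
 * repeatedly reads 'addr' into 'val', busy-waiting delay_us between reads,
 * until 'cond' is true or timeout_us elapses; it returns 0 on success and
 * -ETIMEDOUT otherwise.  Here the condition is a non-zero status word,
 * polled every 10 us with a 50 ms cap.
 */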
@ -99,7 +98,11 @@ static int stm32_rng_init(struct hwrng *rng)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
|
||||
if (priv->ced)
|
||||
writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
|
||||
else
|
||||
writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED,
|
||||
priv->base + RNG_CR);
|
||||
|
||||
/* clear error indicators */
|
||||
writel_relaxed(0, priv->base + RNG_SR);
|
||||
@ -140,6 +143,15 @@ static int stm32_rng_probe(struct platform_device *ofdev)
|
||||
if (IS_ERR(priv->clk))
|
||||
return PTR_ERR(priv->clk);
|
||||
|
||||
priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
|
||||
if (!IS_ERR(priv->rst)) {
|
||||
reset_control_assert(priv->rst);
|
||||
udelay(2);
|
||||
reset_control_deassert(priv->rst);
|
||||
}
|
||||
|
||||
priv->ced = of_property_read_bool(np, "clock-error-detect");
|
||||
|
||||
dev_set_drvdata(dev, priv);
|
||||
|
||||
priv->rng.name = dev_driver_string(dev),
|
||||
|
@ -464,13 +464,6 @@ if CRYPTO_DEV_UX500
|
||||
source "drivers/crypto/ux500/Kconfig"
|
||||
endif # if CRYPTO_DEV_UX500
|
||||
|
||||
config CRYPTO_DEV_BFIN_CRC
|
||||
tristate "Support for Blackfin CRC hardware"
|
||||
depends on BF60x
|
||||
help
|
||||
Newer Blackfin processors have CRC hardware. Select this if you
|
||||
want to use the Blackfin CRC module.
|
||||
|
||||
config CRYPTO_DEV_ATMEL_AUTHENC
|
||||
tristate "Support for Atmel IPSEC/SSL hw accelerator"
|
||||
depends on HAS_DMA
|
||||
@ -730,4 +723,31 @@ config CRYPTO_DEV_ARTPEC6
|
||||
|
||||
To compile this driver as a module, choose M here.
|
||||
|
||||
config CRYPTO_DEV_CCREE
|
||||
tristate "Support for ARM TrustZone CryptoCell family of security processors"
|
||||
depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
|
||||
default n
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_DES
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_AUTHENC
|
||||
select CRYPTO_SHA1
|
||||
select CRYPTO_MD5
|
||||
select CRYPTO_SHA256
|
||||
select CRYPTO_SHA512
|
||||
select CRYPTO_HMAC
|
||||
select CRYPTO_AES
|
||||
select CRYPTO_CBC
|
||||
select CRYPTO_ECB
|
||||
select CRYPTO_CTR
|
||||
select CRYPTO_XTS
|
||||
help
|
||||
Say 'Y' to enable a driver for the REE interface of the Arm
|
||||
TrustZone CryptoCell family of processors. Currently the
|
||||
CryptoCell 712, 710 and 630 are supported.
|
||||
Choose this if you wish to use hardware acceleration of
|
||||
cryptographic operations on the system REE.
|
||||
If unsure say Y.
|
||||
|
||||
endif # CRYPTO_HW
|
||||
|
@ -3,9 +3,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
|
||||
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
|
||||
obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
|
||||
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
|
||||
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
|
||||
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
|
||||
|
@ -2155,7 +2155,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
|
||||
|
||||
badkey:
|
||||
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
memzero_explicit(&key, sizeof(keys));
|
||||
memzero_explicit(&keys, sizeof(keys));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -2602,16 +2602,13 @@ static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pd
}

pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "could not allocate memory for pdata\n");
if (!pdata)
return ERR_PTR(-ENOMEM);
}

pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
if (!pdata->dma_slave) {
dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
devm_kfree(&pdev->dev, pdata);
return ERR_PTR(-ENOMEM);
}
@@ -2649,7 +2646,6 @@ static int atmel_aes_probe(struct platform_device *pdev)

aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
if (aes_dd == NULL) {
dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto aes_dd_err;
}
@@ -2726,18 +2726,14 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
}

pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "could not allocate memory for pdata\n");
if (!pdata)
return ERR_PTR(-ENOMEM);
}

pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
if (!pdata->dma_slave) {
dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
if (!pdata->dma_slave)
return ERR_PTR(-ENOMEM);
}

return pdata;
}
@@ -2758,7 +2754,6 @@ static int atmel_sha_probe(struct platform_device *pdev)

sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
if (sha_dd == NULL) {
dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto sha_dd_err;
}
@@ -1312,18 +1312,14 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
}

pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "could not allocate memory for pdata\n");
if (!pdata)
return ERR_PTR(-ENOMEM);
}

pdata->dma_slave = devm_kzalloc(&pdev->dev,
sizeof(*(pdata->dma_slave)),
GFP_KERNEL);
if (!pdata->dma_slave) {
dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
if (!pdata->dma_slave)
return ERR_PTR(-ENOMEM);
}

return pdata;
}
@@ -1344,7 +1340,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)

tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
if (tdes_dd == NULL) {
dev_err(dev, "unable to alloc data struct.\n");
err = -ENOMEM;
goto tdes_dd_err;
}
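The three Atmel hunks above all drop dev_err() calls on devm_kzalloc()/devm_kmalloc() failure: the allocator core already logs allocation failures, so the error path only needs to propagate -ENOMEM. A minimal sketch of the resulting pattern follows; the structure name is a placeholder, not the actual Atmel platform data type.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_pdata {			/* placeholder, not the Atmel struct */
	int dummy;
};

static struct example_pdata *example_of_init(struct platform_device *pdev)
{
	struct example_pdata *pdata;

	/* devm_kzalloc() logs allocation failures itself, so the error
	 * path simply propagates -ENOMEM without an extra dev_err(). */
	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	return pdata;
}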
@@ -818,7 +818,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)

/* AES hashing keeps key size in type field, so need to copy it here */
if (hash_parms.alg == HASH_ALG_AES)
hash_parms.type = cipher_parms.type;
hash_parms.type = (enum hash_type)cipher_parms.type;
else
hash_parms.type = spu->spu_hash_type(rctx->total_sent);

@@ -1409,7 +1409,7 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
rctx->iv_ctr_len);

if (ctx->auth.alg == HASH_ALG_AES)
hash_parms.type = ctx->cipher_type;
hash_parms.type = (enum hash_type)ctx->cipher_type;

/* General case AAD padding (CCM and RFC4543 special cases below) */
aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
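The two hunks above replace an implicit enum-to-enum assignment with an explicit cast to enum hash_type: for AES-based hashing the SPU reuses the cipher type field to carry the key size, so the numeric value is deliberately carried over. A tiny sketch with hypothetical enum values (not the real iproc definitions) of why the explicit cast is the idiomatic way to document that intent:

/* Hypothetical enums standing in for the iproc SPU types. */
enum cipher_type_sketch { CIPHER_TYPE_SKETCH_AES128 = 1, CIPHER_TYPE_SKETCH_AES256 = 3 };
enum hash_type_sketch   { HASH_TYPE_SKETCH_AES128 = 1, HASH_TYPE_SKETCH_AES256 = 3 };

static enum hash_type_sketch pick_hash_type(enum cipher_type_sketch c)
{
	/* The values are defined to line up, so an explicit cast records
	 * the intent and avoids enum-conversion warnings. */
	return (enum hash_type_sketch)c;
}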
@@ -279,7 +279,6 @@ int do_shash(unsigned char *name, unsigned char *result,
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
pr_err("%s: Memory allocation failure\n", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
@ -1,743 +0,0 @@
|
||||
/*
|
||||
* Cryptographic API.
|
||||
*
|
||||
* Support Blackfin CRC HW acceleration.
|
||||
*
|
||||
* Copyright 2012 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2.
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/cryptohash.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <asm/dma.h>
|
||||
#include <asm/portmux.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include "bfin_crc.h"
|
||||
|
||||
#define CRC_CCRYPTO_QUEUE_LENGTH 5
|
||||
|
||||
#define DRIVER_NAME "bfin-hmac-crc"
|
||||
#define CHKSUM_DIGEST_SIZE 4
|
||||
#define CHKSUM_BLOCK_SIZE 1
|
||||
|
||||
#define CRC_MAX_DMA_DESC 100
|
||||
|
||||
#define CRC_CRYPTO_STATE_UPDATE 1
|
||||
#define CRC_CRYPTO_STATE_FINALUPDATE 2
|
||||
#define CRC_CRYPTO_STATE_FINISH 3
|
||||
|
||||
struct bfin_crypto_crc {
|
||||
struct list_head list;
|
||||
struct device *dev;
|
||||
spinlock_t lock;
|
||||
|
||||
int irq;
|
||||
int dma_ch;
|
||||
u32 poly;
|
||||
struct crc_register *regs;
|
||||
|
||||
struct ahash_request *req; /* current request in operation */
|
||||
struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
|
||||
dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
|
||||
u8 *sg_mid_buf;
|
||||
dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */
|
||||
|
||||
struct tasklet_struct done_task;
|
||||
struct crypto_queue queue; /* waiting requests */
|
||||
|
||||
u8 busy:1; /* crc device in operation flag */
|
||||
};
|
||||
|
||||
static struct bfin_crypto_crc_list {
|
||||
struct list_head dev_list;
|
||||
spinlock_t lock;
|
||||
} crc_list;
|
||||
|
||||
struct bfin_crypto_crc_reqctx {
|
||||
struct bfin_crypto_crc *crc;
|
||||
|
||||
unsigned int total; /* total request bytes */
|
||||
size_t sg_buflen; /* bytes for this update */
|
||||
unsigned int sg_nents;
|
||||
struct scatterlist *sg; /* sg list head for this update*/
|
||||
struct scatterlist bufsl[2]; /* chained sg list */
|
||||
|
||||
size_t bufnext_len;
|
||||
size_t buflast_len;
|
||||
u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next udpate */
|
||||
u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last udpate */
|
||||
|
||||
u8 flag;
|
||||
};
|
||||
|
||||
struct bfin_crypto_crc_ctx {
|
||||
struct bfin_crypto_crc *crc;
|
||||
u32 key;
|
||||
};
|
||||
|
||||
/*
|
||||
* get element in scatter list by given index
|
||||
*/
|
||||
static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
|
||||
unsigned int index)
|
||||
{
|
||||
struct scatterlist *sg = NULL;
|
||||
int i;
|
||||
|
||||
for_each_sg(sg_list, sg, nents, i)
|
||||
if (i == index)
|
||||
break;
|
||||
|
||||
return sg;
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
|
||||
{
|
||||
writel(0, &crc->regs->datacntrld);
|
||||
writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
|
||||
writel(key, &crc->regs->curresult);
|
||||
|
||||
/* setup CRC interrupts */
|
||||
writel(CMPERRI | DCNTEXPI, &crc->regs->status);
|
||||
writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_init(struct ahash_request *req)
|
||||
{
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
|
||||
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
|
||||
struct bfin_crypto_crc *crc;
|
||||
|
||||
dev_dbg(ctx->crc->dev, "crc_init\n");
|
||||
spin_lock_bh(&crc_list.lock);
|
||||
list_for_each_entry(crc, &crc_list.dev_list, list) {
|
||||
crc_ctx->crc = crc;
|
||||
break;
|
||||
}
|
||||
spin_unlock_bh(&crc_list.lock);
|
||||
|
||||
if (sg_nents(req->src) > CRC_MAX_DMA_DESC) {
|
||||
dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
|
||||
CRC_MAX_DMA_DESC);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx->crc = crc;
|
||||
ctx->bufnext_len = 0;
|
||||
ctx->buflast_len = 0;
|
||||
ctx->sg_buflen = 0;
|
||||
ctx->total = 0;
|
||||
ctx->flag = 0;
|
||||
|
||||
/* init crc results */
|
||||
put_unaligned_le32(crc_ctx->key, req->result);
|
||||
|
||||
dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
|
||||
crypto_ahash_digestsize(tfm));
|
||||
|
||||
return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
|
||||
}
|
||||
|
||||
static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
|
||||
{
|
||||
struct scatterlist *sg;
|
||||
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
|
||||
int i = 0, j = 0;
|
||||
unsigned long dma_config;
|
||||
unsigned int dma_count;
|
||||
unsigned int dma_addr;
|
||||
unsigned int mid_dma_count = 0;
|
||||
int dma_mod;
|
||||
|
||||
dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
|
||||
|
||||
for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
|
||||
dma_addr = sg_dma_address(sg);
|
||||
/* deduce extra bytes in last sg */
|
||||
if (sg_is_last(sg))
|
||||
dma_count = sg_dma_len(sg) - ctx->bufnext_len;
|
||||
else
|
||||
dma_count = sg_dma_len(sg);
|
||||
|
||||
if (mid_dma_count) {
|
||||
/* Append last middle dma buffer to 4 bytes with first
|
||||
bytes in current sg buffer. Move addr of current
|
||||
sg and deduce the length of current sg.
|
||||
*/
|
||||
memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
|
||||
sg_virt(sg),
|
||||
CHKSUM_DIGEST_SIZE - mid_dma_count);
|
||||
dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
|
||||
dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
|
||||
|
||||
dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
|
||||
DMAEN | PSIZE_32 | WDSIZE_32;
|
||||
|
||||
/* setup new dma descriptor for next middle dma */
|
||||
crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
|
||||
crc->sg_cpu[i].cfg = dma_config;
|
||||
crc->sg_cpu[i].x_count = 1;
|
||||
crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
|
||||
dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
|
||||
"cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
|
||||
i, crc->sg_cpu[i].start_addr,
|
||||
crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
|
||||
crc->sg_cpu[i].x_modify);
|
||||
i++;
|
||||
}
|
||||
|
||||
dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
|
||||
/* chop current sg dma len to multiple of 32 bits */
|
||||
mid_dma_count = dma_count % 4;
|
||||
dma_count &= ~0x3;
|
||||
|
||||
if (dma_addr % 4 == 0) {
|
||||
dma_config |= WDSIZE_32;
|
||||
dma_count >>= 2;
|
||||
dma_mod = 4;
|
||||
} else if (dma_addr % 2 == 0) {
|
||||
dma_config |= WDSIZE_16;
|
||||
dma_count >>= 1;
|
||||
dma_mod = 2;
|
||||
} else {
|
||||
dma_config |= WDSIZE_8;
|
||||
dma_mod = 1;
|
||||
}
|
||||
|
||||
crc->sg_cpu[i].start_addr = dma_addr;
|
||||
crc->sg_cpu[i].cfg = dma_config;
|
||||
crc->sg_cpu[i].x_count = dma_count;
|
||||
crc->sg_cpu[i].x_modify = dma_mod;
|
||||
dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
|
||||
"cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
|
||||
i, crc->sg_cpu[i].start_addr,
|
||||
crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
|
||||
crc->sg_cpu[i].x_modify);
|
||||
i++;
|
||||
|
||||
if (mid_dma_count) {
|
||||
/* copy extra bytes to next middle dma buffer */
|
||||
memcpy(crc->sg_mid_buf + (i << 2),
|
||||
(u8*)sg_virt(sg) + (dma_count << 2),
|
||||
mid_dma_count);
|
||||
}
|
||||
}
|
||||
|
||||
dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
|
||||
/* For final update req, append the buffer for next update as well*/
|
||||
if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
|
||||
ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
|
||||
crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
|
||||
CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
|
||||
crc->sg_cpu[i].cfg = dma_config;
|
||||
crc->sg_cpu[i].x_count = 1;
|
||||
crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
|
||||
dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
|
||||
"cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
|
||||
i, crc->sg_cpu[i].start_addr,
|
||||
crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
|
||||
crc->sg_cpu[i].x_modify);
|
||||
i++;
|
||||
}
|
||||
|
||||
if (i == 0)
|
||||
return;
|
||||
|
||||
/* Set the last descriptor to stop mode */
|
||||
crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
|
||||
crc->sg_cpu[i - 1].cfg |= DI_EN;
|
||||
set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
|
||||
set_dma_x_count(crc->dma_ch, 0);
|
||||
set_dma_x_modify(crc->dma_ch, 0);
|
||||
set_dma_config(crc->dma_ch, dma_config);
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
|
||||
struct ahash_request *req)
|
||||
{
|
||||
struct crypto_async_request *async_req, *backlog;
|
||||
struct bfin_crypto_crc_reqctx *ctx;
|
||||
struct scatterlist *sg;
|
||||
int ret = 0;
|
||||
int nsg, i, j;
|
||||
unsigned int nextlen;
|
||||
unsigned long flags;
|
||||
u32 reg;
|
||||
|
||||
spin_lock_irqsave(&crc->lock, flags);
|
||||
if (req)
|
||||
ret = ahash_enqueue_request(&crc->queue, req);
|
||||
if (crc->busy) {
|
||||
spin_unlock_irqrestore(&crc->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
backlog = crypto_get_backlog(&crc->queue);
|
||||
async_req = crypto_dequeue_request(&crc->queue);
|
||||
if (async_req)
|
||||
crc->busy = 1;
|
||||
spin_unlock_irqrestore(&crc->lock, flags);
|
||||
|
||||
if (!async_req)
|
||||
return ret;
|
||||
|
||||
if (backlog)
|
||||
backlog->complete(backlog, -EINPROGRESS);
|
||||
|
||||
req = ahash_request_cast(async_req);
|
||||
crc->req = req;
|
||||
ctx = ahash_request_ctx(req);
|
||||
ctx->sg = NULL;
|
||||
ctx->sg_buflen = 0;
|
||||
ctx->sg_nents = 0;
|
||||
|
||||
dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
|
||||
ctx->flag, req->nbytes);
|
||||
|
||||
if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
|
||||
if (ctx->bufnext_len == 0) {
|
||||
crc->busy = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Pack last crc update buffer to 32bit */
|
||||
memset(ctx->bufnext + ctx->bufnext_len, 0,
|
||||
CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
|
||||
} else {
|
||||
/* Pack small data which is less than 32bit to buffer for next update. */
|
||||
if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
|
||||
memcpy(ctx->bufnext + ctx->bufnext_len,
|
||||
sg_virt(req->src), req->nbytes);
|
||||
ctx->bufnext_len += req->nbytes;
|
||||
if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
|
||||
ctx->bufnext_len) {
|
||||
goto finish_update;
|
||||
} else {
|
||||
crc->busy = 0;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx->bufnext_len) {
|
||||
/* Chain in extra bytes of last update */
|
||||
ctx->buflast_len = ctx->bufnext_len;
|
||||
memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);
|
||||
|
||||
nsg = ctx->sg_buflen ? 2 : 1;
|
||||
sg_init_table(ctx->bufsl, nsg);
|
||||
sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
|
||||
if (nsg > 1)
|
||||
sg_chain(ctx->bufsl, nsg, req->src);
|
||||
ctx->sg = ctx->bufsl;
|
||||
} else
|
||||
ctx->sg = req->src;
|
||||
|
||||
/* Chop crc buffer size to multiple of 32 bit */
|
||||
nsg = sg_nents(ctx->sg);
|
||||
ctx->sg_nents = nsg;
|
||||
ctx->sg_buflen = ctx->buflast_len + req->nbytes;
|
||||
ctx->bufnext_len = ctx->sg_buflen % 4;
|
||||
ctx->sg_buflen &= ~0x3;
|
||||
|
||||
if (ctx->bufnext_len) {
|
||||
/* copy extra bytes to buffer for next update */
|
||||
memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
|
||||
nextlen = ctx->bufnext_len;
|
||||
for (i = nsg - 1; i >= 0; i--) {
|
||||
sg = sg_get(ctx->sg, nsg, i);
|
||||
j = min(nextlen, sg_dma_len(sg));
|
||||
memcpy(ctx->bufnext + nextlen - j,
|
||||
sg_virt(sg) + sg_dma_len(sg) - j, j);
|
||||
if (j == sg_dma_len(sg))
|
||||
ctx->sg_nents--;
|
||||
nextlen -= j;
|
||||
if (nextlen == 0)
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
finish_update:
|
||||
if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
|
||||
ctx->flag == CRC_CRYPTO_STATE_FINISH))
|
||||
ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
|
||||
|
||||
/* set CRC data count before start DMA */
|
||||
writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
|
||||
|
||||
/* setup and enable CRC DMA */
|
||||
bfin_crypto_crc_config_dma(crc);
|
||||
|
||||
/* finally kick off CRC operation */
|
||||
reg = readl(&crc->regs->control);
|
||||
writel(reg | BLKEN, &crc->regs->control);
|
||||
|
||||
return -EINPROGRESS;
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_update(struct ahash_request *req)
|
||||
{
|
||||
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
|
||||
|
||||
if (!req->nbytes)
|
||||
return 0;
|
||||
|
||||
dev_dbg(ctx->crc->dev, "crc_update\n");
|
||||
ctx->total += req->nbytes;
|
||||
ctx->flag = CRC_CRYPTO_STATE_UPDATE;
|
||||
|
||||
return bfin_crypto_crc_handle_queue(ctx->crc, req);
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_final(struct ahash_request *req)
|
||||
{
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
|
||||
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
|
||||
|
||||
dev_dbg(ctx->crc->dev, "crc_final\n");
|
||||
ctx->flag = CRC_CRYPTO_STATE_FINISH;
|
||||
crc_ctx->key = 0;
|
||||
|
||||
return bfin_crypto_crc_handle_queue(ctx->crc, req);
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_finup(struct ahash_request *req)
|
||||
{
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
|
||||
struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
|
||||
|
||||
dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
|
||||
ctx->total += req->nbytes;
|
||||
ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
|
||||
crc_ctx->key = 0;
|
||||
|
||||
return bfin_crypto_crc_handle_queue(ctx->crc, req);
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_digest(struct ahash_request *req)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = bfin_crypto_crc_init(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return bfin_crypto_crc_finup(req);
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
|
||||
|
||||
dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
|
||||
if (keylen != CHKSUM_DIGEST_SIZE) {
|
||||
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
crc_ctx->key = get_unaligned_le32(key);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crc_ctx->key = 0;
|
||||
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
|
||||
sizeof(struct bfin_crypto_crc_reqctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
}
|
||||
|
||||
static struct ahash_alg algs = {
|
||||
.init = bfin_crypto_crc_init,
|
||||
.update = bfin_crypto_crc_update,
|
||||
.final = bfin_crypto_crc_final,
|
||||
.finup = bfin_crypto_crc_finup,
|
||||
.digest = bfin_crypto_crc_digest,
|
||||
.setkey = bfin_crypto_crc_setkey,
|
||||
.halg.digestsize = CHKSUM_DIGEST_SIZE,
|
||||
.halg.base = {
|
||||
.cra_name = "hmac(crc32)",
|
||||
.cra_driver_name = DRIVER_NAME,
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_OPTIONAL_KEY,
|
||||
.cra_blocksize = CHKSUM_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
|
||||
.cra_alignmask = 3,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = bfin_crypto_crc_cra_init,
|
||||
.cra_exit = bfin_crypto_crc_cra_exit,
|
||||
}
|
||||
};
|
||||
|
||||
static void bfin_crypto_crc_done_task(unsigned long data)
|
||||
{
|
||||
struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;
|
||||
|
||||
bfin_crypto_crc_handle_queue(crc, NULL);
|
||||
}
|
||||
|
||||
static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct bfin_crypto_crc *crc = dev_id;
|
||||
u32 reg;
|
||||
|
||||
if (readl(&crc->regs->status) & DCNTEXP) {
|
||||
writel(DCNTEXP, &crc->regs->status);
|
||||
|
||||
/* prepare results */
|
||||
put_unaligned_le32(readl(&crc->regs->result),
|
||||
crc->req->result);
|
||||
|
||||
reg = readl(&crc->regs->control);
|
||||
writel(reg & ~BLKEN, &crc->regs->control);
|
||||
crc->busy = 0;
|
||||
|
||||
if (crc->req->base.complete)
|
||||
crc->req->base.complete(&crc->req->base, 0);
|
||||
|
||||
tasklet_schedule(&crc->done_task);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
} else
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/**
|
||||
* bfin_crypto_crc_suspend - suspend crc device
|
||||
* @pdev: device being suspended
|
||||
* @state: requested suspend state
|
||||
*/
|
||||
static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
|
||||
{
|
||||
struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
|
||||
int i = 100000;
|
||||
|
||||
while ((readl(&crc->regs->control) & BLKEN) && --i)
|
||||
cpu_relax();
|
||||
|
||||
if (i == 0)
|
||||
return -EBUSY;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
# define bfin_crypto_crc_suspend NULL
|
||||
#endif
|
||||
|
||||
#define bfin_crypto_crc_resume NULL
|
||||
|
||||
/**
|
||||
* bfin_crypto_crc_probe - Initialize module
|
||||
*
|
||||
*/
|
||||
static int bfin_crypto_crc_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct resource *res;
|
||||
struct bfin_crypto_crc *crc;
|
||||
unsigned int timeout = 100000;
|
||||
int ret;
|
||||
|
||||
crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
|
||||
if (!crc) {
|
||||
dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
crc->dev = dev;
|
||||
|
||||
INIT_LIST_HEAD(&crc->list);
|
||||
spin_lock_init(&crc->lock);
|
||||
tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
|
||||
crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
crc->regs = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR((void *)crc->regs)) {
|
||||
dev_err(&pdev->dev, "Cannot map CRC IO\n");
|
||||
return PTR_ERR((void *)crc->regs);
|
||||
}
|
||||
|
||||
crc->irq = platform_get_irq(pdev, 0);
|
||||
if (crc->irq < 0) {
|
||||
dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
|
||||
IRQF_SHARED, dev_name(dev), crc);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
|
||||
if (res == NULL) {
|
||||
dev_err(&pdev->dev, "No CRC DMA channel specified\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
crc->dma_ch = res->start;
|
||||
|
||||
ret = request_dma(crc->dma_ch, dev_name(dev));
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
|
||||
if (crc->sg_cpu == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out_error_dma;
|
||||
}
|
||||
/*
|
||||
* need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
|
||||
* 1 last + 1 next dma descriptors
|
||||
*/
|
||||
crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
|
||||
crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
|
||||
* ((CRC_MAX_DMA_DESC + 1) << 1);
|
||||
|
||||
writel(0, &crc->regs->control);
|
||||
crc->poly = (u32)pdev->dev.platform_data;
|
||||
writel(crc->poly, &crc->regs->poly);
|
||||
|
||||
while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
|
||||
cpu_relax();
|
||||
|
||||
if (timeout == 0)
|
||||
dev_info(&pdev->dev, "init crc poly timeout\n");
|
||||
|
||||
platform_set_drvdata(pdev, crc);
|
||||
|
||||
spin_lock(&crc_list.lock);
|
||||
list_add(&crc->list, &crc_list.dev_list);
|
||||
spin_unlock(&crc_list.lock);
|
||||
|
||||
if (list_is_singular(&crc_list.dev_list)) {
|
||||
ret = crypto_register_ahash(&algs);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev,
|
||||
"Can't register crypto ahash device\n");
|
||||
goto out_error_dma;
|
||||
}
|
||||
}
|
||||
|
||||
dev_info(&pdev->dev, "initialized\n");
|
||||
|
||||
return 0;
|
||||
|
||||
out_error_dma:
|
||||
if (crc->sg_cpu)
|
||||
dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
|
||||
free_dma(crc->dma_ch);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* bfin_crypto_crc_remove - Initialize module
|
||||
*
|
||||
*/
|
||||
static int bfin_crypto_crc_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
|
||||
|
||||
if (!crc)
|
||||
return -ENODEV;
|
||||
|
||||
spin_lock(&crc_list.lock);
|
||||
list_del(&crc->list);
|
||||
spin_unlock(&crc_list.lock);
|
||||
|
||||
crypto_unregister_ahash(&algs);
|
||||
tasklet_kill(&crc->done_task);
|
||||
free_dma(crc->dma_ch);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver bfin_crypto_crc_driver = {
|
||||
.probe = bfin_crypto_crc_probe,
|
||||
.remove = bfin_crypto_crc_remove,
|
||||
.suspend = bfin_crypto_crc_suspend,
|
||||
.resume = bfin_crypto_crc_resume,
|
||||
.driver = {
|
||||
.name = DRIVER_NAME,
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* bfin_crypto_crc_mod_init - Initialize module
|
||||
*
|
||||
* Checks the module params and registers the platform driver.
|
||||
* Real work is in the platform probe function.
|
||||
*/
|
||||
static int __init bfin_crypto_crc_mod_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
pr_info("Blackfin hardware CRC crypto driver\n");
|
||||
|
||||
INIT_LIST_HEAD(&crc_list.dev_list);
|
||||
spin_lock_init(&crc_list.lock);
|
||||
|
||||
ret = platform_driver_register(&bfin_crypto_crc_driver);
|
||||
if (ret) {
|
||||
pr_err("unable to register driver\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* bfin_crypto_crc_mod_exit - Deinitialize module
|
||||
*/
|
||||
static void __exit bfin_crypto_crc_mod_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&bfin_crypto_crc_driver);
|
||||
}
|
||||
|
||||
module_init(bfin_crypto_crc_mod_init);
|
||||
module_exit(bfin_crypto_crc_mod_exit);
|
||||
|
||||
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
|
||||
MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
|
||||
MODULE_LICENSE("GPL");
|
@ -1,124 +0,0 @@
|
||||
/*
|
||||
* bfin_crc.h - interface to Blackfin CRC controllers
|
||||
*
|
||||
* Copyright 2012 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#ifndef __BFIN_CRC_H__
|
||||
#define __BFIN_CRC_H__
|
||||
|
||||
/* Function driver which use hardware crc must initialize the structure */
|
||||
struct crc_info {
|
||||
/* Input data address */
|
||||
unsigned char *in_addr;
|
||||
/* Output data address */
|
||||
unsigned char *out_addr;
|
||||
/* Input or output bytes */
|
||||
unsigned long datasize;
|
||||
union {
|
||||
/* CRC to compare with that of input buffer */
|
||||
unsigned long crc_compare;
|
||||
/* Value to compare with input data */
|
||||
unsigned long val_verify;
|
||||
/* Value to fill */
|
||||
unsigned long val_fill;
|
||||
};
|
||||
/* Value to program the 32b CRC Polynomial */
|
||||
unsigned long crc_poly;
|
||||
union {
|
||||
/* CRC calculated from the input data */
|
||||
unsigned long crc_result;
|
||||
/* First failed position to verify input data */
|
||||
unsigned long pos_verify;
|
||||
};
|
||||
/* CRC mirror flags */
|
||||
unsigned int bitmirr:1;
|
||||
unsigned int bytmirr:1;
|
||||
unsigned int w16swp:1;
|
||||
unsigned int fdsel:1;
|
||||
unsigned int rsltmirr:1;
|
||||
unsigned int polymirr:1;
|
||||
unsigned int cmpmirr:1;
|
||||
};
|
||||
|
||||
/* Userspace interface */
|
||||
#define CRC_IOC_MAGIC 'C'
|
||||
#define CRC_IOC_CALC_CRC _IOWR('C', 0x01, unsigned int)
|
||||
#define CRC_IOC_MEMCPY_CRC _IOWR('C', 0x02, unsigned int)
|
||||
#define CRC_IOC_VERIFY_VAL _IOWR('C', 0x03, unsigned int)
|
||||
#define CRC_IOC_FILL_VAL _IOWR('C', 0x04, unsigned int)
|
||||
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
struct crc_register {
|
||||
u32 control;
|
||||
u32 datacnt;
|
||||
u32 datacntrld;
|
||||
u32 __pad_1[2];
|
||||
u32 compare;
|
||||
u32 fillval;
|
||||
u32 datafifo;
|
||||
u32 intren;
|
||||
u32 intrenset;
|
||||
u32 intrenclr;
|
||||
u32 poly;
|
||||
u32 __pad_2[4];
|
||||
u32 status;
|
||||
u32 datacntcap;
|
||||
u32 __pad_3;
|
||||
u32 result;
|
||||
u32 curresult;
|
||||
u32 __pad_4[3];
|
||||
u32 revid;
|
||||
};
|
||||
|
||||
/* CRC_STATUS Masks */
|
||||
#define CMPERR 0x00000002 /* Compare error */
|
||||
#define DCNTEXP 0x00000010 /* datacnt register expired */
|
||||
#define IBR 0x00010000 /* Input buffer ready */
|
||||
#define OBR 0x00020000 /* Output buffer ready */
|
||||
#define IRR 0x00040000 /* Immediate result readt */
|
||||
#define LUTDONE 0x00080000 /* Look-up table generation done */
|
||||
#define FSTAT 0x00700000 /* FIFO status */
|
||||
#define MAX_FIFO 4 /* Max fifo size */
|
||||
|
||||
/* CRC_CONTROL Masks */
|
||||
#define BLKEN 0x00000001 /* Block enable */
|
||||
#define OPMODE 0x000000F0 /* Operation mode */
|
||||
#define OPMODE_OFFSET 4 /* Operation mode mask offset*/
|
||||
#define MODE_DMACPY_CRC 1 /* MTM CRC compute and compare */
|
||||
#define MODE_DATA_FILL 2 /* MTM data fill */
|
||||
#define MODE_CALC_CRC 3 /* MSM CRC compute and compare */
|
||||
#define MODE_DATA_VERIFY 4 /* MSM data verify */
|
||||
#define AUTOCLRZ 0x00000100 /* Auto clear to zero */
|
||||
#define AUTOCLRF 0x00000200 /* Auto clear to one */
|
||||
#define OBRSTALL 0x00001000 /* Stall on output buffer ready */
|
||||
#define IRRSTALL 0x00002000 /* Stall on immediate result ready */
|
||||
#define BITMIRR 0x00010000 /* Mirror bits within each byte of 32-bit input data */
|
||||
#define BITMIRR_OFFSET 16 /* Mirror bits offset */
|
||||
#define BYTMIRR 0x00020000 /* Mirror bytes of 32-bit input data */
|
||||
#define BYTMIRR_OFFSET 17 /* Mirror bytes offset */
|
||||
#define W16SWP 0x00040000 /* Mirror uppper and lower 16-bit word of 32-bit input data */
|
||||
#define W16SWP_OFFSET 18 /* Mirror 16-bit word offset */
|
||||
#define FDSEL 0x00080000 /* FIFO is written after input data is mirrored */
|
||||
#define FDSEL_OFFSET 19 /* Mirror FIFO offset */
|
||||
#define RSLTMIRR 0x00100000 /* CRC result registers are mirrored. */
|
||||
#define RSLTMIRR_OFFSET 20 /* Mirror CRC result offset. */
|
||||
#define POLYMIRR 0x00200000 /* CRC poly register is mirrored. */
|
||||
#define POLYMIRR_OFFSET 21 /* Mirror CRC poly offset. */
|
||||
#define CMPMIRR 0x00400000 /* CRC compare register is mirrored. */
|
||||
#define CMPMIRR_OFFSET 22 /* Mirror CRC compare offset. */
|
||||
|
||||
/* CRC_INTREN Masks */
|
||||
#define CMPERRI 0x02 /* CRC_ERROR_INTR */
|
||||
#define DCNTEXPI 0x10 /* CRC_STATUS_INTR */
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
@ -328,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
u32 *desc;
|
||||
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
|
||||
ctx->cdata.keylen;
|
||||
@ -349,7 +350,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
|
||||
}
|
||||
|
||||
desc = ctx->sh_desc_enc;
|
||||
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
|
||||
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
|
||||
@ -366,7 +367,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
|
||||
}
|
||||
|
||||
desc = ctx->sh_desc_dec;
|
||||
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
|
||||
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
|
||||
@ -387,6 +388,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
u32 *desc;
|
||||
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
|
||||
ctx->cdata.keylen;
|
||||
@ -408,7 +410,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
|
||||
}
|
||||
|
||||
desc = ctx->sh_desc_enc;
|
||||
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
|
||||
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
|
||||
false);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
|
||||
@ -425,7 +428,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
|
||||
}
|
||||
|
||||
desc = ctx->sh_desc_dec;
|
||||
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
|
||||
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
|
||||
false);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
|
||||
@ -447,6 +451,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
u32 *desc;
|
||||
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
|
||||
ctx->cdata.keylen;
|
||||
@ -468,7 +473,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
|
||||
}
|
||||
|
||||
desc = ctx->sh_desc_enc;
|
||||
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
|
||||
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
|
||||
false);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
|
||||
@ -485,7 +491,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
|
||||
}
|
||||
|
||||
desc = ctx->sh_desc_dec;
|
||||
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
|
||||
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
|
||||
false);
|
||||
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
|
||||
desc_bytes(desc), ctx->dir);
|
||||
|
||||
@ -563,9 +570,11 @@ static int aead_setkey(struct crypto_aead *aead,
|
||||
|
||||
skip_split_key:
|
||||
ctx->cdata.keylen = keys.enckeylen;
|
||||
memzero_explicit(&keys, sizeof(keys));
|
||||
return aead_set_sh_desc(aead);
|
||||
badkey:
|
||||
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
memzero_explicit(&keys, sizeof(keys));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@@ -625,10 +625,13 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
 * @desc: pointer to buffer used for descriptor construction
 * @cdata: pointer to block cipher transform definitions
 *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
 * @ivsize: initialization vector size
 * @icvsize: integrity check value (ICV) size (truncated or full)
 * @is_qi: true when called from caam/qi
 */
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize)
unsigned int ivsize, unsigned int icvsize,
const bool is_qi)
{
u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
*zero_assoc_jump_cmd2;
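With the extra ivsize and is_qi parameters, callers now pass the AEAD IV size and say whether the descriptor will run behind the queue interface. A sketch of a job-ring style caller follows; it mirrors the call sites elsewhere in this commit and assumes the caam driver's internal headers (caamalg_desc.h and the caam_ctx layout), so it is illustrative rather than a complete translation unit.

/* Sketch of a caller after the signature change; ctx fields mirror the
 * caam driver code shown elsewhere in this commit. */
static int example_gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	/* false: job-ring caller; the caam/qi glue passes true instead. */
	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, false);
	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, false);
	return 0;
}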
@ -650,11 +653,35 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
|
||||
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_ENCRYPT);
|
||||
|
||||
if (is_qi) {
|
||||
u32 *wait_load_cmd;
|
||||
|
||||
/* REG3 = assoclen */
|
||||
append_seq_load(desc, 4, LDST_CLASS_DECO |
|
||||
LDST_SRCDST_WORD_DECO_MATH3 |
|
||||
(4 << LDST_OFFSET_SHIFT));
|
||||
|
||||
wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
|
||||
JUMP_COND_CALM | JUMP_COND_NCP |
|
||||
JUMP_COND_NOP | JUMP_COND_NIP |
|
||||
JUMP_COND_NIFP);
|
||||
set_jump_tgt_here(desc, wait_load_cmd);
|
||||
|
||||
append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
|
||||
ivsize);
|
||||
} else {
|
||||
append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
|
||||
CAAM_CMD_SZ);
|
||||
}
|
||||
|
||||
/* if assoclen + cryptlen is ZERO, skip to ICV write */
|
||||
append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
|
||||
zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
|
||||
JUMP_COND_MATH_Z);
|
||||
|
||||
if (is_qi)
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
|
||||
|
||||
/* if assoclen is ZERO, skip reading the assoc data */
|
||||
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
|
||||
@ -686,8 +713,11 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
|
||||
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
|
||||
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
|
||||
|
||||
/* jump the zero-payload commands */
|
||||
append_jump(desc, JUMP_TEST_ALL | 2);
|
||||
/* jump to ICV writing */
|
||||
if (is_qi)
|
||||
append_jump(desc, JUMP_TEST_ALL | 4);
|
||||
else
|
||||
append_jump(desc, JUMP_TEST_ALL | 2);
|
||||
|
||||
/* zero-payload commands */
|
||||
set_jump_tgt_here(desc, zero_payload_jump_cmd);
|
||||
@ -695,10 +725,18 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
|
||||
/* read assoc data */
|
||||
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
|
||||
FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
|
||||
if (is_qi)
|
||||
/* jump to ICV writing */
|
||||
append_jump(desc, JUMP_TEST_ALL | 2);
|
||||
|
||||
/* There is no input data */
|
||||
set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
|
||||
|
||||
if (is_qi)
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
|
||||
FIFOLD_TYPE_LAST1);
|
||||
|
||||
/* write ICV */
|
||||
append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
|
||||
LDST_SRCDST_BYTE_CONTEXT);
|
||||
@ -715,10 +753,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
|
||||
* @desc: pointer to buffer used for descriptor construction
|
||||
* @cdata: pointer to block cipher transform definitions
|
||||
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
|
||||
* @ivsize: initialization vector size
|
||||
* @icvsize: integrity check value (ICV) size (truncated or full)
|
||||
* @is_qi: true when called from caam/qi
|
||||
*/
|
||||
void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
|
||||
unsigned int icvsize)
|
||||
unsigned int ivsize, unsigned int icvsize,
|
||||
const bool is_qi)
|
||||
{
|
||||
u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
|
||||
|
||||
@ -739,6 +780,24 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
|
||||
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
|
||||
|
||||
if (is_qi) {
|
||||
u32 *wait_load_cmd;
|
||||
|
||||
/* REG3 = assoclen */
|
||||
append_seq_load(desc, 4, LDST_CLASS_DECO |
|
||||
LDST_SRCDST_WORD_DECO_MATH3 |
|
||||
(4 << LDST_OFFSET_SHIFT));
|
||||
|
||||
wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
|
||||
JUMP_COND_CALM | JUMP_COND_NCP |
|
||||
JUMP_COND_NOP | JUMP_COND_NIP |
|
||||
JUMP_COND_NIFP);
|
||||
set_jump_tgt_here(desc, wait_load_cmd);
|
||||
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
|
||||
}
|
||||
|
||||
/* if assoclen is ZERO, skip reading the assoc data */
|
||||
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
|
||||
@ -791,10 +850,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
|
||||
* @desc: pointer to buffer used for descriptor construction
|
||||
* @cdata: pointer to block cipher transform definitions
|
||||
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
|
||||
* @ivsize: initialization vector size
|
||||
* @icvsize: integrity check value (ICV) size (truncated or full)
|
||||
* @is_qi: true when called from caam/qi
|
||||
*/
|
||||
void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
|
||||
unsigned int icvsize)
|
||||
unsigned int ivsize, unsigned int icvsize,
|
||||
const bool is_qi)
|
||||
{
|
||||
u32 *key_jump_cmd;
|
||||
|
||||
@ -815,7 +877,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
|
||||
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_ENCRYPT);
|
||||
|
||||
append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
|
||||
if (is_qi) {
|
||||
u32 *wait_load_cmd;
|
||||
|
||||
/* REG3 = assoclen */
|
||||
append_seq_load(desc, 4, LDST_CLASS_DECO |
|
||||
LDST_SRCDST_WORD_DECO_MATH3 |
|
||||
(4 << LDST_OFFSET_SHIFT));
|
||||
|
||||
wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
|
||||
JUMP_COND_CALM | JUMP_COND_NCP |
|
||||
JUMP_COND_NOP | JUMP_COND_NIP |
|
||||
JUMP_COND_NIFP);
|
||||
set_jump_tgt_here(desc, wait_load_cmd);
|
||||
|
||||
/* Read salt and IV */
|
||||
append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
|
||||
cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV);
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
|
||||
}
|
||||
|
||||
append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/* Read assoc data */
|
||||
@ -823,7 +907,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
|
||||
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
|
||||
|
||||
/* Skip IV */
|
||||
append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
|
||||
|
||||
/* Will read cryptlen bytes */
|
||||
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
|
||||
@ -862,10 +946,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
|
||||
* @desc: pointer to buffer used for descriptor construction
|
||||
* @cdata: pointer to block cipher transform definitions
|
||||
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
|
||||
* @ivsize: initialization vector size
|
||||
* @icvsize: integrity check value (ICV) size (truncated or full)
|
||||
* @is_qi: true when called from caam/qi
|
||||
*/
|
||||
void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
|
||||
unsigned int icvsize)
|
||||
unsigned int ivsize, unsigned int icvsize,
|
||||
const bool is_qi)
|
||||
{
|
||||
u32 *key_jump_cmd;
|
||||
|
||||
@ -887,7 +974,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
|
||||
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
|
||||
|
||||
append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
|
||||
if (is_qi) {
|
||||
u32 *wait_load_cmd;
|
||||
|
||||
/* REG3 = assoclen */
|
||||
append_seq_load(desc, 4, LDST_CLASS_DECO |
|
||||
LDST_SRCDST_WORD_DECO_MATH3 |
|
||||
(4 << LDST_OFFSET_SHIFT));
|
||||
|
||||
wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
|
||||
JUMP_COND_CALM | JUMP_COND_NCP |
|
||||
JUMP_COND_NOP | JUMP_COND_NIP |
|
||||
JUMP_COND_NIFP);
|
||||
set_jump_tgt_here(desc, wait_load_cmd);
|
||||
|
||||
/* Read salt and IV */
|
||||
append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
|
||||
cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV);
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
|
||||
}
|
||||
|
||||
append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/* Read assoc data */
|
||||
@ -895,7 +1004,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
|
||||
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
|
||||
|
||||
/* Skip IV */
|
||||
append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
|
||||
|
||||
/* Will read cryptlen bytes */
|
||||
append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
|
||||
@ -934,10 +1043,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
|
||||
* @desc: pointer to buffer used for descriptor construction
|
||||
* @cdata: pointer to block cipher transform definitions
|
||||
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
|
||||
* @ivsize: initialization vector size
|
||||
* @icvsize: integrity check value (ICV) size (truncated or full)
|
||||
* @is_qi: true when called from caam/qi
|
||||
*/
|
||||
void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
|
||||
unsigned int icvsize)
|
||||
unsigned int ivsize, unsigned int icvsize,
|
||||
const bool is_qi)
|
||||
{
|
||||
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
|
||||
|
||||
@ -958,6 +1070,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
|
||||
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_ENCRYPT);
|
||||
|
||||
if (is_qi) {
|
||||
/* assoclen is not needed, skip it */
|
||||
append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
|
||||
|
||||
/* Read salt and IV */
|
||||
append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
|
||||
cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV);
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
|
||||
}
|
||||
|
||||
/* assoclen + cryptlen = seqinlen */
|
||||
append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
|
||||
|
||||
@ -1004,10 +1128,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
|
||||
* @desc: pointer to buffer used for descriptor construction
|
||||
* @cdata: pointer to block cipher transform definitions
|
||||
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
|
||||
* @ivsize: initialization vector size
|
||||
* @icvsize: integrity check value (ICV) size (truncated or full)
|
||||
* @is_qi: true when called from caam/qi
|
||||
*/
|
||||
void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
|
||||
unsigned int icvsize)
|
||||
unsigned int ivsize, unsigned int icvsize,
|
||||
const bool is_qi)
|
||||
{
|
||||
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
|
||||
|
||||
@ -1028,6 +1155,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
|
||||
append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_DECRYPT | OP_ALG_ICV_ON);
|
||||
|
||||
if (is_qi) {
|
||||
/* assoclen is not needed, skip it */
|
||||
append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
|
||||
|
||||
/* Read salt and IV */
|
||||
append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
|
||||
cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV);
|
||||
append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
|
||||
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
|
||||
}
|
||||
|
||||
/* assoclen + cryptlen = seqoutlen */
|
||||
append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
|
||||
|
||||
|
@@ -27,14 +27,20 @@
#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
@@ -67,22 +73,28 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
const bool is_qi, int era);

void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);

void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);

void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);

void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);

void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);

void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);

void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
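The new DESC_QI_* lengths exist so each setkey path can check whether the shared descriptor still fits the 64-word descriptor buffer when the key is inlined; if not, the key is referenced by DMA address instead. A sketch of that decision follows, mirroring gcm_set_sh_desc() in the caam/qi code added by this commit; it assumes the caam driver's internal headers, so it is illustrative rather than stand-alone.

/* Sketch of the inline-vs-DMA key decision made by the qi glue code;
 * the constants and ctx fields are the ones used in the caam driver. */
static void example_choose_key_location(struct caam_ctx *ctx)
{
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;	/* key fits in the descriptor */
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;	/* reference it by DMA address */
		ctx->cdata.key_dma = ctx->key_dma;
	}
}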
|
@ -278,12 +278,317 @@ skip_split_key:
|
||||
}
|
||||
}
|
||||
|
||||
memzero_explicit(&keys, sizeof(keys));
|
||||
return ret;
|
||||
badkey:
|
||||
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
memzero_explicit(&keys, sizeof(keys));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int gcm_set_sh_desc(struct crypto_aead *aead)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
|
||||
ctx->cdata.keylen;
|
||||
|
||||
if (!ctx->cdata.keylen || !ctx->authsize)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptor
|
||||
* must fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
|
||||
ctx->cdata.key_inline = true;
|
||||
ctx->cdata.key_virt = ctx->key;
|
||||
} else {
|
||||
ctx->cdata.key_inline = false;
|
||||
ctx->cdata.key_dma = ctx->key_dma;
|
||||
}
|
||||
|
||||
cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
|
||||
ctx->authsize, true);
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptor
|
||||
* must fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
|
||||
ctx->cdata.key_inline = true;
|
||||
ctx->cdata.key_virt = ctx->key;
|
||||
} else {
|
||||
ctx->cdata.key_inline = false;
|
||||
ctx->cdata.key_dma = ctx->key_dma;
|
||||
}
|
||||
|
||||
cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
|
||||
ctx->authsize, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
|
||||
ctx->authsize = authsize;
|
||||
gcm_set_sh_desc(authenc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gcm_setkey(struct crypto_aead *aead,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
int ret;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||
#endif
|
||||
|
||||
memcpy(ctx->key, key, keylen);
|
||||
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
|
||||
ctx->cdata.keylen = keylen;
|
||||
|
||||
ret = gcm_set_sh_desc(aead);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Now update the driver contexts with the new shared descriptor */
|
||||
if (ctx->drv_ctx[ENCRYPT]) {
|
||||
ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
|
||||
ctx->sh_desc_enc);
|
||||
if (ret) {
|
||||
dev_err(jrdev, "driver enc context update failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx->drv_ctx[DECRYPT]) {
|
||||
ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
|
||||
ctx->sh_desc_dec);
|
||||
if (ret) {
|
||||
dev_err(jrdev, "driver dec context update failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
|
||||
ctx->cdata.keylen;
|
||||
|
||||
if (!ctx->cdata.keylen || !ctx->authsize)
|
||||
return 0;
|
||||
|
||||
ctx->cdata.key_virt = ctx->key;
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptor
|
||||
* must fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
|
||||
ctx->cdata.key_inline = true;
|
||||
} else {
|
||||
ctx->cdata.key_inline = false;
|
||||
ctx->cdata.key_dma = ctx->key_dma;
|
||||
}
|
||||
|
||||
cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
|
||||
ctx->authsize, true);
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptor
|
||||
* must fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
|
||||
ctx->cdata.key_inline = true;
|
||||
} else {
|
||||
ctx->cdata.key_inline = false;
|
||||
ctx->cdata.key_dma = ctx->key_dma;
|
||||
}
|
||||
|
||||
cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
|
||||
ctx->authsize, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rfc4106_setauthsize(struct crypto_aead *authenc,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
|
||||
ctx->authsize = authsize;
|
||||
rfc4106_set_sh_desc(authenc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rfc4106_setkey(struct crypto_aead *aead,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
int ret;
|
||||
|
||||
if (keylen < 4)
|
||||
return -EINVAL;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||
#endif
|
||||
|
||||
memcpy(ctx->key, key, keylen);
/*
 * The last four bytes of the key material are used as the salt value
 * in the nonce. Update the AES key length.
 */
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
ctx->dir);

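As the comment above notes, an rfc4106(gcm(aes)) key carries a 4-byte nonce salt after the AES key proper, so the driver keeps the whole blob but programs only keylen - 4 bytes as the AES key. A small stand-alone sketch of that split, in plain C with illustrative names:

#include <stdio.h>
#include <string.h>

/* rfc4106 key layout: [ AES key | 4-byte salt ] */
static void split_rfc4106_key(const unsigned char *key, unsigned int keylen,
			      unsigned char *aes_key, unsigned int *aes_keylen,
			      unsigned char salt[4])
{
	*aes_keylen = keylen - 4;		/* e.g. 20 bytes -> AES-128 + salt */
	memcpy(aes_key, key, *aes_keylen);
	memcpy(salt, key + *aes_keylen, 4);
}

int main(void)
{
	unsigned char key[20] = { 0 };		/* 16-byte AES key followed by salt */
	unsigned char aes_key[16], salt[4];
	unsigned int aes_keylen;

	split_rfc4106_key(key, sizeof(key), aes_key, &aes_keylen, salt);
	printf("AES key bytes: %u, salt bytes: 4\n", aes_keylen);
	return 0;
}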
||||
ret = rfc4106_set_sh_desc(aead);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Now update the driver contexts with the new shared descriptor */
|
||||
if (ctx->drv_ctx[ENCRYPT]) {
|
||||
ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
|
||||
ctx->sh_desc_enc);
|
||||
if (ret) {
|
||||
dev_err(jrdev, "driver enc context update failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx->drv_ctx[DECRYPT]) {
|
||||
ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
|
||||
ctx->sh_desc_dec);
|
||||
if (ret) {
|
||||
dev_err(jrdev, "driver dec context update failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
unsigned int ivsize = crypto_aead_ivsize(aead);
|
||||
int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
|
||||
ctx->cdata.keylen;
|
||||
|
||||
if (!ctx->cdata.keylen || !ctx->authsize)
|
||||
return 0;
|
||||
|
||||
ctx->cdata.key_virt = ctx->key;
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptor
|
||||
* must fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
|
||||
ctx->cdata.key_inline = true;
|
||||
} else {
|
||||
ctx->cdata.key_inline = false;
|
||||
ctx->cdata.key_dma = ctx->key_dma;
|
||||
}
|
||||
|
||||
cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
|
||||
ctx->authsize, true);
|
||||
|
||||
/*
|
||||
* Job Descriptor and Shared Descriptor
|
||||
* must fit into the 64-word Descriptor h/w Buffer
|
||||
*/
|
||||
if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
|
||||
ctx->cdata.key_inline = true;
|
||||
} else {
|
||||
ctx->cdata.key_inline = false;
|
||||
ctx->cdata.key_dma = ctx->key_dma;
|
||||
}
|
||||
|
||||
cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
|
||||
ctx->authsize, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rfc4543_setauthsize(struct crypto_aead *authenc,
|
||||
unsigned int authsize)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
|
||||
ctx->authsize = authsize;
|
||||
rfc4543_set_sh_desc(authenc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rfc4543_setkey(struct crypto_aead *aead,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
struct caam_ctx *ctx = crypto_aead_ctx(aead);
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
int ret;
|
||||
|
||||
if (keylen < 4)
|
||||
return -EINVAL;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
|
||||
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
|
||||
#endif
|
||||
|
||||
memcpy(ctx->key, key, keylen);
|
||||
/*
|
||||
* The last four bytes of the key material are used as the salt value
|
||||
* in the nonce. Update the AES key length.
|
||||
*/
|
||||
ctx->cdata.keylen = keylen - 4;
|
||||
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
|
||||
ctx->dir);
|
||||
|
||||
ret = rfc4543_set_sh_desc(aead);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Now update the driver contexts with the new shared descriptor */
|
||||
if (ctx->drv_ctx[ENCRYPT]) {
|
||||
ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
|
||||
ctx->sh_desc_enc);
|
||||
if (ret) {
|
||||
dev_err(jrdev, "driver enc context update failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx->drv_ctx[DECRYPT]) {
|
||||
ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
|
||||
ctx->sh_desc_dec);
|
||||
if (ret) {
|
||||
dev_err(jrdev, "driver dec context update failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
@@ -562,8 +867,18 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		ecode = -EIO;
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
@@ -807,6 +1122,22 @@ static int aead_decrypt(struct aead_request *req)
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct ablkcipher_edesc *edesc;
@@ -1327,6 +1658,61 @@ static struct caam_alg_template driver_algs[] = {
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
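For orientation only (this block is not part of the diff): the rfc4106/rfc4543 entries registered above are reached through the generic kernel AEAD API, which calls back into the setkey/setauthsize handlers added earlier. A minimal sketch of how a kernel user might bind to the new transform and program it; the key bytes, the 20-byte key length (AES-128 key plus the 4-byte nonce salt) and the 16-byte tag size are illustrative assumptions, not values taken from this commit.

#include <crypto/aead.h>
#include <linux/err.h>

/* Sketch only: allocate the rfc4106 AEAD and program key + tag size. */
static struct crypto_aead *example_alloc_rfc4106(void)
{
	/* 16-byte AES-128 key followed by the 4-byte nonce salt (placeholder values) */
	static const u8 key[20] = { 0 };
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* The driver strips the last 4 key bytes as the salt, as in rfc4106_setkey() above */
	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err) {
		crypto_free_aead(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}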
@@ -337,7 +337,8 @@ static int caam_remove(struct platform_device *pdev)

	/* shut clocks off before finalizing shutdown */
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	clk_disable_unprepare(ctrlpriv->caam_mem);
	if (ctrlpriv->caam_mem)
		clk_disable_unprepare(ctrlpriv->caam_mem);
	clk_disable_unprepare(ctrlpriv->caam_aclk);
	if (ctrlpriv->caam_emi_slow)
		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
@@ -466,14 +467,17 @@ static int caam_probe(struct platform_device *pdev)
	}
	ctrlpriv->caam_ipg = clk;

	clk = caam_drv_identify_clk(&pdev->dev, "mem");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev,
			"can't identify CAAM mem clk: %d\n", ret);
		return ret;
	if (!of_machine_is_compatible("fsl,imx7d") &&
	    !of_machine_is_compatible("fsl,imx7s")) {
		clk = caam_drv_identify_clk(&pdev->dev, "mem");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err(&pdev->dev,
				"can't identify CAAM mem clk: %d\n", ret);
			return ret;
		}
		ctrlpriv->caam_mem = clk;
	}
	ctrlpriv->caam_mem = clk;

	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
	if (IS_ERR(clk)) {
@@ -484,7 +488,9 @@ static int caam_probe(struct platform_device *pdev)
	}
	ctrlpriv->caam_aclk = clk;

	if (!of_machine_is_compatible("fsl,imx6ul")) {
	if (!of_machine_is_compatible("fsl,imx6ul") &&
	    !of_machine_is_compatible("fsl,imx7d") &&
	    !of_machine_is_compatible("fsl,imx7s")) {
		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
@@ -501,11 +507,13 @@ static int caam_probe(struct platform_device *pdev)
		return ret;
	}

	ret = clk_prepare_enable(ctrlpriv->caam_mem);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
			ret);
		goto disable_caam_ipg;
	if (ctrlpriv->caam_mem) {
		ret = clk_prepare_enable(ctrlpriv->caam_mem);
		if (ret < 0) {
			dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
				ret);
			goto disable_caam_ipg;
		}
	}

	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
@@ -815,9 +823,6 @@ static int caam_probe(struct platform_device *pdev)
	return 0;

caam_remove:
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
	caam_remove(pdev);
	return ret;

@@ -829,7 +834,8 @@ disable_caam_emi_slow:
disable_caam_aclk:
	clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
	clk_disable_unprepare(ctrlpriv->caam_mem);
	if (ctrlpriv->caam_mem)
		clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
	clk_disable_unprepare(ctrlpriv->caam_ipg);
	return ret;
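As a side note (again, not part of the diff): the probe/remove hunks above follow a common optional-clock pattern: the clock is looked up only on SoCs that actually have it, left NULL otherwise, and every enable/disable is guarded on the pointer. A rough generic sketch of that pattern using the plain clk API follows; the "mem" clock name and the devm_clk_get() call are placeholders for illustration, while the driver itself goes through its own caam_drv_identify_clk() helper.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

/* Sketch only: acquire and enable an optional clock, skipping SoCs without it. */
static int example_enable_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = NULL;

	if (!of_machine_is_compatible("fsl,imx7d") &&
	    !of_machine_is_compatible("fsl,imx7s")) {
		clk = devm_clk_get(dev, "mem");	/* clock name is a placeholder */
		if (IS_ERR(clk))
			return PTR_ERR(clk);
	}

	*out = clk;
	/* A NULL clock means "this SoC has no such clock": nothing to enable. */
	return clk ? clk_prepare_enable(clk) : 0;
}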
@@ -579,8 +579,15 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,

	fd = &dqrr->fd;
	status = be32_to_cpu(fd->status);
	if (unlikely(status))
		dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
			dev_err(qidev, "Error: %#x in CAAM response FD\n",
				status);
	}

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
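Also for orientation (not part of the diff): with the two ICV-related hunks above, a MAC/tag mismatch is no longer reported as a generic device error but surfaces to the caller as -EBADMSG from the AEAD request. A minimal sketch of how a synchronous caller might distinguish that case; it assumes the request was set up elsewhere with crypto_req_done() and the given crypto_wait as its completion callback.

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/printk.h>

/* Sketch only: treat -EBADMSG as "authentication failed", not a device fault. */
static int example_aead_decrypt(struct aead_request *req, struct crypto_wait *wait)
{
	int err = crypto_wait_req(crypto_aead_decrypt(req), wait);

	if (err == -EBADMSG) {
		pr_debug("ICV check failed: message forged or corrupted\n");
		return err;	/* drop the message, do not retry */
	}

	return err;
}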
@@ -436,7 +436,7 @@ static int cpt_device_init(struct cpt_device *cpt)

	/* Reset the PF when probed first */
	cpt_reset(cpt);
	mdelay(100);
	msleep(100);

	/*Check BIST status*/
	bist = (u64)cpt_check_bist_status(cpt);
@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
	}

	/* Update result area if supplied */
	if (req->result)
	if (req->result && rctx->final)
		memcpy(req->result, rctx->iv, digest_size);

e_free: