Merge tag 'v6.13-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Add sig driver API
   - Remove signing/verification from akcipher API
   - Move crypto_simd_disabled_for_test to lib/crypto
   - Add WARN_ON for return values from drivers that indicate memory
     corruption

  Algorithms:
   - Provide crc32-arch and crc32c-arch through Crypto API
   - Optimise crc32c code size on x86
   - Optimise crct10dif on arm/arm64
   - Optimise p10-aes-gcm on powerpc
   - Optimise aegis128 on x86
   - Output full sample from test interface in jitter RNG
   - Retry without padata when it fails in pcrypt

  Drivers:
   - Add support for Airoha EN7581 TRNG
   - Add support for STM32MP25x platforms in stm32
   - Enable iproc-r200 RNG driver on BCMBCA
   - Add Broadcom BCM74110 RNG driver"

* tag 'v6.13-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (112 commits)
  crypto: marvell/cesa - fix uninit value for struct mv_cesa_op_ctx
  crypto: cavium - Fix an error handling path in cpt_ucode_load_fw()
  crypto: aesni - Move back to module_init
  crypto: lib/mpi - Export mpi_set_bit
  crypto: aes-gcm-p10 - Use the correct bit to test for P10
  hwrng: amd - remove reference to removed PPC_MAPLE config
  crypto: arm/crct10dif - Implement plain NEON variant
  crypto: arm/crct10dif - Macroify PMULL asm code
  crypto: arm/crct10dif - Use existing mov_l macro instead of __adrl
  crypto: arm64/crct10dif - Remove remaining 64x64 PMULL fallback code
  crypto: arm64/crct10dif - Use faster 16x64 bit polynomial multiply
  crypto: arm64/crct10dif - Remove obsolete chunking logic
  crypto: bcm - add error check in the ahash_hmac_init function
  crypto: caam - add error check to caam_rsa_set_priv_key_form
  hwrng: bcm74110 - Add Broadcom BCM74110 RNG driver
  dt-bindings: rng: add binding for BCM74110 RNG
  padata: Clean up in padata_do_multithreaded()
  crypto: inside-secure - Fix the return value of safexcel_xcbcmac_cra_init()
  crypto: qat - Fix missing destroy_workqueue in adf_init_aer()
  crypto: rsassa-pkcs1 - Reinstate support for legacy protocols
  ...
commit 02b2f1a7b8
Author: Linus Torvalds
Date:   2024-11-19 10:28:41 -08:00

170 changed files with 6124 additions and 4264 deletions

@ -184,3 +184,10 @@ Date: Apr 2020
Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of time out requests.
Available for both PF and VF, and take no other effect on HPRE.
What: /sys/kernel/debug/hisi_hpre/<bdf>/cap_regs
Date: Oct 2024
Contact: linux-crypto@vger.kernel.org
Description: Dump the values of the qm and hpre capability bit registers and
support the query of device specifications to facilitate fault locating.
Available for both PF and VF, and take no other effect on HPRE.

@ -157,3 +157,10 @@ Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of completed but marked error requests
to be received.
Available for both PF and VF, and take no other effect on SEC.
What: /sys/kernel/debug/hisi_sec2/<bdf>/cap_regs
Date: Oct 2024
Contact: linux-crypto@vger.kernel.org
Description: Dump the values of the qm and sec capability bit registers and
support the query of device specifications to facilitate fault locating.
Available for both PF and VF, and take no other effect on SEC.

@ -158,3 +158,10 @@ Contact: linux-crypto@vger.kernel.org
Description: Dump the total number of BD type error requests
to be received.
Available for both PF and VF, and take no other effect on ZIP.
What: /sys/kernel/debug/hisi_zip/<bdf>/cap_regs
Date: Oct 2024
Contact: linux-crypto@vger.kernel.org
Description: Dump the values of the qm and zip capability bit registers and
support the query of device specifications to facilitate fault locating.
Available for both PF and VF, and take no other effect on ZIP.

@ -8,10 +8,10 @@ Asymmetric Cipher API
---------------------
.. kernel-doc:: include/crypto/akcipher.h
:doc: Generic Public Key API
:doc: Generic Public Key Cipher API
.. kernel-doc:: include/crypto/akcipher.h
:functions: crypto_alloc_akcipher crypto_free_akcipher crypto_akcipher_set_pub_key crypto_akcipher_set_priv_key crypto_akcipher_maxsize crypto_akcipher_encrypt crypto_akcipher_decrypt crypto_akcipher_sign crypto_akcipher_verify
:functions: crypto_alloc_akcipher crypto_free_akcipher crypto_akcipher_set_pub_key crypto_akcipher_set_priv_key crypto_akcipher_maxsize crypto_akcipher_encrypt crypto_akcipher_decrypt
Asymmetric Cipher Request Handle
--------------------------------

@ -0,0 +1,15 @@
Asymmetric Signature Algorithm Definitions
------------------------------------------
.. kernel-doc:: include/crypto/sig.h
:functions: sig_alg
Asymmetric Signature API
------------------------
.. kernel-doc:: include/crypto/sig.h
:doc: Generic Public Key Signature API
.. kernel-doc:: include/crypto/sig.h
:functions: crypto_alloc_sig crypto_free_sig crypto_sig_set_pubkey crypto_sig_set_privkey crypto_sig_keysize crypto_sig_maxsize crypto_sig_digestsize crypto_sig_sign crypto_sig_verify
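As a rough sketch of how a caller might drive the new interface (illustrative only: the algorithm name, error handling, and buffer parameters below are assumptions, not taken from this patch series):

#include <linux/err.h>
#include <crypto/sig.h>

/* Hypothetical example: verify a digest against a detached signature.
 * "ecdsa-nist-p256" and the buffer arguments are illustrative. */
static int example_sig_verify(const void *pub, unsigned int publen,
			      const void *sig, unsigned int siglen,
			      const void *digest, unsigned int dlen)
{
	struct crypto_sig *tfm;
	int err;

	tfm = crypto_alloc_sig("ecdsa-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sig_set_pubkey(tfm, pub, publen);
	if (!err)
		err = crypto_sig_verify(tfm, sig, siglen, digest, dlen);

	crypto_free_sig(tfm);
	return err;
}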

@ -10,4 +10,5 @@ Programming Interface
api-digest
api-rng
api-akcipher
api-sig
api-kpp

@ -214,6 +214,8 @@ the aforementioned cipher types:
- CRYPTO_ALG_TYPE_AKCIPHER Asymmetric cipher
- CRYPTO_ALG_TYPE_SIG Asymmetric signature
- CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
decompression instead of performing the operation on one segment

@ -44,6 +44,7 @@ properties:
- items:
- enum:
- qcom,sa8775p-qce
- qcom,sc7280-qce
- qcom,sm6350-qce
- qcom,sm8250-qce

@ -0,0 +1,38 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rng/airoha,en7581-trng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Airoha EN7581 True Random Number Generator
maintainers:
- Christian Marangi <ansuelsmth@gmail.com>
properties:
compatible:
const: airoha,en7581-trng
reg:
maxItems: 1
interrupts:
maxItems: 1
required:
- compatible
- reg
- interrupts
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
rng@1faa1000 {
compatible = "airoha,en7581-trng";
reg = <0x1faa1000 0x1000>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
};

@ -0,0 +1,35 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rng/brcm,bcm74110-rng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: BCM74110 Random number generator
description:
Random number generator used on the BCM74110.
maintainers:
- Markus Mayer <mmayer@broadcom.com>
- Florian Fainelli <florian.fainelli@broadcom.com>
properties:
compatible:
enum:
- brcm,bcm74110-rng
reg:
maxItems: 1
required:
- compatible
- reg
additionalProperties: false
examples:
- |
rng@83ba000 {
compatible = "brcm,bcm74110-rng";
reg = <0x83ba000 0x14>;
};

@ -14,8 +14,8 @@ properties:
oneOf:
- const: fsl,imx21-rnga
- const: fsl,imx25-rngb
- items:
- const: fsl,imx31-rnga
- items:
- const: fsl,imx21-rnga
- items:
- enum:

@ -1,20 +1,25 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rng/omap_rng.yaml#
$id: http://devicetree.org/schemas/rng/inside-secure,safexcel-eip76.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: OMAP SoC and Inside-Secure HWRNG Module
title: Inside-Secure HWRNG Module
maintainers:
- Jayesh Choudhary <j-choudhary@ti.com>
properties:
compatible:
enum:
oneOf:
- enum:
- ti,omap2-rng
- ti,omap4-rng
- inside-secure,safexcel-eip76
- items:
- enum:
- marvell,armada-8k-rng
- const: inside-secure,safexcel-eip76
ti,hwmods:
const: rng

@ -18,12 +18,19 @@ properties:
enum:
- st,stm32-rng
- st,stm32mp13-rng
- st,stm32mp25-rng
reg:
maxItems: 1
clocks:
maxItems: 1
minItems: 1
maxItems: 2
clock-names:
items:
- const: core
- const: bus
resets:
maxItems: 1
@ -57,6 +64,25 @@ allOf:
properties:
st,rng-lock-conf: false
- if:
properties:
compatible:
contains:
enum:
- st,stm32-rng
- st,stm32mp13-rng
then:
properties:
clocks:
maxItems: 1
clock-names: false
else:
properties:
clocks:
minItems: 2
required:
- clock-names
additionalProperties: false
examples:

@ -11447,7 +11447,7 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
F: drivers/dma/ioat*
INTEL IAA CRYPTO DRIVER
M: Tom Zanussi <tom.zanussi@linux.intel.com>
M: Kristen Accardi <kristen.c.accardi@intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst

@ -112,55 +112,120 @@
FOLD_CONST_L .req q10l
FOLD_CONST_H .req q10h
/*
* Pairwise long polynomial multiplication of two 16-bit values
*
* { w0, w1 }, { y0, y1 }
*
* by two 64-bit values
*
* { x0, x1, x2, x3, x4, x5, x6, x7 }, { z0, z1, z2, z3, z4, z5, z6, z7 }
*
* where each vector element is a byte, ordered from least to most
* significant. The resulting 80-bit vectors are XOR'ed together.
*
* This can be implemented using 8x8 long polynomial multiplication, by
* reorganizing the input so that each pairwise 8x8 multiplication
* produces one of the terms from the decomposition below, and
* combining the results of each rank and shifting them into place.
*
* Rank
* 0 w0*x0 ^ | y0*z0 ^
* 1 (w0*x1 ^ w1*x0) << 8 ^ | (y0*z1 ^ y1*z0) << 8 ^
* 2 (w0*x2 ^ w1*x1) << 16 ^ | (y0*z2 ^ y1*z1) << 16 ^
* 3 (w0*x3 ^ w1*x2) << 24 ^ | (y0*z3 ^ y1*z2) << 24 ^
* 4 (w0*x4 ^ w1*x3) << 32 ^ | (y0*z4 ^ y1*z3) << 32 ^
* 5 (w0*x5 ^ w1*x4) << 40 ^ | (y0*z5 ^ y1*z4) << 40 ^
* 6 (w0*x6 ^ w1*x5) << 48 ^ | (y0*z6 ^ y1*z5) << 48 ^
* 7 (w0*x7 ^ w1*x6) << 56 ^ | (y0*z7 ^ y1*z6) << 56 ^
* 8 w1*x7 << 64 | y1*z7 << 64
*
* The inputs can be reorganized into
*
* { w0, w0, w0, w0, y0, y0, y0, y0 }, { w1, w1, w1, w1, y1, y1, y1, y1 }
* { x0, x2, x4, x6, z0, z2, z4, z6 }, { x1, x3, x5, x7, z1, z3, z5, z7 }
*
* and after performing 8x8->16 bit long polynomial multiplication of
* each of the halves of the first vector with those of the second one,
* we obtain the following four vectors of 16-bit elements:
*
* a := { w0*x0, w0*x2, w0*x4, w0*x6 }, { y0*z0, y0*z2, y0*z4, y0*z6 }
* b := { w0*x1, w0*x3, w0*x5, w0*x7 }, { y0*z1, y0*z3, y0*z5, y0*z7 }
* c := { w1*x0, w1*x2, w1*x4, w1*x6 }, { y1*z0, y1*z2, y1*z4, y1*z6 }
* d := { w1*x1, w1*x3, w1*x5, w1*x7 }, { y1*z1, y1*z3, y1*z5, y1*z7 }
*
* Results b and c can be XORed together, as the vector elements have
* matching ranks. Then, the final XOR can be pulled forward, and
* applied between the halves of each of the remaining three vectors,
* which are then shifted into place, and XORed together to produce the
* final 80-bit result.
*/
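As a cross-check on the decomposition above, here is a small host-side C model (an illustrative sketch, not part of this patch; it uses the GCC/Clang __uint128_t extension) comparing the rank-by-rank construction against a direct bit-serial carryless multiply:

#include <stdint.h>

/* Carryless (polynomial) 8x8 -> 16-bit multiply. */
static uint16_t clmul8x8(uint8_t a, uint8_t b)
{
	uint16_t r = 0;

	for (int i = 0; i < 8; i++)
		if (a & (1 << i))
			r ^= (uint16_t)b << i;
	return r;
}

/* Direct 16x64 carryless multiply, bit-serial reference. */
static __uint128_t clmul16x64(uint16_t a, uint64_t b)
{
	__uint128_t r = 0;

	for (int i = 0; i < 16; i++)
		if (a & (1 << i))
			r ^= (__uint128_t)b << i;
	return r;
}

/* The same product assembled one rank at a time, exactly as in the
 * table above: w = { w0, w1 }, x = { x0, ..., x7 }. */
static __uint128_t clmul16x64_ranks(uint16_t w, uint64_t x)
{
	uint8_t w0 = w, w1 = w >> 8;
	__uint128_t r = 0;

	for (int i = 0; i < 8; i++) {
		uint8_t xi = x >> (8 * i);

		r ^= (__uint128_t)clmul8x8(w0, xi) << (8 * i);
		r ^= (__uint128_t)clmul8x8(w1, xi) << (8 * i + 8);
	}
	return r;	/* equal to clmul16x64(w, x) for all inputs */
}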
.macro pmull16x64_p8, v16, v64
vext.8 q11, \v64, \v64, #1
vld1.64 {q12}, [r4, :128]
vuzp.8 q11, \v64
vtbl.8 d24, {\v16\()_L-\v16\()_H}, d24
vtbl.8 d25, {\v16\()_L-\v16\()_H}, d25
bl __pmull16x64_p8
veor \v64, q12, q14
.endm
__pmull16x64_p8:
vmull.p8 q13, d23, d24
vmull.p8 q14, d23, d25
vmull.p8 q15, d22, d24
vmull.p8 q12, d22, d25
veor q14, q14, q15
veor d24, d24, d25
veor d26, d26, d27
veor d28, d28, d29
vmov.i32 d25, #0
vmov.i32 d29, #0
vext.8 q12, q12, q12, #14
vext.8 q14, q14, q14, #15
veor d24, d24, d26
bx lr
ENDPROC(__pmull16x64_p8)
.macro pmull16x64_p64, v16, v64
vmull.p64 q11, \v64\()l, \v16\()_L
vmull.p64 \v64, \v64\()h, \v16\()_H
veor \v64, \v64, q11
.endm
// Fold reg1, reg2 into the next 32 data bytes, storing the result back
// into reg1, reg2.
.macro fold_32_bytes, reg1, reg2
vld1.64 {q11-q12}, [buf]!
.macro fold_32_bytes, reg1, reg2, p
vld1.64 {q8-q9}, [buf]!
vmull.p64 q8, \reg1\()h, FOLD_CONST_H
vmull.p64 \reg1, \reg1\()l, FOLD_CONST_L
vmull.p64 q9, \reg2\()h, FOLD_CONST_H
vmull.p64 \reg2, \reg2\()l, FOLD_CONST_L
pmull16x64_\p FOLD_CONST, \reg1
pmull16x64_\p FOLD_CONST, \reg2
CPU_LE( vrev64.8 q11, q11 )
CPU_LE( vrev64.8 q12, q12 )
vswp q11l, q11h
vswp q12l, q12h
CPU_LE( vrev64.8 q8, q8 )
CPU_LE( vrev64.8 q9, q9 )
vswp q8l, q8h
vswp q9l, q9h
veor.8 \reg1, \reg1, q8
veor.8 \reg2, \reg2, q9
veor.8 \reg1, \reg1, q11
veor.8 \reg2, \reg2, q12
.endm
// Fold src_reg into dst_reg, optionally loading the next fold constants
.macro fold_16_bytes, src_reg, dst_reg, load_next_consts
vmull.p64 q8, \src_reg\()l, FOLD_CONST_L
vmull.p64 \src_reg, \src_reg\()h, FOLD_CONST_H
.macro fold_16_bytes, src_reg, dst_reg, p, load_next_consts
pmull16x64_\p FOLD_CONST, \src_reg
.ifnb \load_next_consts
vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
.endif
veor.8 \dst_reg, \dst_reg, q8
veor.8 \dst_reg, \dst_reg, \src_reg
.endm
.macro __adrl, out, sym
movw \out, #:lower16:\sym
movt \out, #:upper16:\sym
.endm
//
// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
ENTRY(crc_t10dif_pmull)
.macro crct10dif, p
// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
cmp len, #256
blt .Lless_than_256_bytes
blt .Lless_than_256_bytes\@
__adrl fold_consts_ptr, .Lfold_across_128_bytes_consts
mov_l fold_consts_ptr, .Lfold_across_128_bytes_consts
// Load the first 128 data bytes. Byte swapping is necessary to make
// the bit order match the polynomial coefficient order.
@ -199,27 +264,27 @@ CPU_LE( vrev64.8 q7, q7 )
// While >= 128 data bytes remain (not counting q0-q7), fold the 128
// bytes q0-q7 into them, storing the result back into q0-q7.
.Lfold_128_bytes_loop:
fold_32_bytes q0, q1
fold_32_bytes q2, q3
fold_32_bytes q4, q5
fold_32_bytes q6, q7
.Lfold_128_bytes_loop\@:
fold_32_bytes q0, q1, \p
fold_32_bytes q2, q3, \p
fold_32_bytes q4, q5, \p
fold_32_bytes q6, q7, \p
subs len, len, #128
bge .Lfold_128_bytes_loop
bge .Lfold_128_bytes_loop\@
// Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.
// Fold across 64 bytes.
vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
fold_16_bytes q0, q4
fold_16_bytes q1, q5
fold_16_bytes q2, q6
fold_16_bytes q3, q7, 1
fold_16_bytes q0, q4, \p
fold_16_bytes q1, q5, \p
fold_16_bytes q2, q6, \p
fold_16_bytes q3, q7, \p, 1
// Fold across 32 bytes.
fold_16_bytes q4, q6
fold_16_bytes q5, q7, 1
fold_16_bytes q4, q6, \p
fold_16_bytes q5, q7, \p, 1
// Fold across 16 bytes.
fold_16_bytes q6, q7
fold_16_bytes q6, q7, \p
// Add 128 to get the correct number of data bytes remaining in 0...127
// (not counting q7), following the previous extra subtraction by 128.
@ -229,25 +294,23 @@ CPU_LE( vrev64.8 q7, q7 )
// While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7
// into them, storing the result back into q7.
blt .Lfold_16_bytes_loop_done
.Lfold_16_bytes_loop:
vmull.p64 q8, q7l, FOLD_CONST_L
vmull.p64 q7, q7h, FOLD_CONST_H
veor.8 q7, q7, q8
blt .Lfold_16_bytes_loop_done\@
.Lfold_16_bytes_loop\@:
pmull16x64_\p FOLD_CONST, q7
vld1.64 {q0}, [buf]!
CPU_LE( vrev64.8 q0, q0 )
vswp q0l, q0h
veor.8 q7, q7, q0
subs len, len, #16
bge .Lfold_16_bytes_loop
bge .Lfold_16_bytes_loop\@
.Lfold_16_bytes_loop_done:
.Lfold_16_bytes_loop_done\@:
// Add 16 to get the correct number of data bytes remaining in 0...15
// (not counting q7), following the previous extra subtraction by 16.
adds len, len, #16
beq .Lreduce_final_16_bytes
beq .Lreduce_final_16_bytes\@
.Lhandle_partial_segment:
.Lhandle_partial_segment\@:
// Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
// 16 bytes are in q7 and the rest are the remaining data in 'buf'. To
// do this without needing a fold constant for each possible 'len',
@ -262,9 +325,9 @@ CPU_LE( vrev64.8 q0, q0 )
vswp q0l, q0h
// q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
__adrl r3, .Lbyteshift_table + 16
sub r3, r3, len
vld1.8 {q2}, [r3]
mov_l r1, .Lbyteshift_table + 16
sub r1, r1, len
vld1.8 {q2}, [r1]
vtbl.8 q1l, {q7l-q7h}, q2l
vtbl.8 q1h, {q7l-q7h}, q2h
@ -282,12 +345,46 @@ CPU_LE( vrev64.8 q0, q0 )
vbsl.8 q2, q1, q0
// Fold the first chunk into the second chunk, storing the result in q7.
vmull.p64 q0, q3l, FOLD_CONST_L
vmull.p64 q7, q3h, FOLD_CONST_H
veor.8 q7, q7, q0
veor.8 q7, q7, q2
pmull16x64_\p FOLD_CONST, q3
veor.8 q7, q3, q2
b .Lreduce_final_16_bytes\@
.Lless_than_256_bytes\@:
// Checksumming a buffer of length 16...255 bytes
mov_l fold_consts_ptr, .Lfold_across_16_bytes_consts
// Load the first 16 data bytes.
vld1.64 {q7}, [buf]!
CPU_LE( vrev64.8 q7, q7 )
vswp q7l, q7h
// XOR the first 16 data *bits* with the initial CRC value.
vmov.i8 q0h, #0
vmov.u16 q0h[3], init_crc
veor.8 q7h, q7h, q0h
// Load the fold-across-16-bytes constants.
vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
cmp len, #16
beq .Lreduce_final_16_bytes\@ // len == 16
subs len, len, #32
addlt len, len, #16
blt .Lhandle_partial_segment\@ // 17 <= len <= 31
b .Lfold_16_bytes_loop\@ // 32 <= len <= 255
.Lreduce_final_16_bytes\@:
.endm
//
// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
ENTRY(crc_t10dif_pmull64)
crct10dif p64
.Lreduce_final_16_bytes:
// Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC.
// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
@ -320,32 +417,19 @@ CPU_LE( vrev64.8 q0, q0 )
vmov.u16 r0, q0l[0]
bx lr
ENDPROC(crc_t10dif_pmull64)
.Lless_than_256_bytes:
// Checksumming a buffer of length 16...255 bytes
ENTRY(crc_t10dif_pmull8)
push {r4, lr}
mov_l r4, .L16x64perm
__adrl fold_consts_ptr, .Lfold_across_16_bytes_consts
crct10dif p8
// Load the first 16 data bytes.
vld1.64 {q7}, [buf]!
CPU_LE( vrev64.8 q7, q7 )
vswp q7l, q7h
// XOR the first 16 data *bits* with the initial CRC value.
vmov.i8 q0h, #0
vmov.u16 q0h[3], init_crc
veor.8 q7h, q7h, q0h
// Load the fold-across-16-bytes constants.
vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
cmp len, #16
beq .Lreduce_final_16_bytes // len == 16
subs len, len, #32
addlt len, len, #16
blt .Lhandle_partial_segment // 17 <= len <= 31
b .Lfold_16_bytes_loop // 32 <= len <= 255
ENDPROC(crc_t10dif_pmull)
vst1.64 {q7}, [r3, :128]
pop {r4, pc}
ENDPROC(crc_t10dif_pmull8)
.section ".rodata", "a"
.align 4
@ -379,3 +463,6 @@ ENDPROC(crc_t10dif_pmull)
.byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
.byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
.byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
.L16x64perm:
.quad 0x808080800000000, 0x909090901010101

@ -19,7 +19,9 @@
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
asmlinkage u16 crc_t10dif_pmull64(u16 init_crc, const u8 *buf, size_t len);
asmlinkage void crc_t10dif_pmull8(u16 init_crc, const u8 *buf, size_t len,
u8 out[16]);
static int crct10dif_init(struct shash_desc *desc)
{
@ -29,14 +31,14 @@ static int crct10dif_init(struct shash_desc *desc)
return 0;
}
static int crct10dif_update(struct shash_desc *desc, const u8 *data,
static int crct10dif_update_ce(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull(*crc, data, length);
*crc = crc_t10dif_pmull64(*crc, data, length);
kernel_neon_end();
} else {
*crc = crc_t10dif_generic(*crc, data, length);
@ -45,6 +47,27 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
return 0;
}
static int crct10dif_update_neon(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crcp = shash_desc_ctx(desc);
u8 buf[16] __aligned(16);
u16 crc = *crcp;
if (length > CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
crc_t10dif_pmull8(crc, data, length, buf);
kernel_neon_end();
crc = 0;
data = buf;
length = sizeof(buf);
}
*crcp = crc_t10dif_generic(crc, data, length);
return 0;
}
static int crct10dif_final(struct shash_desc *desc, u8 *out)
{
u16 *crc = shash_desc_ctx(desc);
@ -53,10 +76,22 @@ static int crct10dif_final(struct shash_desc *desc, u8 *out)
return 0;
}
static struct shash_alg crc_t10dif_alg = {
static struct shash_alg algs[] = {{
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.init = crct10dif_init,
.update = crct10dif_update,
.update = crct10dif_update_neon,
.final = crct10dif_final,
.descsize = CRC_T10DIF_DIGEST_SIZE,
.base.cra_name = "crct10dif",
.base.cra_driver_name = "crct10dif-arm-neon",
.base.cra_priority = 150,
.base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.init = crct10dif_init,
.update = crct10dif_update_ce,
.final = crct10dif_final,
.descsize = CRC_T10DIF_DIGEST_SIZE,
@ -65,19 +100,19 @@ static struct shash_alg crc_t10dif_alg = {
.base.cra_priority = 200,
.base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
};
}};
static int __init crc_t10dif_mod_init(void)
{
if (!(elf_hwcap2 & HWCAP2_PMULL))
if (!(elf_hwcap & HWCAP_NEON))
return -ENODEV;
return crypto_register_shash(&crc_t10dif_alg);
return crypto_register_shashes(algs, 1 + !!(elf_hwcap2 & HWCAP2_PMULL));
}
static void __exit crc_t10dif_mod_exit(void)
{
crypto_unregister_shash(&crc_t10dif_alg);
crypto_unregister_shashes(algs, 1 + !!(elf_hwcap2 & HWCAP2_PMULL));
}
module_init(crc_t10dif_mod_init);

@ -1,8 +1,11 @@
//
// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
// Copyright (C) 2016 Linaro Ltd
// Copyright (C) 2019-2024 Google LLC
//
// Authors: Ard Biesheuvel <ardb@google.com>
// Eric Biggers <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
@ -71,161 +74,117 @@
init_crc .req w0
buf .req x1
len .req x2
fold_consts_ptr .req x3
fold_consts_ptr .req x5
fold_consts .req v10
ad .req v14
k00_16 .req v15
k32_48 .req v16
t3 .req v17
t4 .req v18
t5 .req v19
t6 .req v20
t7 .req v21
t8 .req v22
t9 .req v23
perm1 .req v24
perm2 .req v25
perm3 .req v26
perm4 .req v27
perm .req v27
bd1 .req v28
bd2 .req v29
bd3 .req v30
bd4 .req v31
.macro __pmull_init_p64
.macro pmull16x64_p64, a16, b64, c64
pmull2 \c64\().1q, \a16\().2d, \b64\().2d
pmull \b64\().1q, \a16\().1d, \b64\().1d
.endm
.macro __pmull_pre_p64, bd
/*
* Pairwise long polynomial multiplication of two 16-bit values
*
* { w0, w1 }, { y0, y1 }
*
* by two 64-bit values
*
* { x0, x1, x2, x3, x4, x5, x6, x7 }, { z0, z1, z2, z3, z4, z5, z6, z7 }
*
* where each vector element is a byte, ordered from least to most
* significant.
*
* This can be implemented using 8x8 long polynomial multiplication, by
* reorganizing the input so that each pairwise 8x8 multiplication
* produces one of the terms from the decomposition below, and
* combining the results of each rank and shifting them into place.
*
* Rank
* 0 w0*x0 ^ | y0*z0 ^
* 1 (w0*x1 ^ w1*x0) << 8 ^ | (y0*z1 ^ y1*z0) << 8 ^
* 2 (w0*x2 ^ w1*x1) << 16 ^ | (y0*z2 ^ y1*z1) << 16 ^
* 3 (w0*x3 ^ w1*x2) << 24 ^ | (y0*z3 ^ y1*z2) << 24 ^
* 4 (w0*x4 ^ w1*x3) << 32 ^ | (y0*z4 ^ y1*z3) << 32 ^
* 5 (w0*x5 ^ w1*x4) << 40 ^ | (y0*z5 ^ y1*z4) << 40 ^
* 6 (w0*x6 ^ w1*x5) << 48 ^ | (y0*z6 ^ y1*z5) << 48 ^
* 7 (w0*x7 ^ w1*x6) << 56 ^ | (y0*z7 ^ y1*z6) << 56 ^
* 8 w1*x7 << 64 | y1*z7 << 64
*
* The inputs can be reorganized into
*
* { w0, w0, w0, w0, y0, y0, y0, y0 }, { w1, w1, w1, w1, y1, y1, y1, y1 }
* { x0, x2, x4, x6, z0, z2, z4, z6 }, { x1, x3, x5, x7, z1, z3, z5, z7 }
*
* and after performing 8x8->16 bit long polynomial multiplication of
* each of the halves of the first vector with those of the second one,
* we obtain the following four vectors of 16-bit elements:
*
* a := { w0*x0, w0*x2, w0*x4, w0*x6 }, { y0*z0, y0*z2, y0*z4, y0*z6 }
* b := { w0*x1, w0*x3, w0*x5, w0*x7 }, { y0*z1, y0*z3, y0*z5, y0*z7 }
* c := { w1*x0, w1*x2, w1*x4, w1*x6 }, { y1*z0, y1*z2, y1*z4, y1*z6 }
* d := { w1*x1, w1*x3, w1*x5, w1*x7 }, { y1*z1, y1*z3, y1*z5, y1*z7 }
*
* Results b and c can be XORed together, as the vector elements have
* matching ranks. Then, the final XOR (*) can be pulled forward, and
* applied between the halves of each of the remaining three vectors,
* which are then shifted into place, and combined to produce two
* 80-bit results.
*
* (*) NOTE: the 16x64 bit polynomial multiply below is not equivalent
* to the 64x64 bit one above, but XOR'ing the outputs together will
* produce the expected result, and this is sufficient in the context of
* this algorithm.
*/
.macro pmull16x64_p8, a16, b64, c64
ext t7.16b, \b64\().16b, \b64\().16b, #1
tbl t5.16b, {\a16\().16b}, perm.16b
uzp1 t7.16b, \b64\().16b, t7.16b
bl __pmull_p8_16x64
ext \b64\().16b, t4.16b, t4.16b, #15
eor \c64\().16b, t8.16b, t5.16b
.endm
.macro __pmull_init_p8
// k00_16 := 0x0000000000000000_000000000000ffff
// k32_48 := 0x00000000ffffffff_0000ffffffffffff
movi k32_48.2d, #0xffffffff
mov k32_48.h[2], k32_48.h[0]
ushr k00_16.2d, k32_48.2d, #32
SYM_FUNC_START_LOCAL(__pmull_p8_16x64)
ext t6.16b, t5.16b, t5.16b, #8
// prepare the permutation vectors
mov_q x5, 0x080f0e0d0c0b0a09
movi perm4.8b, #8
dup perm1.2d, x5
eor perm1.16b, perm1.16b, perm4.16b
ushr perm2.2d, perm1.2d, #8
ushr perm3.2d, perm1.2d, #16
ushr perm4.2d, perm1.2d, #24
sli perm2.2d, perm1.2d, #56
sli perm3.2d, perm1.2d, #48
sli perm4.2d, perm1.2d, #40
.endm
pmull t3.8h, t7.8b, t5.8b
pmull t4.8h, t7.8b, t6.8b
pmull2 t5.8h, t7.16b, t5.16b
pmull2 t6.8h, t7.16b, t6.16b
.macro __pmull_pre_p8, bd
tbl bd1.16b, {\bd\().16b}, perm1.16b
tbl bd2.16b, {\bd\().16b}, perm2.16b
tbl bd3.16b, {\bd\().16b}, perm3.16b
tbl bd4.16b, {\bd\().16b}, perm4.16b
.endm
SYM_FUNC_START_LOCAL(__pmull_p8_core)
.L__pmull_p8_core:
ext t4.8b, ad.8b, ad.8b, #1 // A1
ext t5.8b, ad.8b, ad.8b, #2 // A2
ext t6.8b, ad.8b, ad.8b, #3 // A3
pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B
pmull t8.8h, ad.8b, bd1.8b // E = A*B1
pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B
pmull t7.8h, ad.8b, bd2.8b // G = A*B2
pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B
pmull t9.8h, ad.8b, bd3.8b // I = A*B3
pmull t3.8h, ad.8b, bd4.8b // K = A*B4
b 0f
.L__pmull_p8_core2:
tbl t4.16b, {ad.16b}, perm1.16b // A1
tbl t5.16b, {ad.16b}, perm2.16b // A2
tbl t6.16b, {ad.16b}, perm3.16b // A3
pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B
pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B
pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B
pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
0: eor t4.16b, t4.16b, t8.16b // L = E + F
eor t5.16b, t5.16b, t7.16b // M = G + H
eor t6.16b, t6.16b, t9.16b // N = I + J
uzp1 t8.2d, t4.2d, t5.2d
uzp2 t4.2d, t4.2d, t5.2d
uzp1 t7.2d, t6.2d, t3.2d
uzp2 t6.2d, t6.2d, t3.2d
// t4 = (L) (P0 + P1) << 8
// t5 = (M) (P2 + P3) << 16
eor t8.16b, t8.16b, t4.16b
and t4.16b, t4.16b, k32_48.16b
// t6 = (N) (P4 + P5) << 24
// t7 = (K) (P6 + P7) << 32
eor t7.16b, t7.16b, t6.16b
and t6.16b, t6.16b, k00_16.16b
eor t8.16b, t8.16b, t4.16b
eor t7.16b, t7.16b, t6.16b
zip2 t5.2d, t8.2d, t4.2d
zip1 t4.2d, t8.2d, t4.2d
zip2 t3.2d, t7.2d, t6.2d
zip1 t6.2d, t7.2d, t6.2d
ext t4.16b, t4.16b, t4.16b, #15
ext t8.16b, t3.16b, t3.16b, #8
eor t4.16b, t4.16b, t6.16b
ext t7.16b, t5.16b, t5.16b, #8
ext t6.16b, t4.16b, t4.16b, #8
eor t8.8b, t8.8b, t3.8b
eor t5.8b, t5.8b, t7.8b
eor t4.8b, t4.8b, t6.8b
ext t5.16b, t5.16b, t5.16b, #14
ext t6.16b, t6.16b, t6.16b, #13
ext t3.16b, t3.16b, t3.16b, #12
eor t4.16b, t4.16b, t5.16b
eor t6.16b, t6.16b, t3.16b
ret
SYM_FUNC_END(__pmull_p8_core)
SYM_FUNC_END(__pmull_p8_16x64)
.macro __pmull_p8, rq, ad, bd, i
.ifnc \bd, fold_consts
.err
.endif
mov ad.16b, \ad\().16b
.ifb \i
pmull \rq\().8h, \ad\().8b, \bd\().8b // D = A*B
.else
pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B
.endif
bl .L__pmull_p8_core\i
eor \rq\().16b, \rq\().16b, t4.16b
eor \rq\().16b, \rq\().16b, t6.16b
.endm
// Fold reg1, reg2 into the next 32 data bytes, storing the result back
// into reg1, reg2.
.macro fold_32_bytes, p, reg1, reg2
ldp q11, q12, [buf], #0x20
__pmull_\p v8, \reg1, fold_consts, 2
__pmull_\p \reg1, \reg1, fold_consts
pmull16x64_\p fold_consts, \reg1, v8
CPU_LE( rev64 v11.16b, v11.16b )
CPU_LE( rev64 v12.16b, v12.16b )
__pmull_\p v9, \reg2, fold_consts, 2
__pmull_\p \reg2, \reg2, fold_consts
pmull16x64_\p fold_consts, \reg2, v9
CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
@ -238,26 +197,15 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
// Fold src_reg into dst_reg, optionally loading the next fold constants
.macro fold_16_bytes, p, src_reg, dst_reg, load_next_consts
__pmull_\p v8, \src_reg, fold_consts
__pmull_\p \src_reg, \src_reg, fold_consts, 2
pmull16x64_\p fold_consts, \src_reg, v8
.ifnb \load_next_consts
ld1 {fold_consts.2d}, [fold_consts_ptr], #16
__pmull_pre_\p fold_consts
.endif
eor \dst_reg\().16b, \dst_reg\().16b, v8.16b
eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
.endm
.macro __pmull_p64, rd, rn, rm, n
.ifb \n
pmull \rd\().1q, \rn\().1d, \rm\().1d
.else
pmull2 \rd\().1q, \rn\().2d, \rm\().2d
.endif
.endm
.macro crc_t10dif_pmull, p
__pmull_init_\p
// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
cmp len, #256
@ -296,7 +244,6 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// Load the constants for folding across 128 bytes.
ld1 {fold_consts.2d}, [fold_consts_ptr]
__pmull_pre_\p fold_consts
// Subtract 128 for the 128 data bytes just consumed. Subtract another
// 128 to simplify the termination condition of the following loop.
@ -318,7 +265,6 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// Fold across 64 bytes.
add fold_consts_ptr, fold_consts_ptr, #16
ld1 {fold_consts.2d}, [fold_consts_ptr], #16
__pmull_pre_\p fold_consts
fold_16_bytes \p, v0, v4
fold_16_bytes \p, v1, v5
fold_16_bytes \p, v2, v6
@ -339,8 +285,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// into them, storing the result back into v7.
b.lt .Lfold_16_bytes_loop_done_\@
.Lfold_16_bytes_loop_\@:
__pmull_\p v8, v7, fold_consts
__pmull_\p v7, v7, fold_consts, 2
pmull16x64_\p fold_consts, v7, v8
eor v7.16b, v7.16b, v8.16b
ldr q0, [buf], #16
CPU_LE( rev64 v0.16b, v0.16b )
@ -387,51 +332,10 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
bsl v2.16b, v1.16b, v0.16b
// Fold the first chunk into the second chunk, storing the result in v7.
__pmull_\p v0, v3, fold_consts
__pmull_\p v7, v3, fold_consts, 2
eor v7.16b, v7.16b, v0.16b
pmull16x64_\p fold_consts, v3, v0
eor v7.16b, v3.16b, v0.16b
eor v7.16b, v7.16b, v2.16b
.Lreduce_final_16_bytes_\@:
// Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
movi v2.16b, #0 // init zero register
// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
ld1 {fold_consts.2d}, [fold_consts_ptr], #16
__pmull_pre_\p fold_consts
// Fold the high 64 bits into the low 64 bits, while also multiplying by
// x^64. This produces a 128-bit value congruent to x^64 * M(x) and
// whose low 48 bits are 0.
ext v0.16b, v2.16b, v7.16b, #8
__pmull_\p v7, v7, fold_consts, 2 // high bits * x^48 * (x^80 mod G(x))
eor v0.16b, v0.16b, v7.16b // + low bits * x^64
// Fold the high 32 bits into the low 96 bits. This produces a 96-bit
// value congruent to x^64 * M(x) and whose low 48 bits are 0.
ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
mov v0.s[3], v2.s[0] // zero high 32 bits
__pmull_\p v1, v1, fold_consts // high 32 bits * x^48 * (x^48 mod G(x))
eor v0.16b, v0.16b, v1.16b // + low bits
// Load G(x) and floor(x^48 / G(x)).
ld1 {fold_consts.2d}, [fold_consts_ptr]
__pmull_pre_\p fold_consts
// Use Barrett reduction to compute the final CRC value.
__pmull_\p v1, v0, fold_consts, 2 // high 32 bits * floor(x^48 / G(x))
ushr v1.2d, v1.2d, #32 // /= x^32
__pmull_\p v1, v1, fold_consts // *= G(x)
ushr v0.2d, v0.2d, #48
eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
umov w0, v0.h[0]
.ifc \p, p8
frame_pop
.endif
ret
b .Lreduce_final_16_bytes_\@
.Lless_than_256_bytes_\@:
// Checksumming a buffer of length 16...255 bytes
@ -450,7 +354,6 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// Load the fold-across-16-bytes constants.
ld1 {fold_consts.2d}, [fold_consts_ptr], #16
__pmull_pre_\p fold_consts
cmp len, #16
b.eq .Lreduce_final_16_bytes_\@ // len == 16
@ -458,6 +361,8 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
b.ge .Lfold_16_bytes_loop_\@ // 32 <= len <= 255
add len, len, #16
b .Lhandle_partial_segment_\@ // 17 <= len <= 31
.Lreduce_final_16_bytes_\@:
.endm
//
@ -467,7 +372,22 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
//
SYM_FUNC_START(crc_t10dif_pmull_p8)
frame_push 1
// Compose { 0,0,0,0, 8,8,8,8, 1,1,1,1, 9,9,9,9 }
movi perm.4h, #8, lsl #8
orr perm.2s, #1, lsl #16
orr perm.2s, #1, lsl #24
zip1 perm.16b, perm.16b, perm.16b
zip1 perm.16b, perm.16b, perm.16b
crc_t10dif_pmull p8
CPU_LE( rev64 v7.16b, v7.16b )
CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
str q7, [x3]
frame_pop
ret
SYM_FUNC_END(crc_t10dif_pmull_p8)
.align 5
@ -478,6 +398,41 @@ SYM_FUNC_END(crc_t10dif_pmull_p8)
//
SYM_FUNC_START(crc_t10dif_pmull_p64)
crc_t10dif_pmull p64
// Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
movi v2.16b, #0 // init zero register
// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
ld1 {fold_consts.2d}, [fold_consts_ptr], #16
// Fold the high 64 bits into the low 64 bits, while also multiplying by
// x^64. This produces a 128-bit value congruent to x^64 * M(x) and
// whose low 48 bits are 0.
ext v0.16b, v2.16b, v7.16b, #8
pmull2 v7.1q, v7.2d, fold_consts.2d // high bits * x^48 * (x^80 mod G(x))
eor v0.16b, v0.16b, v7.16b // + low bits * x^64
// Fold the high 32 bits into the low 96 bits. This produces a 96-bit
// value congruent to x^64 * M(x) and whose low 48 bits are 0.
ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
mov v0.s[3], v2.s[0] // zero high 32 bits
pmull v1.1q, v1.1d, fold_consts.1d // high 32 bits * x^48 * (x^48 mod G(x))
eor v0.16b, v0.16b, v1.16b // + low bits
// Load G(x) and floor(x^48 / G(x)).
ld1 {fold_consts.2d}, [fold_consts_ptr]
// Use Barrett reduction to compute the final CRC value.
pmull2 v1.1q, v0.2d, fold_consts.2d // high 32 bits * floor(x^48 / G(x))
ushr v1.2d, v1.2d, #32 // /= x^32
pmull v1.1q, v1.1d, fold_consts.1d // *= G(x)
ushr v0.2d, v0.2d, #48
eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
umov w0, v0.h[0]
ret
SYM_FUNC_END(crc_t10dif_pmull_p64)
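For reference, the value every variant in this file computes is CRC-T10DIF: a 16-bit, non-reflected CRC with zero initial value over the generator polynomial G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 (0x8bb7). A bit-serial C sketch of the same function, written here from the polynomial definition (equivalent in behaviour to the crc_t10dif_generic() fallback, though not copied from it):

#include <stddef.h>
#include <stdint.h>

#define CRC_T10DIF_POLY	0x8bb7	/* G(x) with the implicit x^16 dropped */

static uint16_t crc_t10dif_bitwise(uint16_t crc, const uint8_t *buf,
				   size_t len)
{
	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;	/* message enters at the top */
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ CRC_T10DIF_POLY
					     : crc << 1;
	}
	return crc;
}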
.section ".rodata", "a"

@ -20,7 +20,8 @@
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len);
asmlinkage void crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len,
u8 out[16]);
asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
static int crct10dif_init(struct shash_desc *desc)
@ -34,25 +35,21 @@ static int crct10dif_init(struct shash_desc *desc)
static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
do {
unsigned int chunk = length;
if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
chunk = SZ_4K;
u16 *crcp = shash_desc_ctx(desc);
u16 crc = *crcp;
u8 buf[16];
if (length > CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull_p8(*crc, data, chunk);
crc_t10dif_pmull_p8(crc, data, length, buf);
kernel_neon_end();
data += chunk;
length -= chunk;
} while (length);
} else {
*crc = crc_t10dif_generic(*crc, data, length);
crc = 0;
data = buf;
length = sizeof(buf);
}
*crcp = crc_t10dif_generic(crc, data, length);
return 0;
}
@ -62,18 +59,9 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
do {
unsigned int chunk = length;
if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
chunk = SZ_4K;
kernel_neon_begin();
*crc = crc_t10dif_pmull_p64(*crc, data, chunk);
*crc = crc_t10dif_pmull_p64(*crc, data, length);
kernel_neon_end();
data += chunk;
length -= chunk;
} while (length);
} else {
*crc = crc_t10dif_generic(*crc, data, length);
}

@ -107,12 +107,12 @@ config CRYPTO_AES_PPC_SPE
config CRYPTO_AES_GCM_P10
tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
depends on BROKEN
depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_LIB_AES
select CRYPTO_ALGAPI
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
select CRYPTO_SIMD
help
AEAD cipher: AES cipher algorithms (FIPS-197)
GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D)

@ -8,6 +8,7 @@
#include <linux/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/gcm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
@ -24,6 +25,7 @@
#define PPC_ALIGN 16
#define GCM_IV_SIZE 12
#define RFC4106_NONCE_SIZE 4
MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
@ -40,6 +42,7 @@ asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
unsigned char *aad, unsigned int alen);
asmlinkage void gcm_update(u8 *iv, void *Xi);
struct aes_key {
u8 key[AES_MAX_KEYLENGTH];
@ -52,6 +55,7 @@ struct gcm_ctx {
u8 aad_hash[16];
u64 aadLen;
u64 Plen; /* offset 56 - used in aes_p10_gcm_{en/de}crypt */
u8 pblock[16];
};
struct Hash_ctx {
u8 H[16]; /* subkey */
@ -60,17 +64,20 @@ struct Hash_ctx {
struct p10_aes_gcm_ctx {
struct aes_key enc_key;
u8 nonce[RFC4106_NONCE_SIZE];
};
static void vsx_begin(void)
{
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
}
static void vsx_end(void)
{
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
}
@ -198,7 +205,8 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
return ret ? -EINVAL : 0;
}
static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
int assoclen, int enc)
{
struct crypto_tfm *tfm = req->base.tfm;
struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
@ -210,7 +218,6 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
struct skcipher_walk walk;
u8 *assocmem = NULL;
u8 *assoc;
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
@ -218,11 +225,12 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
u8 otag[16];
int total_processed = 0;
int nbytes;
memset(databuf, 0, sizeof(databuf));
memset(hashbuf, 0, sizeof(hashbuf));
memset(ivbuf, 0, sizeof(ivbuf));
memcpy(iv, req->iv, GCM_IV_SIZE);
memcpy(iv, riv, GCM_IV_SIZE);
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length) {
@ -257,19 +265,25 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
if (ret)
return ret;
while (walk.nbytes > 0 && ret == 0) {
while ((nbytes = walk.nbytes) > 0 && ret == 0) {
u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
u8 buf[AES_BLOCK_SIZE];
if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
src = dst = memcpy(buf, src, nbytes);
vsx_begin();
if (enc)
aes_p10_gcm_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
walk.nbytes,
aes_p10_gcm_encrypt(src, dst, nbytes,
&ctx->enc_key, gctx->iv, hash->Htable);
else
aes_p10_gcm_decrypt(walk.src.virt.addr,
walk.dst.virt.addr,
walk.nbytes,
aes_p10_gcm_decrypt(src, dst, nbytes,
&ctx->enc_key, gctx->iv, hash->Htable);
if (unlikely(nbytes > 0 && nbytes < AES_BLOCK_SIZE))
memcpy(walk.dst.virt.addr, buf, nbytes);
vsx_end();
total_processed += walk.nbytes;
@ -281,6 +295,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
/* Finalize hash */
vsx_begin();
gcm_update(gctx->iv, hash->Htable);
finish_tag(gctx, hash, total_processed);
vsx_end();
@ -302,17 +317,63 @@ static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
return 0;
}
static int rfc4106_setkey(struct crypto_aead *tfm, const u8 *inkey,
unsigned int keylen)
{
struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
int err;
keylen -= RFC4106_NONCE_SIZE;
err = p10_aes_gcm_setkey(tfm, inkey, keylen);
if (err)
return err;
memcpy(ctx->nonce, inkey + keylen, RFC4106_NONCE_SIZE);
return 0;
}
static int rfc4106_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
return crypto_rfc4106_check_authsize(authsize);
}
static int rfc4106_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
u8 iv[AES_BLOCK_SIZE];
memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
return crypto_ipsec_check_assoclen(req->assoclen) ?:
p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 1);
}
static int rfc4106_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct p10_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
u8 iv[AES_BLOCK_SIZE];
memcpy(iv, ctx->nonce, RFC4106_NONCE_SIZE);
memcpy(iv + RFC4106_NONCE_SIZE, req->iv, GCM_RFC4106_IV_SIZE);
return crypto_ipsec_check_assoclen(req->assoclen) ?:
p10_aes_gcm_crypt(req, iv, req->assoclen - GCM_RFC4106_IV_SIZE, 0);
}
static int p10_aes_gcm_encrypt(struct aead_request *req)
{
return p10_aes_gcm_crypt(req, 1);
return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 1);
}
static int p10_aes_gcm_decrypt(struct aead_request *req)
{
return p10_aes_gcm_crypt(req, 0);
return p10_aes_gcm_crypt(req, req->iv, req->assoclen, 0);
}
static struct aead_alg gcm_aes_alg = {
static struct aead_alg gcm_aes_algs[] = {{
.ivsize = GCM_IV_SIZE,
.maxauthsize = 16,
@ -321,23 +382,57 @@ static struct aead_alg gcm_aes_alg = {
.encrypt = p10_aes_gcm_encrypt,
.decrypt = p10_aes_gcm_decrypt,
.base.cra_name = "gcm(aes)",
.base.cra_driver_name = "aes_gcm_p10",
.base.cra_name = "__gcm(aes)",
.base.cra_driver_name = "__aes_gcm_p10",
.base.cra_priority = 2100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx),
.base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx)+
4 * sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
};
.base.cra_flags = CRYPTO_ALG_INTERNAL,
}, {
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.setkey = rfc4106_setkey,
.setauthsize = rfc4106_setauthsize,
.encrypt = rfc4106_encrypt,
.decrypt = rfc4106_decrypt,
.base.cra_name = "__rfc4106(gcm(aes))",
.base.cra_driver_name = "__rfc4106_aes_gcm_p10",
.base.cra_priority = 2100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx) +
4 * sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
}};
static struct simd_aead_alg *p10_simd_aeads[ARRAY_SIZE(gcm_aes_algs)];
static int __init p10_init(void)
{
return crypto_register_aead(&gcm_aes_alg);
int ret;
if (!cpu_has_feature(CPU_FTR_ARCH_31))
return 0;
ret = simd_register_aeads_compat(gcm_aes_algs,
ARRAY_SIZE(gcm_aes_algs),
p10_simd_aeads);
if (ret) {
simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
p10_simd_aeads);
return ret;
}
return 0;
}
static void __exit p10_exit(void)
{
crypto_unregister_aead(&gcm_aes_alg);
simd_unregister_aeads(gcm_aes_algs, ARRAY_SIZE(gcm_aes_algs),
p10_simd_aeads);
}
module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
module_init(p10_init);
module_exit(p10_exit);

[File diff suppressed because it is too large]

@ -363,7 +363,7 @@ config CRYPTO_CHACHA20_X86_64
- AVX-512VL (Advanced Vector Extensions-512VL)
config CRYPTO_AEGIS128_AESNI_SSE2
tristate "AEAD ciphers: AEGIS-128 (AES-NI/SSE2)"
tristate "AEAD ciphers: AEGIS-128 (AES-NI/SSE4.1)"
depends on X86 && 64BIT
select CRYPTO_AEAD
select CRYPTO_SIMD
@ -372,7 +372,7 @@ config CRYPTO_AEGIS128_AESNI_SSE2
Architecture: x86_64 using:
- AES-NI (AES New Instructions)
- SSE2 (Streaming SIMD Extensions 2)
- SSE4.1 (Streaming SIMD Extensions 4.1)
config CRYPTO_NHPOLY1305_SSE2
tristate "Hash functions: NHPoly1305 (SSE2)"

@ -1,14 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AES-NI + SSE2 implementation of AEGIS-128
* AES-NI + SSE4.1 implementation of AEGIS-128
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
* Copyright 2024 Google LLC
*/
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/frame.h>
#define STATE0 %xmm0
#define STATE1 %xmm1
@ -20,11 +19,6 @@
#define T0 %xmm6
#define T1 %xmm7
#define STATEP %rdi
#define LEN %rsi
#define SRC %rdx
#define DST %rcx
.section .rodata.cst16.aegis128_const, "aM", @progbits, 32
.align 16
.Laegis128_const_0:
@ -34,11 +28,11 @@
.byte 0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1
.byte 0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd
.section .rodata.cst16.aegis128_counter, "aM", @progbits, 16
.align 16
.Laegis128_counter:
.byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
.byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
.section .rodata.cst32.zeropad_mask, "aM", @progbits, 32
.align 32
.Lzeropad_mask:
.octa 0xffffffffffffffffffffffffffffffff
.octa 0
.text
@ -61,140 +55,102 @@
.endm
/*
* __load_partial: internal ABI
* input:
* LEN - bytes
* SRC - src
* output:
* MSG - message block
* changed:
* T0
* %r8
* %r9
* Load 1 <= LEN (%ecx) <= 15 bytes from the pointer SRC into the xmm register
* MSG and zeroize any remaining bytes. Clobbers %rax, %rcx, and %r8.
*/
SYM_FUNC_START_LOCAL(__load_partial)
xor %r9d, %r9d
pxor MSG, MSG
.macro load_partial
sub $8, %ecx /* LEN - 8 */
jle .Lle8\@
mov LEN, %r8
and $0x1, %r8
jz .Lld_partial_1
/* Load 9 <= LEN <= 15 bytes: */
movq (SRC), MSG /* Load first 8 bytes */
mov (SRC, %rcx), %rax /* Load last 8 bytes */
neg %ecx
shl $3, %ecx
shr %cl, %rax /* Discard overlapping bytes */
pinsrq $1, %rax, MSG
jmp .Ldone\@
mov LEN, %r8
and $0x1E, %r8
add SRC, %r8
mov (%r8), %r9b
.Lle8\@:
add $4, %ecx /* LEN - 4 */
jl .Llt4\@
.Lld_partial_1:
mov LEN, %r8
and $0x2, %r8
jz .Lld_partial_2
/* Load 4 <= LEN <= 8 bytes: */
mov (SRC), %eax /* Load first 4 bytes */
mov (SRC, %rcx), %r8d /* Load last 4 bytes */
jmp .Lcombine\@
mov LEN, %r8
and $0x1C, %r8
add SRC, %r8
shl $0x10, %r9
mov (%r8), %r9w
.Lld_partial_2:
mov LEN, %r8
and $0x4, %r8
jz .Lld_partial_4
mov LEN, %r8
and $0x18, %r8
add SRC, %r8
shl $32, %r9
mov (%r8), %r8d
xor %r8, %r9
.Lld_partial_4:
movq %r9, MSG
mov LEN, %r8
and $0x8, %r8
jz .Lld_partial_8
mov LEN, %r8
and $0x10, %r8
add SRC, %r8
pslldq $8, MSG
movq (%r8), T0
pxor T0, MSG
.Lld_partial_8:
RET
SYM_FUNC_END(__load_partial)
.Llt4\@:
/* Load 1 <= LEN <= 3 bytes: */
add $2, %ecx /* LEN - 2 */
movzbl (SRC), %eax /* Load first byte */
jl .Lmovq\@
movzwl (SRC, %rcx), %r8d /* Load last 2 bytes */
.Lcombine\@:
shl $3, %ecx
shl %cl, %r8
or %r8, %rax /* Combine the two parts */
.Lmovq\@:
movq %rax, MSG
.Ldone\@:
.endm
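A C model of the 9-to-15-byte case above (an illustrative sketch assuming a little-endian host, not part of this patch): the head and tail reads overlap inside the source buffer, and the shift discards the duplicated bytes; store_partial below plays the same trick in reverse.

#include <stdint.h>
#include <string.h>

/* Gather 9 <= len <= 15 bytes into a zero-padded 16-byte block using
 * two overlapping 8-byte loads, as load_partial does. */
static void load_partial_9_15(uint8_t out[16], const uint8_t *src, size_t len)
{
	uint64_t head, tail;

	memcpy(&head, src, 8);			/* bytes 0..7 */
	memcpy(&tail, src + len - 8, 8);	/* bytes len-8..len-1 */
	tail >>= 8 * (16 - len);		/* drop the 16-len overlap bytes */

	memcpy(out, &head, 8);
	memcpy(out + 8, &tail, 8);		/* shifted-in zeroes pad to 16 */
}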
/*
* __store_partial: internal ABI
* input:
* LEN - bytes
* DST - dst
* output:
* T0 - message block
* changed:
* %r8
* %r9
* %r10
* Store 1 <= LEN (%ecx) <= 15 bytes from the xmm register \msg to the pointer
* DST. Clobbers %rax, %rcx, and %r8.
*/
SYM_FUNC_START_LOCAL(__store_partial)
mov LEN, %r8
mov DST, %r9
.macro store_partial msg
sub $8, %ecx /* LEN - 8 */
jl .Llt8\@
movq T0, %r10
/* Store 8 <= LEN <= 15 bytes: */
pextrq $1, \msg, %rax
mov %ecx, %r8d
shl $3, %ecx
ror %cl, %rax
mov %rax, (DST, %r8) /* Store last LEN - 8 bytes */
movq \msg, (DST) /* Store first 8 bytes */
jmp .Ldone\@
cmp $8, %r8
jl .Lst_partial_8
.Llt8\@:
add $4, %ecx /* LEN - 4 */
jl .Llt4\@
mov %r10, (%r9)
psrldq $8, T0
movq T0, %r10
/* Store 4 <= LEN <= 7 bytes: */
pextrd $1, \msg, %eax
mov %ecx, %r8d
shl $3, %ecx
ror %cl, %eax
mov %eax, (DST, %r8) /* Store last LEN - 4 bytes */
movd \msg, (DST) /* Store first 4 bytes */
jmp .Ldone\@
sub $8, %r8
add $8, %r9
.Lst_partial_8:
cmp $4, %r8
jl .Lst_partial_4
mov %r10d, (%r9)
shr $32, %r10
sub $4, %r8
add $4, %r9
.Lst_partial_4:
cmp $2, %r8
jl .Lst_partial_2
mov %r10w, (%r9)
shr $0x10, %r10
sub $2, %r8
add $2, %r9
.Lst_partial_2:
cmp $1, %r8
jl .Lst_partial_1
mov %r10b, (%r9)
.Lst_partial_1:
RET
SYM_FUNC_END(__store_partial)
.Llt4\@:
/* Store 1 <= LEN <= 3 bytes: */
pextrb $0, \msg, 0(DST)
cmp $-2, %ecx /* LEN - 4 == -2, i.e. LEN == 2? */
jl .Ldone\@
pextrb $1, \msg, 1(DST)
je .Ldone\@
pextrb $2, \msg, 2(DST)
.Ldone\@:
.endm
/*
* void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv);
* void aegis128_aesni_init(struct aegis_state *state,
* const struct aegis_block *key,
* const u8 iv[AEGIS128_NONCE_SIZE]);
*/
SYM_FUNC_START(crypto_aegis128_aesni_init)
FRAME_BEGIN
SYM_FUNC_START(aegis128_aesni_init)
.set STATEP, %rdi
.set KEYP, %rsi
.set IVP, %rdx
/* load IV: */
movdqu (%rdx), T1
movdqu (IVP), T1
/* load key: */
movdqa (%rsi), KEY
movdqa (KEYP), KEY
pxor KEY, T1
movdqa T1, STATE0
movdqa KEY, STATE3
@ -224,20 +180,22 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
movdqu STATE2, 0x20(STATEP)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
RET
SYM_FUNC_END(crypto_aegis128_aesni_init)
SYM_FUNC_END(aegis128_aesni_init)
/*
* void crypto_aegis128_aesni_ad(void *state, unsigned int length,
* const void *data);
* void aegis128_aesni_ad(struct aegis_state *state, const u8 *data,
* unsigned int len);
*
* len must be a multiple of 16.
*/
SYM_FUNC_START(crypto_aegis128_aesni_ad)
FRAME_BEGIN
SYM_FUNC_START(aegis128_aesni_ad)
.set STATEP, %rdi
.set SRC, %rsi
.set LEN, %edx
cmp $0x10, LEN
jb .Lad_out
test LEN, LEN
jz .Lad_out
/* load the state: */
movdqu 0x00(STATEP), STATE0
@ -246,89 +204,40 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu 0x30(STATEP), STATE3
movdqu 0x40(STATEP), STATE4
mov SRC, %r8
and $0xF, %r8
jnz .Lad_u_loop
.align 8
.Lad_a_loop:
movdqa 0x00(SRC), MSG
aegis128_update
pxor MSG, STATE4
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_1
movdqa 0x10(SRC), MSG
aegis128_update
pxor MSG, STATE3
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_2
movdqa 0x20(SRC), MSG
aegis128_update
pxor MSG, STATE2
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_3
movdqa 0x30(SRC), MSG
aegis128_update
pxor MSG, STATE1
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_4
movdqa 0x40(SRC), MSG
aegis128_update
pxor MSG, STATE0
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_0
add $0x50, SRC
jmp .Lad_a_loop
.align 8
.Lad_u_loop:
.Lad_loop:
movdqu 0x00(SRC), MSG
aegis128_update
pxor MSG, STATE4
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_1
jz .Lad_out_1
movdqu 0x10(SRC), MSG
aegis128_update
pxor MSG, STATE3
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_2
jz .Lad_out_2
movdqu 0x20(SRC), MSG
aegis128_update
pxor MSG, STATE2
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_3
jz .Lad_out_3
movdqu 0x30(SRC), MSG
aegis128_update
pxor MSG, STATE1
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_4
jz .Lad_out_4
movdqu 0x40(SRC), MSG
aegis128_update
pxor MSG, STATE0
sub $0x10, LEN
cmp $0x10, LEN
jl .Lad_out_0
jz .Lad_out_0
add $0x50, SRC
jmp .Lad_u_loop
jmp .Lad_loop
/* store the state: */
.Lad_out_0:
@ -337,7 +246,6 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE2, 0x20(STATEP)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
RET
.Lad_out_1:
@ -346,7 +254,6 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE1, 0x20(STATEP)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
RET
.Lad_out_2:
@ -355,7 +262,6 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE0, 0x20(STATEP)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
RET
.Lad_out_3:
@ -364,7 +270,6 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE4, 0x20(STATEP)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
RET
.Lad_out_4:
@ -373,41 +278,38 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE3, 0x20(STATEP)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
RET
.Lad_out:
FRAME_END
RET
SYM_FUNC_END(crypto_aegis128_aesni_ad)
SYM_FUNC_END(aegis128_aesni_ad)
.macro encrypt_block a s0 s1 s2 s3 s4 i
movdq\a (\i * 0x10)(SRC), MSG
.macro encrypt_block s0 s1 s2 s3 s4 i
movdqu (\i * 0x10)(SRC), MSG
movdqa MSG, T0
pxor \s1, T0
pxor \s4, T0
movdqa \s2, T1
pand \s3, T1
pxor T1, T0
movdq\a T0, (\i * 0x10)(DST)
movdqu T0, (\i * 0x10)(DST)
aegis128_update
pxor MSG, \s4
sub $0x10, LEN
cmp $0x10, LEN
jl .Lenc_out_\i
jz .Lenc_out_\i
.endm
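In AEGIS-128 terms, encrypt_block computes the ciphertext block as C = P ^ S1 ^ S4 ^ (S2 & S3), then feeds the plaintext into the state update. A minimal C sketch of just that keystream mix (illustrative; the AES-round state rotation done by aegis128_update is not modeled here):

#include <stdint.h>

struct aegis_block { uint64_t lo, hi; };

/* C = P XOR S1 XOR S4 XOR (S2 AND S3), modeling each 128-bit lane as
 * a pair of 64-bit words. Decryption applies the same mix to C. */
static struct aegis_block aegis128_mix(struct aegis_block p,
				       struct aegis_block s1,
				       struct aegis_block s2,
				       struct aegis_block s3,
				       struct aegis_block s4)
{
	struct aegis_block c;

	c.lo = p.lo ^ s1.lo ^ s4.lo ^ (s2.lo & s3.lo);
	c.hi = p.hi ^ s1.hi ^ s4.hi ^ (s2.hi & s3.hi);
	return c;
}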
/*
* void crypto_aegis128_aesni_enc(void *state, unsigned int length,
* const void *src, void *dst);
* void aegis128_aesni_enc(struct aegis_state *state, const u8 *src, u8 *dst,
* unsigned int len);
*
* len must be nonzero and a multiple of 16.
*/
SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
FRAME_BEGIN
cmp $0x10, LEN
jb .Lenc_out
SYM_FUNC_START(aegis128_aesni_enc)
.set STATEP, %rdi
.set SRC, %rsi
.set DST, %rdx
.set LEN, %ecx
/* load the state: */
movdqu 0x00(STATEP), STATE0
@ -416,34 +318,17 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
movdqu 0x30(STATEP), STATE3
movdqu 0x40(STATEP), STATE4
mov SRC, %r8
or DST, %r8
and $0xF, %r8
jnz .Lenc_u_loop
.align 8
.Lenc_a_loop:
encrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0
encrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1
encrypt_block a STATE3 STATE4 STATE0 STATE1 STATE2 2
encrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3
encrypt_block a STATE1 STATE2 STATE3 STATE4 STATE0 4
.Lenc_loop:
encrypt_block STATE0 STATE1 STATE2 STATE3 STATE4 0
encrypt_block STATE4 STATE0 STATE1 STATE2 STATE3 1
encrypt_block STATE3 STATE4 STATE0 STATE1 STATE2 2
encrypt_block STATE2 STATE3 STATE4 STATE0 STATE1 3
encrypt_block STATE1 STATE2 STATE3 STATE4 STATE0 4
add $0x50, SRC
add $0x50, DST
jmp .Lenc_a_loop
.align 8
.Lenc_u_loop:
encrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0
encrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1
encrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2
encrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3
encrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4
add $0x50, SRC
add $0x50, DST
jmp .Lenc_u_loop
jmp .Lenc_loop
/* store the state: */
.Lenc_out_0:
@ -452,7 +337,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE1, 0x20(STATEP)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
RET
.Lenc_out_1:
@ -461,7 +345,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE0, 0x20(STATEP)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
RET
.Lenc_out_2:
@ -470,7 +353,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE4, 0x20(STATEP)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
RET
.Lenc_out_3:
@ -479,7 +361,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE3, 0x20(STATEP)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
RET
.Lenc_out_4:
@ -488,20 +369,19 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE2, 0x20(STATEP)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
RET
.Lenc_out:
FRAME_END
RET
SYM_FUNC_END(crypto_aegis128_aesni_enc)
SYM_FUNC_END(aegis128_aesni_enc)
/*
* void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length,
* const void *src, void *dst);
* void aegis128_aesni_enc_tail(struct aegis_state *state, const u8 *src,
* u8 *dst, unsigned int len);
*/
SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail)
FRAME_BEGIN
SYM_FUNC_START(aegis128_aesni_enc_tail)
.set STATEP, %rdi
.set SRC, %rsi
.set DST, %rdx
.set LEN, %ecx /* {load,store}_partial rely on this being %ecx */
/* load the state: */
movdqu 0x00(STATEP), STATE0
@ -511,7 +391,8 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail)
movdqu 0x40(STATEP), STATE4
/* encrypt message: */
-call __load_partial
+mov LEN, %r9d
+load_partial
movdqa MSG, T0
pxor STATE1, T0
@ -520,7 +401,8 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail)
pand STATE3, T1
pxor T1, T0
-call __store_partial
+mov %r9d, LEN
+store_partial T0
aegis128_update
pxor MSG, STATE4
@ -531,37 +413,36 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_enc_tail)
movdqu STATE1, 0x20(STATEP)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
-FRAME_END
RET
-SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
+SYM_FUNC_END(aegis128_aesni_enc_tail)
-.macro decrypt_block a s0 s1 s2 s3 s4 i
-movdq\a (\i * 0x10)(SRC), MSG
+.macro decrypt_block s0 s1 s2 s3 s4 i
+movdqu (\i * 0x10)(SRC), MSG
pxor \s1, MSG
pxor \s4, MSG
movdqa \s2, T1
pand \s3, T1
pxor T1, MSG
-movdq\a MSG, (\i * 0x10)(DST)
+movdqu MSG, (\i * 0x10)(DST)
aegis128_update
pxor MSG, \s4
sub $0x10, LEN
-cmp $0x10, LEN
-jl .Ldec_out_\i
+jz .Ldec_out_\i
.endm
/*
- * void crypto_aegis128_aesni_dec(void *state, unsigned int length,
- * const void *src, void *dst);
+ * void aegis128_aesni_dec(struct aegis_state *state, const u8 *src, u8 *dst,
+ * unsigned int len);
*
* len must be nonzero and a multiple of 16.
*/
-SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
-FRAME_BEGIN
-cmp $0x10, LEN
-jb .Ldec_out
+SYM_FUNC_START(aegis128_aesni_dec)
+.set STATEP, %rdi
+.set SRC, %rsi
+.set DST, %rdx
+.set LEN, %ecx
/* load the state: */
movdqu 0x00(STATEP), STATE0
@ -570,34 +451,17 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
movdqu 0x30(STATEP), STATE3
movdqu 0x40(STATEP), STATE4
-mov SRC, %r8
-or DST, %r8
-and $0xF, %r8
-jnz .Ldec_u_loop
.align 8
-.Ldec_a_loop:
-decrypt_block a STATE0 STATE1 STATE2 STATE3 STATE4 0
-decrypt_block a STATE4 STATE0 STATE1 STATE2 STATE3 1
-decrypt_block a STATE3 STATE4 STATE0 STATE1 STATE2 2
-decrypt_block a STATE2 STATE3 STATE4 STATE0 STATE1 3
-decrypt_block a STATE1 STATE2 STATE3 STATE4 STATE0 4
+.Ldec_loop:
+decrypt_block STATE0 STATE1 STATE2 STATE3 STATE4 0
+decrypt_block STATE4 STATE0 STATE1 STATE2 STATE3 1
+decrypt_block STATE3 STATE4 STATE0 STATE1 STATE2 2
+decrypt_block STATE2 STATE3 STATE4 STATE0 STATE1 3
+decrypt_block STATE1 STATE2 STATE3 STATE4 STATE0 4
add $0x50, SRC
add $0x50, DST
-jmp .Ldec_a_loop
-.align 8
-.Ldec_u_loop:
-decrypt_block u STATE0 STATE1 STATE2 STATE3 STATE4 0
-decrypt_block u STATE4 STATE0 STATE1 STATE2 STATE3 1
-decrypt_block u STATE3 STATE4 STATE0 STATE1 STATE2 2
-decrypt_block u STATE2 STATE3 STATE4 STATE0 STATE1 3
-decrypt_block u STATE1 STATE2 STATE3 STATE4 STATE0 4
-add $0x50, SRC
-add $0x50, DST
-jmp .Ldec_u_loop
+jmp .Ldec_loop
/* store the state: */
.Ldec_out_0:
@ -606,7 +470,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE1, 0x20(STATEP)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
-FRAME_END
RET
.Ldec_out_1:
@ -615,7 +478,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE0, 0x20(STATEP)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
-FRAME_END
RET
.Ldec_out_2:
@ -624,7 +486,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE4, 0x20(STATEP)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
-FRAME_END
RET
.Ldec_out_3:
@ -633,7 +494,6 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE3, 0x20(STATEP)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
-FRAME_END
RET
.Ldec_out_4:
@ -642,20 +502,19 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE2, 0x20(STATEP)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
-FRAME_END
RET
-.Ldec_out:
-FRAME_END
-RET
-SYM_FUNC_END(crypto_aegis128_aesni_dec)
+SYM_FUNC_END(aegis128_aesni_dec)
/*
- * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length,
- * const void *src, void *dst);
+ * void aegis128_aesni_dec_tail(struct aegis_state *state, const u8 *src,
+ * u8 *dst, unsigned int len);
*/
-SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
-FRAME_BEGIN
+SYM_FUNC_START(aegis128_aesni_dec_tail)
+.set STATEP, %rdi
+.set SRC, %rsi
+.set DST, %rdx
+.set LEN, %ecx /* {load,store}_partial rely on this being %ecx */
/* load the state: */
movdqu 0x00(STATEP), STATE0
@ -665,7 +524,8 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
movdqu 0x40(STATEP), STATE4
/* decrypt message: */
-call __load_partial
+mov LEN, %r9d
+load_partial
pxor STATE1, MSG
pxor STATE4, MSG
@ -673,17 +533,13 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
pand STATE3, T1
pxor T1, MSG
-movdqa MSG, T0
-call __store_partial
+mov %r9d, LEN
+store_partial MSG
/* mask with byte count: */
-movq LEN, T0
-punpcklbw T0, T0
-punpcklbw T0, T0
-punpcklbw T0, T0
-punpcklbw T0, T0
-movdqa .Laegis128_counter(%rip), T1
-pcmpgtb T1, T0
+lea .Lzeropad_mask+16(%rip), %rax
+sub %r9, %rax
+movdqu (%rax), T0
pand T0, MSG
aegis128_update
@ -695,17 +551,19 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail)
movdqu STATE1, 0x20(STATEP)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
-FRAME_END
RET
-SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
+SYM_FUNC_END(aegis128_aesni_dec_tail)
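The new tail path masks the partial block with a table load instead of the old pcmpgtb byte-count compare. A C model of the trick, assuming .Lzeropad_mask is 16 bytes of 0xff followed by 16 bytes of 0x00 as defined elsewhere in this series:

    /* Sketch, not kernel code: keep the first len bytes, zero the rest. */
    static const unsigned char zeropad_mask[32] = {
            [0 ... 15] = 0xff,      /* GCC designated-range initializer */
    };

    static void mask_tail(unsigned char msg[16], unsigned int len)
    {
            const unsigned char *m = &zeropad_mask[16 - len]; /* 1 <= len <= 15 */

            for (int i = 0; i < 16; i++)
                    msg[i] &= m[i];
    }

Loading from .Lzeropad_mask + 16 - len yields len bytes of 0xff followed by zeroes, which is exactly the operand the pand above consumes.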
/*
- * void crypto_aegis128_aesni_final(void *state, void *tag_xor,
- * u64 assoclen, u64 cryptlen);
+ * void aegis128_aesni_final(struct aegis_state *state,
+ * struct aegis_block *tag_xor,
+ * unsigned int assoclen, unsigned int cryptlen);
*/
-SYM_FUNC_START(crypto_aegis128_aesni_final)
-FRAME_BEGIN
+SYM_FUNC_START(aegis128_aesni_final)
+.set STATEP, %rdi
+.set TAG_XOR, %rsi
+.set ASSOCLEN, %edx
+.set CRYPTLEN, %ecx
/* load the state: */
movdqu 0x00(STATEP), STATE0
@ -715,10 +573,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
movdqu 0x40(STATEP), STATE4
/* prepare length block: */
-movq %rdx, MSG
-movq %rcx, T0
-pslldq $8, T0
-pxor T0, MSG
+movd ASSOCLEN, MSG
+pinsrd $2, CRYPTLEN, MSG
psllq $3, MSG /* multiply by 8 (to get bit count) */
pxor STATE3, MSG
@ -733,7 +589,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
aegis128_update; pxor MSG, STATE3
/* xor tag: */
-movdqu (%rsi), MSG
+movdqu (TAG_XOR), MSG
pxor STATE0, MSG
pxor STATE1, MSG
@ -741,8 +597,6 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
pxor STATE3, MSG
pxor STATE4, MSG
-movdqu MSG, (%rsi)
-FRAME_END
+movdqu MSG, (TAG_XOR)
RET
-SYM_FUNC_END(crypto_aegis128_aesni_final)
+SYM_FUNC_END(aegis128_aesni_final)
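The movd/pinsrd/psllq sequence above builds the AEGIS-128 length block directly in an XMM register. In C terms (a sketch; little-endian layout assumed, put_unaligned_le64() as in <linux/unaligned.h>):

    u8 lenblk[16];

    /* Bit counts of the associated data and the ciphertext,
     * as two little-endian 64-bit words. */
    put_unaligned_le64((u64)assoclen * 8, &lenblk[0]);
    put_unaligned_le64((u64)cryptlen * 8, &lenblk[8]);

The psllq $3 multiplies both 64-bit lanes by 8 in a single instruction, converting byte counts to bit counts.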


@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The AEGIS-128 Authenticated-Encryption Algorithm
- * Glue for AES-NI + SSE2 implementation
+ * Glue for AES-NI + SSE4.1 implementation
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <omosnacek@gmail.com>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
@ -23,27 +23,6 @@
#define AEGIS128_MIN_AUTH_SIZE 8
#define AEGIS128_MAX_AUTH_SIZE 16
asmlinkage void crypto_aegis128_aesni_init(void *state, void *key, void *iv);
asmlinkage void crypto_aegis128_aesni_ad(
void *state, unsigned int length, const void *data);
asmlinkage void crypto_aegis128_aesni_enc(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_dec(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_enc_tail(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_dec_tail(
void *state, unsigned int length, const void *src, void *dst);
asmlinkage void crypto_aegis128_aesni_final(
void *state, void *tag_xor, unsigned int cryptlen,
unsigned int assoclen);
struct aegis_block {
u8 bytes[AEGIS128_BLOCK_SIZE] __aligned(AEGIS128_BLOCK_ALIGN);
};
@ -56,15 +35,31 @@ struct aegis_ctx {
struct aegis_block key;
};
-struct aegis_crypt_ops {
-int (*skcipher_walk_init)(struct skcipher_walk *walk,
-struct aead_request *req, bool atomic);
+asmlinkage void aegis128_aesni_init(struct aegis_state *state,
+const struct aegis_block *key,
+const u8 iv[AEGIS128_NONCE_SIZE]);
-void (*crypt_blocks)(void *state, unsigned int length, const void *src,
-void *dst);
-void (*crypt_tail)(void *state, unsigned int length, const void *src,
-void *dst);
-};
+asmlinkage void aegis128_aesni_ad(struct aegis_state *state, const u8 *data,
+unsigned int len);
+asmlinkage void aegis128_aesni_enc(struct aegis_state *state, const u8 *src,
+u8 *dst, unsigned int len);
+asmlinkage void aegis128_aesni_dec(struct aegis_state *state, const u8 *src,
+u8 *dst, unsigned int len);
+asmlinkage void aegis128_aesni_enc_tail(struct aegis_state *state,
+const u8 *src, u8 *dst,
+unsigned int len);
+asmlinkage void aegis128_aesni_dec_tail(struct aegis_state *state,
+const u8 *src, u8 *dst,
+unsigned int len);
+asmlinkage void aegis128_aesni_final(struct aegis_state *state,
+struct aegis_block *tag_xor,
+unsigned int assoclen,
+unsigned int cryptlen);
static void crypto_aegis128_aesni_process_ad(
struct aegis_state *state, struct scatterlist *sg_src,
@ -85,16 +80,15 @@ static void crypto_aegis128_aesni_process_ad(
if (pos > 0) {
unsigned int fill = AEGIS128_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
-crypto_aegis128_aesni_ad(state,
-AEGIS128_BLOCK_SIZE,
-buf.bytes);
+aegis128_aesni_ad(state, buf.bytes,
+AEGIS128_BLOCK_SIZE);
pos = 0;
left -= fill;
src += fill;
}
-crypto_aegis128_aesni_ad(state, left, src);
+aegis128_aesni_ad(state, src,
+left & ~(AEGIS128_BLOCK_SIZE - 1));
src += left & ~(AEGIS128_BLOCK_SIZE - 1);
left &= AEGIS128_BLOCK_SIZE - 1;
}
@ -110,24 +104,37 @@ static void crypto_aegis128_aesni_process_ad(
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS128_BLOCK_SIZE - pos);
-crypto_aegis128_aesni_ad(state, AEGIS128_BLOCK_SIZE, buf.bytes);
+aegis128_aesni_ad(state, buf.bytes, AEGIS128_BLOCK_SIZE);
}
}
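Since aegis128_aesni_ad() now only accepts whole blocks, the glue masks the length before calling it. A worked example (illustration only, values hypothetical):

    unsigned int left = 37;                                 /* bytes of AD */
    unsigned int whole = left & ~(AEGIS128_BLOCK_SIZE - 1); /* 32 */

    aegis128_aesni_ad(state, src, whole);   /* two full 16-byte blocks */
    src += whole;
    left &= AEGIS128_BLOCK_SIZE - 1;        /* 5 bytes buffered for later */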
-static void crypto_aegis128_aesni_process_crypt(
-struct aegis_state *state, struct skcipher_walk *walk,
-const struct aegis_crypt_ops *ops)
+static __always_inline void
+crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
+struct skcipher_walk *walk, bool enc)
{
while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
-ops->crypt_blocks(state,
-round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
-walk->src.virt.addr, walk->dst.virt.addr);
+if (enc)
+aegis128_aesni_enc(state, walk->src.virt.addr,
+walk->dst.virt.addr,
+round_down(walk->nbytes,
+AEGIS128_BLOCK_SIZE));
+else
+aegis128_aesni_dec(state, walk->src.virt.addr,
+walk->dst.virt.addr,
+round_down(walk->nbytes,
+AEGIS128_BLOCK_SIZE));
skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
}
if (walk->nbytes) {
-ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
-walk->dst.virt.addr);
+if (enc)
+aegis128_aesni_enc_tail(state, walk->src.virt.addr,
+walk->dst.virt.addr,
+walk->nbytes);
+else
+aegis128_aesni_dec_tail(state, walk->src.virt.addr,
+walk->dst.virt.addr,
+walk->nbytes);
skcipher_walk_done(walk, 0);
}
}
@ -162,42 +169,39 @@ static int crypto_aegis128_aesni_setauthsize(struct crypto_aead *tfm,
return 0;
}
-static void crypto_aegis128_aesni_crypt(struct aead_request *req,
+static __always_inline void
+crypto_aegis128_aesni_crypt(struct aead_request *req,
struct aegis_block *tag_xor,
-unsigned int cryptlen,
-const struct aegis_crypt_ops *ops)
+unsigned int cryptlen, bool enc)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
struct skcipher_walk walk;
struct aegis_state state;
-ops->skcipher_walk_init(&walk, req, true);
+if (enc)
+skcipher_walk_aead_encrypt(&walk, req, true);
+else
+skcipher_walk_aead_decrypt(&walk, req, true);
kernel_fpu_begin();
-crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
+aegis128_aesni_init(&state, &ctx->key, req->iv);
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
-crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
+crypto_aegis128_aesni_process_crypt(&state, &walk, enc);
+aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
}
static int crypto_aegis128_aesni_encrypt(struct aead_request *req)
{
-static const struct aegis_crypt_ops OPS = {
-.skcipher_walk_init = skcipher_walk_aead_encrypt,
-.crypt_blocks = crypto_aegis128_aesni_enc,
-.crypt_tail = crypto_aegis128_aesni_enc_tail,
-};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen;
-crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS);
+crypto_aegis128_aesni_crypt(req, &tag, cryptlen, true);
scatterwalk_map_and_copy(tag.bytes, req->dst,
req->assoclen + cryptlen, authsize, 1);
@ -208,12 +212,6 @@ static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
{
static const struct aegis_block zeros = {};
-static const struct aegis_crypt_ops OPS = {
-.skcipher_walk_init = skcipher_walk_aead_decrypt,
-.crypt_blocks = crypto_aegis128_aesni_dec,
-.crypt_tail = crypto_aegis128_aesni_dec_tail,
-};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
@ -222,27 +220,16 @@ static int crypto_aegis128_aesni_decrypt(struct aead_request *req)
scatterwalk_map_and_copy(tag.bytes, req->src,
req->assoclen + cryptlen, authsize, 0);
-crypto_aegis128_aesni_crypt(req, &tag, cryptlen, &OPS);
+crypto_aegis128_aesni_crypt(req, &tag, cryptlen, false);
return crypto_memneq(tag.bytes, zeros.bytes, authsize) ? -EBADMSG : 0;
}
-static int crypto_aegis128_aesni_init_tfm(struct crypto_aead *aead)
-{
-return 0;
-}
-static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
-{
-}
static struct aead_alg crypto_aegis128_aesni_alg = {
.setkey = crypto_aegis128_aesni_setkey,
.setauthsize = crypto_aegis128_aesni_setauthsize,
.encrypt = crypto_aegis128_aesni_encrypt,
.decrypt = crypto_aegis128_aesni_decrypt,
-.init = crypto_aegis128_aesni_init_tfm,
-.exit = crypto_aegis128_aesni_exit_tfm,
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
@ -267,7 +254,7 @@ static struct simd_aead_alg *simd_alg;
static int __init crypto_aegis128_aesni_module_init(void)
{
-if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+if (!boot_cpu_has(X86_FEATURE_XMM4_1) ||
!boot_cpu_has(X86_FEATURE_AES) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
@ -286,6 +273,6 @@ module_exit(crypto_aegis128_aesni_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
-MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm -- AESNI+SSE2 implementation");
+MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm -- AESNI+SSE4.1 implementation");
MODULE_ALIAS_CRYPTO("aegis128");
MODULE_ALIAS_CRYPTO("aegis128-aesni");


@ -1747,7 +1747,7 @@ static void __exit aesni_exit(void)
unregister_avx_algs();
}
-late_initcall(aesni_init);
+module_init(aesni_init);
module_exit(aesni_exit);
MODULE_DESCRIPTION("AES cipher and modes, optimized with AES-NI or VAES instructions");


@ -487,79 +487,3 @@ SYM_FUNC_START(cast5_cbc_dec_16way)
FRAME_END
RET;
SYM_FUNC_END(cast5_cbc_dec_16way)
SYM_FUNC_START(cast5_ctr_16way)
/* input:
* %rdi: ctx
* %rsi: dst
* %rdx: src
* %rcx: iv (big endian, 64bit)
*/
FRAME_BEGIN
pushq %r12;
pushq %r15;
movq %rdi, CTX;
movq %rsi, %r11;
movq %rdx, %r12;
vpcmpeqd RTMP, RTMP, RTMP;
vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
vpcmpeqd RKR, RKR, RKR;
vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
vmovdqa .Lbswap_iv_mask(%rip), R1ST;
vmovdqa .Lbswap128_mask(%rip), RKM;
/* load IV and byteswap */
vmovq (%rcx), RX;
vpshufb R1ST, RX, RX;
/* construct IVs */
vpsubq RTMP, RX, RX; /* le: IV1, IV0 */
vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
vpsubq RKR, RX, RX;
vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
vpsubq RKR, RX, RX;
vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
vpsubq RKR, RX, RX;
vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
vpsubq RKR, RX, RX;
vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
vpsubq RKR, RX, RX;
vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
vpsubq RKR, RX, RX;
vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
vpsubq RKR, RX, RX;
vpshufb RKM, RX, RR4; /* be: IV14, IV15 */
/* store last IV */
vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
vmovq RX, (%rcx);
call __cast5_enc_blk16;
/* dst = src ^ iv */
vpxor (0*16)(%r12), RR1, RR1;
vpxor (1*16)(%r12), RL1, RL1;
vpxor (2*16)(%r12), RR2, RR2;
vpxor (3*16)(%r12), RL2, RL2;
vpxor (4*16)(%r12), RR3, RR3;
vpxor (5*16)(%r12), RL3, RL3;
vpxor (6*16)(%r12), RR4, RR4;
vpxor (7*16)(%r12), RL4, RL4;
vmovdqu RR1, (0*16)(%r11);
vmovdqu RL1, (1*16)(%r11);
vmovdqu RR2, (2*16)(%r11);
vmovdqu RL2, (3*16)(%r11);
vmovdqu RR3, (4*16)(%r11);
vmovdqu RL3, (5*16)(%r11);
vmovdqu RR4, (6*16)(%r11);
vmovdqu RL4, (7*16)(%r11);
popq %r15;
popq %r12;
FRAME_END
RET;
SYM_FUNC_END(cast5_ctr_16way)


@ -41,7 +41,7 @@
*/
#define CRC32C_PCL_BREAKEVEN 512
-asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
+asmlinkage unsigned int crc_pcl(const u8 *buffer, unsigned int len,
unsigned int crc_init);
#endif /* CONFIG_X86_64 */
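For context, the breakeven constant above gates the PCLMULQDQ-assisted path. Roughly how the glue is expected to dispatch — a sketch, with crc32c_scalar() standing in for the driver's plain crc32-instruction fallback (hypothetical name):

    static u32 crc32c_update(u32 crc, const u8 *data, unsigned int len)
    {
            /* crc_pcl() needs the FPU and only wins past the breakeven. */
            if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
                    kernel_fpu_begin();
                    crc = crc_pcl(data, len, crc);
                    kernel_fpu_end();
                    return crc;
            }
            return crc32c_scalar(crc, data, len);   /* hypothetical helper */
    }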


@ -7,6 +7,7 @@
* http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-paper.pdf
*
* Copyright (C) 2012 Intel Corporation.
* Copyright 2024 Google LLC
*
* Authors:
* Wajdi Feghali <wajdi.k.feghali@intel.com>
@ -44,185 +45,129 @@
*/
#include <linux/linkage.h>
#include <asm/nospec-branch.h>
-## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
-.macro LABEL prefix n
-.L\prefix\n\():
-.endm
-.macro JMPTBL_ENTRY i
-.quad .Lcrc_\i
-.endm
-.macro JNC_LESS_THAN j
-jnc .Lless_than_\j
-.endm
-# Define threshold where buffers are considered "small" and routed to more
-# efficient "by-1" code. This "by-1" code only handles up to 255 bytes, so
-# SMALL_SIZE can be no larger than 255.
+# Define threshold below which buffers are considered "small" and routed to
+# regular CRC code that does not interleave the CRC instructions.
#define SMALL_SIZE 200
-.if (SMALL_SIZE > 255)
-.error "SMALL_ SIZE must be < 256"
-.endif
-# unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
+# unsigned int crc_pcl(const u8 *buffer, unsigned int len, unsigned int crc_init);
.text
SYM_FUNC_START(crc_pcl)
-#define bufp rdi
-#define bufp_dw %edi
-#define bufp_w %di
-#define bufp_b %dil
-#define bufptmp %rcx
-#define block_0 %rcx
-#define block_1 %rdx
-#define block_2 %r11
-#define len %rsi
-#define len_dw %esi
-#define len_w %si
-#define len_b %sil
-#define crc_init_arg %rdx
-#define tmp %rbx
-#define crc_init %r8
-#define crc_init_dw %r8d
-#define crc1 %r9
-#define crc2 %r10
+#define bufp %rdi
+#define bufp_d %edi
+#define len %esi
+#define crc_init %edx
+#define crc_init_q %rdx
+#define n_misaligned %ecx /* overlaps chunk_bytes! */
+#define n_misaligned_q %rcx
+#define chunk_bytes %ecx /* overlaps n_misaligned! */
+#define chunk_bytes_q %rcx
+#define crc1 %r8
+#define crc2 %r9
-pushq %rbx
-pushq %rdi
-pushq %rsi
-## Move crc_init for Linux to a different
-mov crc_init_arg, crc_init
+cmp $SMALL_SIZE, len
+jb .Lsmall
################################################################
## 1) ALIGN:
################################################################
-mov %bufp, bufptmp # rdi = *buf
-neg %bufp
-and $7, %bufp # calculate the unalignment amount of
+mov bufp_d, n_misaligned
+neg n_misaligned
+and $7, n_misaligned # calculate the misalignment amount of
# the address
-je .Lproc_block # Skip if aligned
-## If len is less than 8 and we're unaligned, we need to jump
-## to special code to avoid reading beyond the end of the buffer
-cmp $8, len
-jae .Ldo_align
-# less_than_8 expects length in upper 3 bits of len_dw
-# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
-shl $32-3+1, len_dw
-jmp .Lless_than_8_post_shl1
+je .Laligned # Skip if aligned
+# Process 1 <= n_misaligned <= 7 bytes individually in order to align
+# the remaining data to an 8-byte boundary.
.Ldo_align:
-#### Calculate CRC of unaligned bytes of the buffer (if any)
-movq (bufptmp), tmp # load a quadward from the buffer
-add %bufp, bufptmp # align buffer pointer for quadword
-# processing
-sub %bufp, len # update buffer length
+movq (bufp), %rax
+add n_misaligned_q, bufp
+sub n_misaligned, len
.Lalign_loop:
-crc32b %bl, crc_init_dw # compute crc32 of 1-byte
-shr $8, tmp # get next byte
-dec %bufp
+crc32b %al, crc_init # compute crc32 of 1-byte
+shr $8, %rax # get next byte
+dec n_misaligned
jne .Lalign_loop
-.Lproc_block:
+.Laligned:
################################################################
-## 2) PROCESS BLOCKS:
+## 2) PROCESS BLOCK:
################################################################
-## compute num of bytes to be processed
-movq len, tmp # save num bytes in tmp
-cmpq $128*24, len
+cmp $128*24, len
jae .Lfull_block
-.Lcontinue_block:
-cmpq $SMALL_SIZE, len
-jb .Lsmall
+.Lpartial_block:
+# Compute floor(len / 24) to get num qwords to process from each lane.
+imul $2731, len, %eax # 2731 = ceil(2^16 / 24)
+shr $16, %eax
+jmp .Lcrc_3lanes
-## len < 128*24
-movq $2731, %rax # 2731 = ceil(2^16 / 24)
-mul len_dw
-shrq $16, %rax
-## eax contains floor(bytes / 24) = num 24-byte chunks to do
-## process rax 24-byte chunks (128 >= rax >= 0)
-## compute end address of each block
-## block 0 (base addr + RAX * 8)
-## block 1 (base addr + RAX * 16)
-## block 2 (base addr + RAX * 24)
-lea (bufptmp, %rax, 8), block_0
-lea (block_0, %rax, 8), block_1
-lea (block_1, %rax, 8), block_2
-xor crc1, crc1
-xor crc2, crc2
-## branch into array
-leaq jump_table(%rip), %bufp
-mov (%bufp,%rax,8), %bufp
-JMP_NOSPEC bufp
-################################################################
-## 2a) PROCESS FULL BLOCKS:
-################################################################
.Lfull_block:
-movl $128,%eax
-lea 128*8*2(block_0), block_1
-lea 128*8*3(block_0), block_2
-add $128*8*1, block_0
+# Processing 128 qwords from each lane.
+mov $128, %eax
+################################################################
+## 3) CRC each of three lanes:
+################################################################
+.Lcrc_3lanes:
+xor crc1,crc1
+xor crc2,crc2
+mov %eax, chunk_bytes
+shl $3, chunk_bytes # num bytes to process from each lane
+sub $5, %eax # 4 for 4x_loop, 1 for special last iter
+jl .Lcrc_3lanes_4x_done
-# Fall through into top of crc array (crc_128)
+# Unroll the loop by a factor of 4 to reduce the overhead of the loop
+# bookkeeping instructions, which can compete with crc32q for the ALUs.
+.Lcrc_3lanes_4x_loop:
+crc32q (bufp), crc_init_q
+crc32q (bufp,chunk_bytes_q), crc1
+crc32q (bufp,chunk_bytes_q,2), crc2
+crc32q 8(bufp), crc_init_q
+crc32q 8(bufp,chunk_bytes_q), crc1
+crc32q 8(bufp,chunk_bytes_q,2), crc2
+crc32q 16(bufp), crc_init_q
+crc32q 16(bufp,chunk_bytes_q), crc1
+crc32q 16(bufp,chunk_bytes_q,2), crc2
+crc32q 24(bufp), crc_init_q
+crc32q 24(bufp,chunk_bytes_q), crc1
+crc32q 24(bufp,chunk_bytes_q,2), crc2
+add $32, bufp
+sub $4, %eax
+jge .Lcrc_3lanes_4x_loop
-################################################################
-## 3) CRC Array:
-################################################################
+.Lcrc_3lanes_4x_done:
+add $4, %eax
+jz .Lcrc_3lanes_last_qword
-i=128
-.rept 128-1
-.altmacro
-LABEL crc_ %i
-.noaltmacro
-ENDBR
-crc32q -i*8(block_0), crc_init
-crc32q -i*8(block_1), crc1
-crc32q -i*8(block_2), crc2
-i=(i-1)
-.endr
+.Lcrc_3lanes_1x_loop:
+crc32q (bufp), crc_init_q
+crc32q (bufp,chunk_bytes_q), crc1
+crc32q (bufp,chunk_bytes_q,2), crc2
+add $8, bufp
+dec %eax
+jnz .Lcrc_3lanes_1x_loop
-.altmacro
-LABEL crc_ %i
-.noaltmacro
-ENDBR
-crc32q -i*8(block_0), crc_init
-crc32q -i*8(block_1), crc1
-# SKIP crc32 -i*8(block_2), crc2 ; Don't do this one yet
-mov block_2, block_0
+.Lcrc_3lanes_last_qword:
+crc32q (bufp), crc_init_q
+crc32q (bufp,chunk_bytes_q), crc1
+# SKIP crc32q (bufp,chunk_bytes_q,2), crc2 ; Don't do this one yet
################################################################
## 4) Combine three results:
################################################################
-lea (K_table-8)(%rip), %bufp # first entry is for idx 1
-shlq $3, %rax # rax *= 8
-pmovzxdq (%bufp,%rax), %xmm0 # 2 consts: K1:K2
-leal (%eax,%eax,2), %eax # rax *= 3 (total *24)
-subq %rax, tmp # tmp -= rax*24
+lea (K_table-8)(%rip), %rax # first entry is for idx 1
+pmovzxdq (%rax,chunk_bytes_q), %xmm0 # 2 consts: K1:K2
+lea (chunk_bytes,chunk_bytes,2), %eax # chunk_bytes * 3
+sub %eax, len # len -= chunk_bytes * 3
-movq crc_init, %xmm1 # CRC for block 1
+movq crc_init_q, %xmm1 # CRC for block 1
pclmulqdq $0x00, %xmm0, %xmm1 # Multiply by K2
movq crc1, %xmm2 # CRC for block 2
@ -230,103 +175,54 @@ LABEL crc_ %i
pxor %xmm2,%xmm1
movq %xmm1, %rax
-xor -i*8(block_2), %rax
-mov crc2, crc_init
-crc32 %rax, crc_init
+xor (bufp,chunk_bytes_q,2), %rax
+mov crc2, crc_init_q
+crc32 %rax, crc_init_q
+lea 8(bufp,chunk_bytes_q,2), bufp
################################################################
-## 5) Check for end:
+## 5) If more blocks remain, goto (2):
################################################################
-LABEL crc_ 0
-ENDBR
-mov tmp, len
-cmp $128*24, tmp
+cmp $128*24, len
jae .Lfull_block
-cmp $24, tmp
-jae .Lcontinue_block
-.Lless_than_24:
-shl $32-4, len_dw # less_than_16 expects length
-# in upper 4 bits of len_dw
-jnc .Lless_than_16
-crc32q (bufptmp), crc_init
-crc32q 8(bufptmp), crc_init
-jz .Ldo_return
-add $16, bufptmp
-# len is less than 8 if we got here
-# less_than_8 expects length in upper 3 bits of len_dw
-# less_than_8_post_shl1 expects length = carryflag * 8 + len_dw[31:30]
-shl $2, len_dw
-jmp .Lless_than_8_post_shl1
+cmp $SMALL_SIZE, len
+jae .Lpartial_block
#######################################################################
-## 6) LESS THAN 256-bytes REMAIN AT THIS POINT (8-bits of len are full)
+## 6) Process any remainder without interleaving:
#######################################################################
.Lsmall:
-shl $32-8, len_dw # Prepare len_dw for less_than_256
-j=256
-.rept 5 # j = {256, 128, 64, 32, 16}
-.altmacro
-LABEL less_than_ %j # less_than_j: Length should be in
-# upper lg(j) bits of len_dw
-j=(j/2)
-shl $1, len_dw # Get next MSB
-JNC_LESS_THAN %j
-.noaltmacro
-i=0
-.rept (j/8)
-crc32q i(bufptmp), crc_init # Compute crc32 of 8-byte data
-i=i+8
-.endr
-jz .Ldo_return # Return if remaining length is zero
-add $j, bufptmp # Advance buf
-.endr
-.Lless_than_8: # Length should be stored in
-# upper 3 bits of len_dw
-shl $1, len_dw
-.Lless_than_8_post_shl1:
-jnc .Lless_than_4
-crc32l (bufptmp), crc_init_dw # CRC of 4 bytes
-jz .Ldo_return # return if remaining data is zero
-add $4, bufptmp
-.Lless_than_4: # Length should be stored in
-# upper 2 bits of len_dw
-shl $1, len_dw
-jnc .Lless_than_2
-crc32w (bufptmp), crc_init_dw # CRC of 2 bytes
-jz .Ldo_return # return if remaining data is zero
-add $2, bufptmp
-.Lless_than_2: # Length should be stored in the MSB
-# of len_dw
-shl $1, len_dw
-jnc .Lless_than_1
-crc32b (bufptmp), crc_init_dw # CRC of 1 byte
-.Lless_than_1: # Length should be zero
-.Ldo_return:
-movq crc_init, %rax
-popq %rsi
-popq %rdi
-popq %rbx
+test len, len
+jz .Ldone
+mov len, %eax
+shr $3, %eax
+jz .Ldo_dword
+.Ldo_qwords:
+crc32q (bufp), crc_init_q
+add $8, bufp
+dec %eax
+jnz .Ldo_qwords
+.Ldo_dword:
+test $4, len
+jz .Ldo_word
+crc32l (bufp), crc_init
+add $4, bufp
+.Ldo_word:
+test $2, len
+jz .Ldo_byte
+crc32w (bufp), crc_init
+add $2, bufp
+.Ldo_byte:
+test $1, len
+jz .Ldone
+crc32b (bufp), crc_init
+.Ldone:
+mov crc_init, %eax
RET
SYM_FUNC_END(crc_pcl)
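The imul $2731 in .Lpartial_block is a fixed-point reciprocal: 2731 = ceil(2^16 / 24), so (len * 2731) >> 16 equals len / 24 for every len this path can see (len < 128*24). A userspace sketch that verifies the identity exhaustively over that range:

    #include <assert.h>

    int main(void)
    {
            for (unsigned int len = 0; len < 128 * 24; len++)
                    assert((len * 2731) >> 16 == len / 24);
            return 0;
    }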
.section .rodata, "a", @progbits
-################################################################
-## jump table Table is 129 entries x 2 bytes each
-################################################################
-.align 4
-jump_table:
-i=0
-.rept 129
-.altmacro
-JMPTBL_ENTRY %i
-.noaltmacro
-i=i+1
-.endr
################################################################
## PCLMULQDQ tables
## Table is 128 entries x 2 words (8 bytes) each


@ -250,6 +250,7 @@ config CRYPTO_RSA
tristate "RSA (Rivest-Shamir-Adleman)"
select CRYPTO_AKCIPHER
select CRYPTO_MANAGER
select CRYPTO_SIG
select MPILIB
select ASN1
help
@ -290,19 +291,19 @@ config CRYPTO_ECDH
config CRYPTO_ECDSA
tristate "ECDSA (Elliptic Curve Digital Signature Algorithm)"
select CRYPTO_ECC
-select CRYPTO_AKCIPHER
+select CRYPTO_SIG
select ASN1
help
ECDSA (Elliptic Curve Digital Signature Algorithm) (FIPS 186,
ISO/IEC 14888-3)
-using curves P-192, P-256, and P-384
+using curves P-192, P-256, P-384 and P-521
Only signature verification is implemented.
config CRYPTO_ECRDSA
tristate "EC-RDSA (Elliptic Curve Russian Digital Signature Algorithm)"
select CRYPTO_ECC
-select CRYPTO_AKCIPHER
+select CRYPTO_SIG
select CRYPTO_STREEBOG
select OID_REGISTRY
select ASN1


@ -48,11 +48,14 @@ rsa_generic-y += rsaprivkey.asn1.o
rsa_generic-y += rsa.o
rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
rsa_generic-y += rsassa-pkcs1.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
-$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h
+$(obj)/ecdsa-x962.o: $(obj)/ecdsasignature.asn1.h
ecdsa_generic-y += ecdsa.o
ecdsa_generic-y += ecdsa-x962.o
ecdsa_generic-y += ecdsa-p1363.o
ecdsa_generic-y += ecdsasignature.asn1.o
obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o
@ -152,6 +155,8 @@ obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
CFLAGS_crc32c_generic.o += -DARCH=$(ARCH)
CFLAGS_crc32_generic.o += -DARCH=$(ARCH)
obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o


@ -20,6 +20,19 @@
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
struct crypto_akcipher_sync_data {
struct crypto_akcipher *tfm;
const void *src;
void *dst;
unsigned int slen;
unsigned int dlen;
struct akcipher_request *req;
struct crypto_wait cwait;
struct scatterlist sg;
u8 *buf;
};
static int __maybe_unused crypto_akcipher_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
@ -126,10 +139,6 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
-if (!alg->sign)
-alg->sign = akcipher_default_op;
-if (!alg->verify)
-alg->verify = akcipher_default_op;
if (!alg->encrypt)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
@ -158,7 +167,7 @@ int akcipher_register_instance(struct crypto_template *tmpl,
}
EXPORT_SYMBOL_GPL(akcipher_register_instance);
-int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
+static int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
{
unsigned int reqsize = crypto_akcipher_reqsize(data->tfm);
struct akcipher_request *req;
@ -167,10 +176,7 @@ int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
unsigned int len;
u8 *buf;
-if (data->dst)
mlen = max(data->slen, data->dlen);
-else
-mlen = data->slen + data->dlen;
len = sizeof(*req) + reqsize + mlen;
if (len < mlen)
@ -189,8 +195,7 @@ int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
sg = &data->sg;
sg_init_one(sg, buf, mlen);
-akcipher_request_set_crypt(req, sg, data->dst ? sg : NULL,
-data->slen, data->dlen);
+akcipher_request_set_crypt(req, sg, sg, data->slen, data->dlen);
crypto_init_wait(&data->cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
@ -198,18 +203,16 @@ int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
return 0;
}
-EXPORT_SYMBOL_GPL(crypto_akcipher_sync_prep);
-int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err)
+static int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data,
+int err)
{
err = crypto_wait_req(err, &data->cwait);
-if (data->dst)
-memcpy(data->dst, data->buf, data->dlen);
+data->dlen = data->req->dst_len;
kfree_sensitive(data->req);
return err;
}
-EXPORT_SYMBOL_GPL(crypto_akcipher_sync_post);
int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
const void *src, unsigned int slen,
@ -248,34 +251,5 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt);
static void crypto_exit_akcipher_ops_sig(struct crypto_tfm *tfm)
{
struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
crypto_free_akcipher(*ctx);
}
int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm)
{
struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *calg = tfm->__crt_alg;
struct crypto_akcipher *akcipher;
if (!crypto_mod_get(calg))
return -EAGAIN;
akcipher = crypto_create_tfm(calg, &crypto_akcipher_type);
if (IS_ERR(akcipher)) {
crypto_mod_put(calg);
return PTR_ERR(akcipher);
}
*ctx = akcipher;
tfm->exit = crypto_exit_akcipher_ops_sig;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_init_akcipher_ops_sig);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");


@ -6,7 +6,6 @@
*/
#include <crypto/algapi.h>
-#include <crypto/internal/simd.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fips.h>
@ -23,11 +22,6 @@
static LIST_HEAD(crypto_template_list);
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
-DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
-EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
-#endif
static inline void crypto_check_module_sig(struct module *mod)
{
if (fips_enabled && mod && !module_sig_ok(mod))


@ -83,13 +83,19 @@ software_key_determine_akcipher(const struct public_key *pkey,
if (strcmp(encoding, "pkcs1") == 0) {
*sig = op == kernel_pkey_sign ||
op == kernel_pkey_verify;
-if (!hash_algo) {
+if (!*sig) {
/*
* For encrypt/decrypt, hash_algo is not used
* but allowed to be set for historic reasons.
*/
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s)",
pkey->pkey_algo);
} else {
if (!hash_algo)
hash_algo = "none";
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
-"pkcs1pad(%s,%s)",
+"pkcs1(%s,%s)",
pkey->pkey_algo, hash_algo);
}
return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
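Concrete names this branch now produces, assuming an RSA key:

    /* encrypt/decrypt, encoding "pkcs1":            "pkcs1pad(rsa)"     */
    /* sign/verify, "pkcs1" with hash_algo "sha256": "pkcs1(rsa,sha256)" */
    /* sign/verify, "pkcs1" with no hash_algo:       "pkcs1(rsa,none)"   */

i.e. signatures are now handled by the new rsassa-pkcs1 template ("pkcs1"), while encryption keeps using pkcs1pad.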
@ -104,7 +110,8 @@ software_key_determine_akcipher(const struct public_key *pkey,
return -EINVAL;
*sig = false;
} else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
-if (strcmp(encoding, "x962") != 0)
+if (strcmp(encoding, "x962") != 0 &&
+strcmp(encoding, "p1363") != 0)
return -EINVAL;
/*
* ECDSA signatures are taken over a raw hash, so they don't
@ -124,6 +131,9 @@ software_key_determine_akcipher(const struct public_key *pkey,
strcmp(hash_algo, "sha3-384") != 0 &&
strcmp(hash_algo, "sha3-512") != 0)
return -EINVAL;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
encoding, pkey->pkey_algo);
return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
} else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) {
if (strcmp(encoding, "raw") != 0)
return -EINVAL;
@ -192,7 +202,9 @@ static int software_key_query(const struct kernel_pkey_params *params,
if (ret < 0)
goto error_free_tfm;
-len = crypto_sig_maxsize(sig);
+len = crypto_sig_keysize(sig);
+info->max_sig_size = crypto_sig_maxsize(sig);
+info->max_data_size = crypto_sig_digestsize(sig);
info->supported_ops = KEYCTL_SUPPORTS_VERIFY;
if (pkey->key_is_private)
@ -218,6 +230,8 @@ static int software_key_query(const struct kernel_pkey_params *params,
goto error_free_tfm;
len = crypto_akcipher_maxsize(tfm);
info->max_sig_size = len;
info->max_data_size = len;
info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
@ -225,40 +239,6 @@ static int software_key_query(const struct kernel_pkey_params *params,
}
info->key_size = len * 8;
-if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
-int slen = len;
-/*
- * ECDSA key sizes are much smaller than RSA, and thus could
- * operate on (hashed) inputs that are larger than key size.
- * For example SHA384-hashed input used with secp256r1
- * based keys. Set max_data_size to be at least as large as
- * the largest supported hash size (SHA512)
- */
-info->max_data_size = 64;
-/*
- * Verify takes ECDSA-Sig (described in RFC 5480) as input,
- * which is actually 2 'key_size'-bit integers encoded in
- * ASN.1. Account for the ASN.1 encoding overhead here.
- *
- * NIST P192/256/384 may prepend a '0' to a coordinate to
- * indicate a positive integer. NIST P521 never needs it.
- */
-if (strcmp(pkey->pkey_algo, "ecdsa-nist-p521") != 0)
-slen += 1;
-/* Length of encoding the x & y coordinates */
-slen = 2 * (slen + 2);
-/*
- * If coordinate encoding takes at least 128 bytes then an
- * additional byte for length encoding is needed.
- */
-info->max_sig_size = 1 + (slen >= 128) + 1 + slen;
-} else {
-info->max_data_size = len;
-info->max_sig_size = len;
-}
info->max_enc_size = len;
info->max_dec_size = len;
@ -323,7 +303,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
if (ret)
goto error_free_tfm;
-ksz = crypto_sig_maxsize(sig);
+ksz = crypto_sig_keysize(sig);
} else {
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {


@ -64,69 +64,6 @@ int query_asymmetric_key(const struct kernel_pkey_params *params,
}
EXPORT_SYMBOL_GPL(query_asymmetric_key);
/**
* encrypt_blob - Encrypt data using an asymmetric key
* @params: Various parameters
* @data: Data blob to be encrypted, length params->data_len
* @enc: Encrypted data buffer, length params->enc_len
*
* Encrypt the specified data blob using the private key specified by
* params->key. The encrypted data is wrapped in an encoding if
* params->encoding is specified (eg. "pkcs1").
*
* Returns the length of the data placed in the encrypted data buffer or an
* error.
*/
int encrypt_blob(struct kernel_pkey_params *params,
const void *data, void *enc)
{
params->op = kernel_pkey_encrypt;
return asymmetric_key_eds_op(params, data, enc);
}
EXPORT_SYMBOL_GPL(encrypt_blob);
/**
* decrypt_blob - Decrypt data using an asymmetric key
* @params: Various parameters
* @enc: Encrypted data to be decrypted, length params->enc_len
* @data: Decrypted data buffer, length params->data_len
*
* Decrypt the specified data blob using the private key specified by
* params->key. The decrypted data is wrapped in an encoding if
* params->encoding is specified (eg. "pkcs1").
*
* Returns the length of the data placed in the decrypted data buffer or an
* error.
*/
int decrypt_blob(struct kernel_pkey_params *params,
const void *enc, void *data)
{
params->op = kernel_pkey_decrypt;
return asymmetric_key_eds_op(params, enc, data);
}
EXPORT_SYMBOL_GPL(decrypt_blob);
/**
* create_signature - Sign some data using an asymmetric key
* @params: Various parameters
* @data: Data blob to be signed, length params->data_len
* @enc: Signature buffer, length params->enc_len
*
* Sign the specified data blob using the private key specified by params->key.
* The signature is wrapped in an encoding if params->encoding is specified
* (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
* passed through params->hash_algo (eg. "sha1").
*
* Returns the length of the data placed in the signature buffer or an error.
*/
int create_signature(struct kernel_pkey_params *params,
const void *data, void *enc)
{
params->op = kernel_pkey_sign;
return asymmetric_key_eds_op(params, data, enc);
}
EXPORT_SYMBOL_GPL(create_signature);
/**
* verify_signature - Initiate the use of an asymmetric key to verify a signature
* @key: The asymmetric key to verify against


@ -59,6 +59,15 @@ static int crc32_update(struct shash_desc *desc, const u8 *data,
{
u32 *crcp = shash_desc_ctx(desc);
*crcp = crc32_le_base(*crcp, data, len);
return 0;
}
static int crc32_update_arch(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
u32 *crcp = shash_desc_ctx(desc);
*crcp = crc32_le(*crcp, data, len);
return 0;
}
@ -66,6 +75,13 @@ static int crc32_update(struct shash_desc *desc, const u8 *data,
/* No final XOR 0xFFFFFFFF, like crc32_le */
static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
put_unaligned_le32(crc32_le_base(*crcp, data, len), out);
return 0;
}
static int __crc32_finup_arch(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
put_unaligned_le32(crc32_le(*crcp, data, len), out);
return 0;
@ -77,6 +93,12 @@ static int crc32_finup(struct shash_desc *desc, const u8 *data,
return __crc32_finup(shash_desc_ctx(desc), data, len, out);
}
static int crc32_finup_arch(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32_finup_arch(shash_desc_ctx(desc), data, len, out);
}
static int crc32_final(struct shash_desc *desc, u8 *out)
{
u32 *crcp = shash_desc_ctx(desc);
@ -88,10 +110,16 @@ static int crc32_final(struct shash_desc *desc, u8 *out)
static int crc32_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
-return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len,
-out);
+return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, out);
}
-static struct shash_alg alg = {
+static int crc32_digest_arch(struct shash_desc *desc, const u8 *data,
+unsigned int len, u8 *out)
+{
+return __crc32_finup_arch(crypto_shash_ctx(desc->tfm), data, len, out);
+}
+static struct shash_alg algs[] = {{
.setkey = crc32_setkey,
.init = crc32_init,
.update = crc32_update,
@ -100,26 +128,44 @@ static struct shash_alg alg = {
.digest = crc32_digest,
.descsize = sizeof(u32),
.digestsize = CHKSUM_DIGEST_SIZE,
-.base = {
-.cra_name = "crc32",
-.cra_driver_name = "crc32-generic",
-.cra_priority = 100,
-.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
-.cra_blocksize = CHKSUM_BLOCK_SIZE,
-.cra_ctxsize = sizeof(u32),
-.cra_module = THIS_MODULE,
-.cra_init = crc32_cra_init,
-}
-};
+.base.cra_name = "crc32",
+.base.cra_driver_name = "crc32-generic",
+.base.cra_priority = 100,
+.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+.base.cra_blocksize = CHKSUM_BLOCK_SIZE,
+.base.cra_ctxsize = sizeof(u32),
+.base.cra_module = THIS_MODULE,
+.base.cra_init = crc32_cra_init,
+}, {
.setkey = crc32_setkey,
.init = crc32_init,
.update = crc32_update_arch,
.final = crc32_final,
.finup = crc32_finup_arch,
.digest = crc32_digest_arch,
.descsize = sizeof(u32),
.digestsize = CHKSUM_DIGEST_SIZE,
.base.cra_name = "crc32",
.base.cra_driver_name = "crc32-" __stringify(ARCH),
.base.cra_priority = 150,
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = CHKSUM_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(u32),
.base.cra_module = THIS_MODULE,
.base.cra_init = crc32_cra_init,
}};
static int __init crc32_mod_init(void)
{
-return crypto_register_shash(&alg);
+/* register the arch flavor only if it differs from the generic one */
+return crypto_register_shashes(algs, 1 + (&crc32_le != &crc32_le_base));
}
static void __exit crc32_mod_fini(void)
{
-crypto_unregister_shash(&alg);
+crypto_unregister_shashes(algs, 1 + (&crc32_le != &crc32_le_base));
}
subsys_initcall(crc32_mod_init);
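The &crc32_le != &crc32_le_base test works because lib/crc32 defines crc32_le as an alias of crc32_le_base when the architecture supplies no optimized routine. Spelled out, the registration-count logic above is equivalent to this sketch:

    static int __init crc32_mod_init(void)
    {
            int n = 1;      /* crc32-generic is always registered */

            /* A distinct arch-optimized crc32_le exists: also register
             * the higher-priority crc32-<arch> entry. */
            if (&crc32_le != &crc32_le_base)
                    n = 2;

            return crypto_register_shashes(algs, n);
    }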


@ -85,6 +85,15 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = __crc32c_le_base(ctx->crc, data, length);
return 0;
}
static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = __crc32c_le(ctx->crc, data, length);
return 0;
}
@ -98,6 +107,13 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
}
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(~__crc32c_le_base(*crcp, data, len), out);
return 0;
}
static int __chksum_finup_arch(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
put_unaligned_le32(~__crc32c_le(*crcp, data, len), out);
return 0;
@ -111,6 +127,14 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
return __chksum_finup(&ctx->crc, data, len, out);
}
static int chksum_finup_arch(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup_arch(&ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
@ -119,6 +143,14 @@ static int chksum_digest(struct shash_desc *desc, const u8 *data,
return __chksum_finup(&mctx->key, data, length, out);
}
static int chksum_digest_arch(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
return __chksum_finup_arch(&mctx->key, data, length, out);
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
@ -127,7 +159,7 @@ static int crc32c_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct shash_alg alg = {
+static struct shash_alg algs[] = {{
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = chksum_setkey,
.init = chksum_init,
@ -136,26 +168,44 @@ static struct shash_alg alg = {
.finup = chksum_finup,
.digest = chksum_digest,
.descsize = sizeof(struct chksum_desc_ctx),
-.base = {
-.cra_name = "crc32c",
-.cra_driver_name = "crc32c-generic",
-.cra_priority = 100,
-.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
-.cra_blocksize = CHKSUM_BLOCK_SIZE,
-.cra_ctxsize = sizeof(struct chksum_ctx),
-.cra_module = THIS_MODULE,
-.cra_init = crc32c_cra_init,
-}
-};
+.base.cra_name = "crc32c",
+.base.cra_driver_name = "crc32c-generic",
+.base.cra_priority = 100,
+.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
+.base.cra_blocksize = CHKSUM_BLOCK_SIZE,
+.base.cra_ctxsize = sizeof(struct chksum_ctx),
+.base.cra_module = THIS_MODULE,
+.base.cra_init = crc32c_cra_init,
+}, {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksum_update_arch,
.final = chksum_final,
.finup = chksum_finup_arch,
.digest = chksum_digest_arch,
.descsize = sizeof(struct chksum_desc_ctx),
.base.cra_name = "crc32c",
.base.cra_driver_name = "crc32c-" __stringify(ARCH),
.base.cra_priority = 150,
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = CHKSUM_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct chksum_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_init = crc32c_cra_init,
}};
static int __init crc32c_mod_init(void)
{
-return crypto_register_shash(&alg);
+/* register the arch flavor only if it differs from the generic one */
+return crypto_register_shashes(algs, 1 + (&__crc32c_le != &__crc32c_le_base));
}
static void __exit crc32c_mod_fini(void)
{
-crypto_unregister_shash(&alg);
+crypto_unregister_shashes(algs, 1 + (&__crc32c_le != &__crc32c_le_base));
}
subsys_initcall(crc32c_mod_init);


@ -101,6 +101,7 @@
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string_choices.h>
/***************************************************************
* Backend cipher definitions available to DRBG
@ -1412,7 +1413,7 @@ static int drbg_generate(struct drbg_state *drbg,
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
-drbg->pr ? "true" : "false",
+str_true_false(drbg->pr),
(drbg->seeded == DRBG_SEED_STATE_FULL ?
"seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
@ -1562,7 +1563,7 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
bool reseed = true;
pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
-"%s\n", coreref, pr ? "enabled" : "disabled");
+"%s\n", coreref, str_enabled_disabled(pr));
mutex_lock(&drbg->drbg_mutex);
/* 9.1 step 1 is implicit with the selected DRBG type */

crypto/ecdsa-p1363.c (new file, 159 lines)

@ -0,0 +1,159 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ECDSA P1363 signature encoding
*
* Copyright (c) 2024 Intel Corporation
*/
#include <linux/err.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/sig.h>
#include <crypto/internal/ecc.h>
#include <crypto/internal/sig.h>
struct ecdsa_p1363_ctx {
struct crypto_sig *child;
};
static int ecdsa_p1363_verify(struct crypto_sig *tfm,
const void *src, unsigned int slen,
const void *digest, unsigned int dlen)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int keylen = crypto_sig_keysize(ctx->child);
unsigned int ndigits = DIV_ROUND_UP(keylen, sizeof(u64));
struct ecdsa_raw_sig sig;
if (slen != 2 * keylen)
return -EINVAL;
ecc_digits_from_bytes(src, keylen, sig.r, ndigits);
ecc_digits_from_bytes(src + keylen, keylen, sig.s, ndigits);
return crypto_sig_verify(ctx->child, &sig, sizeof(sig), digest, dlen);
}
static unsigned int ecdsa_p1363_key_size(struct crypto_sig *tfm)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
return crypto_sig_keysize(ctx->child);
}
static unsigned int ecdsa_p1363_max_size(struct crypto_sig *tfm)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
return 2 * crypto_sig_keysize(ctx->child);
}
static unsigned int ecdsa_p1363_digest_size(struct crypto_sig *tfm)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
return crypto_sig_digestsize(ctx->child);
}
static int ecdsa_p1363_set_pub_key(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
return crypto_sig_set_pubkey(ctx->child, key, keylen);
}
static int ecdsa_p1363_init_tfm(struct crypto_sig *tfm)
{
struct sig_instance *inst = sig_alg_instance(tfm);
struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
struct crypto_sig *child_tfm;
child_tfm = crypto_spawn_sig(spawn);
if (IS_ERR(child_tfm))
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
return 0;
}
static void ecdsa_p1363_exit_tfm(struct crypto_sig *tfm)
{
struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm);
crypto_free_sig(ctx->child);
}
static void ecdsa_p1363_free(struct sig_instance *inst)
{
struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
crypto_drop_sig(spawn);
kfree(inst);
}
static int ecdsa_p1363_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_sig_spawn *spawn;
struct sig_instance *inst;
struct sig_alg *ecdsa_alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = sig_instance_ctx(inst);
err = crypto_grab_sig(spawn, sig_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
ecdsa_alg = crypto_spawn_sig_alg(spawn);
err = -EINVAL;
if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0)
goto err_free_inst;
err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name,
&ecdsa_alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_p1363_ctx);
inst->alg.init = ecdsa_p1363_init_tfm;
inst->alg.exit = ecdsa_p1363_exit_tfm;
inst->alg.verify = ecdsa_p1363_verify;
inst->alg.key_size = ecdsa_p1363_key_size;
inst->alg.max_size = ecdsa_p1363_max_size;
inst->alg.digest_size = ecdsa_p1363_digest_size;
inst->alg.set_pub_key = ecdsa_p1363_set_pub_key;
inst->free = ecdsa_p1363_free;
err = sig_register_instance(tmpl, inst);
if (err) {
err_free_inst:
ecdsa_p1363_free(inst);
}
return err;
}
struct crypto_template ecdsa_p1363_tmpl = {
.name = "p1363",
.create = ecdsa_p1363_create,
.module = THIS_MODULE,
};
MODULE_ALIAS_CRYPTO("p1363");
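A minimal usage sketch of the new template (hypothetical caller, error paths trimmed). For ecdsa-nist-p256, crypto_sig_keysize() is 32 bytes, so a P1363 signature is the 64-byte big-endian concatenation r || s:

    struct crypto_sig *tfm;
    int err;

    tfm = crypto_alloc_sig("p1363(ecdsa-nist-p256)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    err = crypto_sig_set_pubkey(tfm, pub, publen);  /* uncompressed point */
    if (!err)
            err = crypto_sig_verify(tfm, sig, 64,   /* r || s, 2 * 32 bytes */
                                    digest, 32);    /* e.g. SHA-256 digest */
    crypto_free_sig(tfm);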

crypto/ecdsa-x962.c (new file, 237 lines)

@ -0,0 +1,237 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* ECDSA X9.62 signature encoding
*
* Copyright (c) 2021 IBM Corporation
* Copyright (c) 2024 Intel Corporation
*/
#include <linux/asn1_decoder.h>
#include <linux/err.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/sig.h>
#include <crypto/internal/ecc.h>
#include <crypto/internal/sig.h>
#include "ecdsasignature.asn1.h"
struct ecdsa_x962_ctx {
struct crypto_sig *child;
};
struct ecdsa_x962_signature_ctx {
struct ecdsa_raw_sig sig;
unsigned int ndigits;
};
/* Get the r and s components of a signature from the X.509 certificate. */
static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen,
unsigned int ndigits)
{
size_t bufsize = ndigits * sizeof(u64);
const char *d = value;
if (!value || !vlen || vlen > bufsize + 1)
return -EINVAL;
/*
* vlen may be 1 byte larger than bufsize due to a leading zero byte
* (necessary if the most significant bit of the integer is set).
*/
if (vlen > bufsize) {
/* skip over leading zeros that make 'value' a positive int */
if (*d == 0) {
vlen -= 1;
d++;
} else {
return -EINVAL;
}
}
ecc_digits_from_bytes(d, vlen, dest, ndigits);
return 0;
}
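An example of the leading-zero case handled above, for a P-256 component whose most significant bit is set:

    /* DER: 02 21 00 d9 ...  -- INTEGER, vlen == 33 == bufsize + 1.
     * The 0x00 pad byte is skipped and the remaining 32 bytes are
     * loaded by ecc_digits_from_bytes() as four u64 digits,
     * least significant digit first. */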
int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_x962_signature_ctx *sig_ctx = context;
return ecdsa_get_signature_rs(sig_ctx->sig.r, hdrlen, tag, value, vlen,
sig_ctx->ndigits);
}
int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_x962_signature_ctx *sig_ctx = context;
return ecdsa_get_signature_rs(sig_ctx->sig.s, hdrlen, tag, value, vlen,
sig_ctx->ndigits);
}
static int ecdsa_x962_verify(struct crypto_sig *tfm,
const void *src, unsigned int slen,
const void *digest, unsigned int dlen)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
struct ecdsa_x962_signature_ctx sig_ctx;
int err;
sig_ctx.ndigits = DIV_ROUND_UP(crypto_sig_keysize(ctx->child),
sizeof(u64));
err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen);
if (err < 0)
return err;
return crypto_sig_verify(ctx->child, &sig_ctx.sig, sizeof(sig_ctx.sig),
digest, dlen);
}
static unsigned int ecdsa_x962_key_size(struct crypto_sig *tfm)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
return crypto_sig_keysize(ctx->child);
}
static unsigned int ecdsa_x962_max_size(struct crypto_sig *tfm)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
struct sig_alg *alg = crypto_sig_alg(ctx->child);
int slen = crypto_sig_keysize(ctx->child);
/*
* Verify takes ECDSA-Sig-Value (described in RFC 5480) as input,
* which is actually 2 'key_size'-bit integers encoded in ASN.1.
* Account for the ASN.1 encoding overhead here.
*
* NIST P192/256/384 may prepend a '0' to a coordinate to indicate
* a positive integer. NIST P521 never needs it.
*/
if (strcmp(alg->base.cra_name, "ecdsa-nist-p521") != 0)
slen += 1;
/* Length of encoding the x & y coordinates */
slen = 2 * (slen + 2);
/*
* If coordinate encoding takes at least 128 bytes then an
* additional byte for length encoding is needed.
*/
return 1 + (slen >= 128) + 1 + slen;
}
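Evaluating the bound by hand for P-256 (a sketch; crypto_sig_keysize() returns bytes here):

    unsigned int slen = 32;         /* crypto_sig_keysize() for P-256 */

    slen += 1;                      /* possible 0x00 pad byte (not P-521) */
    slen = 2 * (slen + 2);          /* two INTEGERs: tag + len + 33 = 70 */

    /* 70 < 128, so the outer SEQUENCE needs no long-form length byte: */
    unsigned int max = 1 + (slen >= 128) + 1 + slen;        /* == 72 */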
static unsigned int ecdsa_x962_digest_size(struct crypto_sig *tfm)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
return crypto_sig_digestsize(ctx->child);
}
static int ecdsa_x962_set_pub_key(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
return crypto_sig_set_pubkey(ctx->child, key, keylen);
}
static int ecdsa_x962_init_tfm(struct crypto_sig *tfm)
{
struct sig_instance *inst = sig_alg_instance(tfm);
struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
struct crypto_sig *child_tfm;
child_tfm = crypto_spawn_sig(spawn);
if (IS_ERR(child_tfm))
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
return 0;
}
static void ecdsa_x962_exit_tfm(struct crypto_sig *tfm)
{
struct ecdsa_x962_ctx *ctx = crypto_sig_ctx(tfm);
crypto_free_sig(ctx->child);
}
static void ecdsa_x962_free(struct sig_instance *inst)
{
struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
crypto_drop_sig(spawn);
kfree(inst);
}
static int ecdsa_x962_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_sig_spawn *spawn;
struct sig_instance *inst;
struct sig_alg *ecdsa_alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = sig_instance_ctx(inst);
err = crypto_grab_sig(spawn, sig_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
ecdsa_alg = crypto_spawn_sig_alg(spawn);
err = -EINVAL;
if (strncmp(ecdsa_alg->base.cra_name, "ecdsa", 5) != 0)
goto err_free_inst;
err = crypto_inst_setname(sig_crypto_instance(inst), tmpl->name,
&ecdsa_alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = ecdsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct ecdsa_x962_ctx);
inst->alg.init = ecdsa_x962_init_tfm;
inst->alg.exit = ecdsa_x962_exit_tfm;
inst->alg.verify = ecdsa_x962_verify;
inst->alg.key_size = ecdsa_x962_key_size;
inst->alg.max_size = ecdsa_x962_max_size;
inst->alg.digest_size = ecdsa_x962_digest_size;
inst->alg.set_pub_key = ecdsa_x962_set_pub_key;
inst->free = ecdsa_x962_free;
err = sig_register_instance(tmpl, inst);
if (err) {
err_free_inst:
ecdsa_x962_free(inst);
}
return err;
}
struct crypto_template ecdsa_x962_tmpl = {
.name = "x962",
.create = ecdsa_x962_create,
.module = THIS_MODULE,
};
MODULE_ALIAS_CRYPTO("x962");


@ -4,14 +4,11 @@
*/
#include <linux/module.h>
-#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
-#include <crypto/akcipher.h>
+#include <crypto/internal/sig.h>
#include <crypto/ecdh.h>
-#include <linux/asn1_decoder.h>
-#include <linux/scatterlist.h>
-#include "ecdsasignature.asn1.h"
+#include <crypto/sha2.h>
+#include <crypto/sig.h>
struct ecc_ctx {
unsigned int curve_id;
@ -23,66 +20,6 @@ struct ecc_ctx {
struct ecc_point pub_key;
};
struct ecdsa_signature_ctx {
const struct ecc_curve *curve;
u64 r[ECC_MAX_DIGITS];
u64 s[ECC_MAX_DIGITS];
};
/*
* Get the r and s components of a signature from the X509 certificate.
*/
static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen, unsigned int ndigits)
{
size_t bufsize = ndigits * sizeof(u64);
ssize_t diff = vlen - bufsize;
const char *d = value;
if (!value || !vlen)
return -EINVAL;
/* diff = 0: 'value' has exacly the right size
* diff > 0: 'value' has too many bytes; one leading zero is allowed that
* makes the value a positive integer; error on more
* diff < 0: 'value' is missing leading zeros
*/
if (diff > 0) {
/* skip over leading zeros that make 'value' a positive int */
if (*d == 0) {
vlen -= 1;
diff--;
d++;
}
if (diff)
return -EINVAL;
}
if (-diff >= bufsize)
return -EINVAL;
ecc_digits_from_bytes(d, vlen, dest, ndigits);
return 0;
}
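/*
 * Worked examples of the length rules above (a sketch assuming
 * NIST P-256, i.e. bufsize = 32):
 *
 *	vlen == 32               accepted as-is
 *	vlen == 33, d[0] == 0    one pad byte skipped, then accepted
 *	vlen == 33, d[0] != 0    -EINVAL (value too large for the curve)
 *	vlen == 34               -EINVAL (at most one leading zero allowed)
 *	vlen  < 32               accepted, missing leading zeros implied
 */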
int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_signature_ctx *sig = context;
return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen,
sig->curve->g.ndigits);
}
int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_signature_ctx *sig = context;
return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen,
sig->curve->g.ndigits);
}
static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
{
const struct ecc_curve *curve = ctx->curve;
@@ -126,46 +63,27 @@ static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, con
/*
* Verify an ECDSA signature.
*/
static int ecdsa_verify(struct akcipher_request *req)
static int ecdsa_verify(struct crypto_sig *tfm,
const void *src, unsigned int slen,
const void *digest, unsigned int dlen)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
size_t bufsize = ctx->curve->g.ndigits * sizeof(u64);
struct ecdsa_signature_ctx sig_ctx = {
.curve = ctx->curve,
};
const struct ecdsa_raw_sig *sig = src;
u64 hash[ECC_MAX_DIGITS];
unsigned char *buffer;
int ret;
if (unlikely(!ctx->pub_key_set))
return -EINVAL;
buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
if (slen != sizeof(*sig))
return -EINVAL;
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src, req->src_len + req->dst_len),
buffer, req->src_len + req->dst_len, 0);
if (bufsize > dlen)
bufsize = dlen;
ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
buffer, req->src_len);
if (ret < 0)
goto error;
ecc_digits_from_bytes(digest, bufsize, hash, ctx->curve->g.ndigits);
if (bufsize > req->dst_len)
bufsize = req->dst_len;
ecc_digits_from_bytes(buffer + req->src_len, bufsize,
hash, ctx->curve->g.ndigits);
ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
error:
kfree(buffer);
return ret;
return _ecdsa_verify(ctx, hash, sig->r, sig->s);
}
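/*
 * For reference: the raw signature consumed above is assumed to be the
 * fixed-size (r, s) pair from the ecc internal headers, roughly:
 *
 *	struct ecdsa_raw_sig {
 *		u64 r[ECC_MAX_DIGITS];
 *		u64 s[ECC_MAX_DIGITS];
 *	};
 *
 * so the x962/p1363 templates do all wire-format decoding and the core
 * verify path no longer touches ASN.1 or scatterlists.
 */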
static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id)
@@ -201,9 +119,10 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
* Set the public ECC key as defined by RFC5480 section 2.2 "Subject Public
* Key". Only the uncompressed format is supported.
*/
static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
static int ecdsa_set_pub_key(struct crypto_sig *tfm, const void *key,
unsigned int keylen)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int digitlen, ndigits;
const unsigned char *d = key;
int ret;
@@ -237,31 +156,43 @@ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsig
return ret;
}
static void ecdsa_exit_tfm(struct crypto_akcipher *tfm)
static void ecdsa_exit_tfm(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
ecdsa_ecc_ctx_deinit(ctx);
}
static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm)
static unsigned int ecdsa_key_size(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return DIV_ROUND_UP(ctx->curve->nbits, 8);
}
static int ecdsa_nist_p521_init_tfm(struct crypto_akcipher *tfm)
static unsigned int ecdsa_digest_size(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
/*
* ECDSA key sizes are much smaller than RSA, and thus could
* operate on (hashed) inputs that are larger than the key size.
* E.g. SHA384-hashed input used with secp256r1 based keys.
* Return the largest supported hash size (SHA512).
*/
return SHA512_DIGEST_SIZE;
}
static int ecdsa_nist_p521_init_tfm(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P521);
}
static struct akcipher_alg ecdsa_nist_p521 = {
static struct sig_alg ecdsa_nist_p521 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.key_size = ecdsa_key_size,
.digest_size = ecdsa_digest_size,
.init = ecdsa_nist_p521_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
@@ -273,17 +204,18 @@ static struct akcipher_alg ecdsa_nist_p521 = {
},
};
static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
static int ecdsa_nist_p384_init_tfm(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384);
}
static struct akcipher_alg ecdsa_nist_p384 = {
static struct sig_alg ecdsa_nist_p384 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.key_size = ecdsa_key_size,
.digest_size = ecdsa_digest_size,
.init = ecdsa_nist_p384_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
@@ -295,17 +227,18 @@ static struct akcipher_alg ecdsa_nist_p384 = {
},
};
static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm)
static int ecdsa_nist_p256_init_tfm(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256);
}
static struct akcipher_alg ecdsa_nist_p256 = {
static struct sig_alg ecdsa_nist_p256 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.key_size = ecdsa_key_size,
.digest_size = ecdsa_digest_size,
.init = ecdsa_nist_p256_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
@@ -317,17 +250,18 @@ static struct akcipher_alg ecdsa_nist_p256 = {
},
};
static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm)
static int ecdsa_nist_p192_init_tfm(struct crypto_sig *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecc_ctx *ctx = crypto_sig_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192);
}
static struct akcipher_alg ecdsa_nist_p192 = {
static struct sig_alg ecdsa_nist_p192 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.key_size = ecdsa_key_size,
.digest_size = ecdsa_digest_size,
.init = ecdsa_nist_p192_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
@@ -345,42 +279,59 @@ static int __init ecdsa_init(void)
int ret;
/* NIST p192 may not be available in FIPS mode */
ret = crypto_register_akcipher(&ecdsa_nist_p192);
ret = crypto_register_sig(&ecdsa_nist_p192);
ecdsa_nist_p192_registered = ret == 0;
ret = crypto_register_akcipher(&ecdsa_nist_p256);
ret = crypto_register_sig(&ecdsa_nist_p256);
if (ret)
goto nist_p256_error;
ret = crypto_register_akcipher(&ecdsa_nist_p384);
ret = crypto_register_sig(&ecdsa_nist_p384);
if (ret)
goto nist_p384_error;
ret = crypto_register_akcipher(&ecdsa_nist_p521);
ret = crypto_register_sig(&ecdsa_nist_p521);
if (ret)
goto nist_p521_error;
ret = crypto_register_template(&ecdsa_x962_tmpl);
if (ret)
goto x962_tmpl_error;
ret = crypto_register_template(&ecdsa_p1363_tmpl);
if (ret)
goto p1363_tmpl_error;
return 0;
p1363_tmpl_error:
crypto_unregister_template(&ecdsa_x962_tmpl);
x962_tmpl_error:
crypto_unregister_sig(&ecdsa_nist_p521);
nist_p521_error:
crypto_unregister_akcipher(&ecdsa_nist_p384);
crypto_unregister_sig(&ecdsa_nist_p384);
nist_p384_error:
crypto_unregister_akcipher(&ecdsa_nist_p256);
crypto_unregister_sig(&ecdsa_nist_p256);
nist_p256_error:
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
crypto_unregister_sig(&ecdsa_nist_p192);
return ret;
}
static void __exit ecdsa_exit(void)
{
crypto_unregister_template(&ecdsa_x962_tmpl);
crypto_unregister_template(&ecdsa_p1363_tmpl);
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
crypto_unregister_akcipher(&ecdsa_nist_p256);
crypto_unregister_akcipher(&ecdsa_nist_p384);
crypto_unregister_akcipher(&ecdsa_nist_p521);
crypto_unregister_sig(&ecdsa_nist_p192);
crypto_unregister_sig(&ecdsa_nist_p256);
crypto_unregister_sig(&ecdsa_nist_p384);
crypto_unregister_sig(&ecdsa_nist_p521);
}
subsys_initcall(ecdsa_init);

View File

@@ -18,12 +18,11 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/sig.h>
#include <crypto/streebog.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
#include <crypto/akcipher.h>
#include <crypto/internal/sig.h>
#include <linux/oid_registry.h>
#include <linux/scatterlist.h>
#include "ecrdsa_params.asn1.h"
#include "ecrdsa_pub_key.asn1.h"
#include "ecrdsa_defs.h"
@@ -68,13 +67,12 @@ static const struct ecc_curve *get_curve_by_oid(enum OID oid)
}
}
static int ecrdsa_verify(struct akcipher_request *req)
static int ecrdsa_verify(struct crypto_sig *tfm,
const void *src, unsigned int slen,
const void *digest, unsigned int dlen)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
unsigned char sig[ECRDSA_MAX_SIG_SIZE];
unsigned char digest[STREEBOG512_DIGEST_SIZE];
unsigned int ndigits = req->dst_len / sizeof(u64);
struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int ndigits = dlen / sizeof(u64);
u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */
u64 _r[ECRDSA_MAX_DIGITS]; /* -r */
u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */
@@ -91,25 +89,19 @@ static int ecrdsa_verify(struct akcipher_request *req)
*/
if (!ctx->curve ||
!ctx->digest ||
!req->src ||
!src ||
!digest ||
!ctx->pub_key.x ||
req->dst_len != ctx->digest_len ||
req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
dlen != ctx->digest_len ||
dlen != ctx->curve->g.ndigits * sizeof(u64) ||
ctx->pub_key.ndigits != ctx->curve->g.ndigits ||
req->dst_len * 2 != req->src_len ||
WARN_ON(req->src_len > sizeof(sig)) ||
WARN_ON(req->dst_len > sizeof(digest)))
dlen * 2 != slen ||
WARN_ON(slen > ECRDSA_MAX_SIG_SIZE) ||
WARN_ON(dlen > STREEBOG512_DIGEST_SIZE))
return -EBADMSG;
sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
sig, req->src_len);
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src,
req->src_len + req->dst_len),
digest, req->dst_len, req->src_len);
vli_from_be64(s, sig, ndigits);
vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits);
vli_from_be64(s, src, ndigits);
vli_from_be64(r, src + ndigits * sizeof(u64), ndigits);
/* Step 1: verify that 0 < r < q, 0 < s < q */
if (vli_is_zero(r, ndigits) ||
@@ -188,10 +180,10 @@ static u8 *ecrdsa_unpack_u32(u32 *dst, void *src)
}
/* Parse BER encoded subjectPublicKey. */
static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
static int ecrdsa_set_pub_key(struct crypto_sig *tfm, const void *key,
unsigned int keylen)
{
struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int ndigits;
u32 algo, paramlen;
u8 *params;
@@ -249,9 +241,9 @@ static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return 0;
}
static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
static unsigned int ecrdsa_key_size(struct crypto_sig *tfm)
{
struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
/*
* Verify doesn't need any output, so it's just informational
@@ -260,13 +252,21 @@ static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
return ctx->pub_key.ndigits * sizeof(u64);
}
static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm)
static unsigned int ecrdsa_max_size(struct crypto_sig *tfm)
{
struct ecrdsa_ctx *ctx = crypto_sig_ctx(tfm);
return 2 * ctx->pub_key.ndigits * sizeof(u64);
}
static void ecrdsa_exit_tfm(struct crypto_sig *tfm)
{
}
static struct akcipher_alg ecrdsa_alg = {
static struct sig_alg ecrdsa_alg = {
.verify = ecrdsa_verify,
.set_pub_key = ecrdsa_set_pub_key,
.key_size = ecrdsa_key_size,
.max_size = ecrdsa_max_size,
.exit = ecrdsa_exit_tfm,
.base = {
@@ -280,12 +280,12 @@ static struct akcipher_alg ecrdsa_alg = {
static int __init ecrdsa_mod_init(void)
{
return crypto_register_akcipher(&ecrdsa_alg);
return crypto_register_sig(&ecrdsa_alg);
}
static void __exit ecrdsa_mod_fini(void)
{
crypto_unregister_akcipher(&ecrdsa_alg);
crypto_unregister_sig(&ecrdsa_alg);
}
module_init(ecrdsa_mod_init);

View File

@@ -22,8 +22,6 @@
#include <linux/sched.h>
#include <linux/types.h>
struct akcipher_request;
struct crypto_akcipher;
struct crypto_instance;
struct crypto_template;
@@ -35,19 +33,6 @@ struct crypto_larval {
bool test_started;
};
struct crypto_akcipher_sync_data {
struct crypto_akcipher *tfm;
const void *src;
void *dst;
unsigned int slen;
unsigned int dlen;
struct akcipher_request *req;
struct crypto_wait cwait;
struct scatterlist sg;
u8 *buf;
};
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
@@ -129,10 +114,6 @@ void *crypto_create_tfm_node(struct crypto_alg *alg,
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm);
int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data);
int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err);
int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm);
static inline void *crypto_create_tfm(struct crypto_alg *alg,
const struct crypto_type *frontend)
{

View File

@@ -15,7 +15,7 @@
#define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1)
struct jent_testing {
u32 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE];
u64 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE];
u32 rb_reader;
atomic_t rb_writer;
atomic_t jent_testing_enabled;
@@ -72,7 +72,7 @@ static void jent_testing_fini(struct jent_testing *data, u32 boot)
pr_warn("Disabling data collection\n");
}
static bool jent_testing_store(struct jent_testing *data, u32 value,
static bool jent_testing_store(struct jent_testing *data, u64 value,
u32 *boot)
{
unsigned long flags;
@@ -156,20 +156,20 @@ static int jent_testing_reader(struct jent_testing *data, u32 *boot,
}
/* We copy out word-wise */
if (outbuflen < sizeof(u32)) {
if (outbuflen < sizeof(u64)) {
spin_unlock_irqrestore(&data->lock, flags);
goto out;
}
memcpy(outbuf, &data->jent_testing_rb[data->rb_reader],
sizeof(u32));
sizeof(u64));
data->rb_reader++;
spin_unlock_irqrestore(&data->lock, flags);
outbuf += sizeof(u32);
outbuflen -= sizeof(u32);
collected_data += sizeof(u32);
outbuf += sizeof(u64);
outbuflen -= sizeof(u64);
collected_data += sizeof(u64);
}
out:
@@ -189,16 +189,17 @@ static int jent_testing_extract_user(struct file *file, char __user *buf,
/*
* The intention of this interface is for collecting at least
* 1000 samples due to the SP800-90B requirements. So, we make no
effort in avoiding allocating more memory than actually needed
* by the user. Hence, we allocate sufficient memory to always hold
* that amount of data.
* 1000 samples due to the SP800-90B requirements. However, due to
* memory and performance constraints, it is not desirable to allocate
* 8000 bytes of memory. Instead, we allocate space for only 125
* samples, which will allow the user to collect all 1000 samples using
* 8 calls to this interface.
*/
tmp = kmalloc(JENT_TEST_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL);
tmp = kmalloc(125 * sizeof(u64) + sizeof(u64), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
tmp_aligned = PTR_ALIGN(tmp, sizeof(u32));
tmp_aligned = PTR_ALIGN(tmp, sizeof(u64));
while (nbytes) {
int i;
@@ -212,7 +213,7 @@ static int jent_testing_extract_user(struct file *file, char __user *buf,
schedule();
}
i = min_t(int, nbytes, JENT_TEST_RINGBUFFER_SIZE);
i = min_t(int, nbytes, 125 * sizeof(u64));
i = reader(tmp_aligned, i);
if (i <= 0) {
if (i < 0)
@@ -251,7 +252,7 @@ static struct jent_testing jent_raw_hires = {
.read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait)
};
int jent_raw_hires_entropy_store(__u32 value)
int jent_raw_hires_entropy_store(__u64 value)
{
return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test);
}
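/*
 * Sketch of a matching user-space collector, sized to the comment above
 * (8 reads of 125 u64 samples = 1000 samples); the debugfs path is an
 * assumption and may differ by configuration:
 *
 *	uint64_t samples[1000];
 *	size_t got = 0;
 *	int fd = open("/sys/kernel/debug/jitterentropy_testing/jent_raw_hires",
 *		      O_RDONLY);
 *	while (fd >= 0 && got < sizeof(samples)) {
 *		ssize_t n = read(fd, (char *)samples + got,
 *				 125 * sizeof(uint64_t));
 *		if (n <= 0)
 *			break;
 *		got += n;
 *	}
 */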

View File

@@ -22,11 +22,11 @@ extern struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
extern void jent_entropy_collector_free(struct rand_data *entropy_collector);
#ifdef CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE
int jent_raw_hires_entropy_store(__u32 value);
int jent_raw_hires_entropy_store(__u64 value);
void jent_testing_init(void);
void jent_testing_exit(void);
#else /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */
static inline int jent_raw_hires_entropy_store(__u32 value) { return 0; }
static inline int jent_raw_hires_entropy_store(__u64 value) { return 0; }
static inline void jent_testing_init(void) { }
static inline void jent_testing_exit(void) { }
#endif /* CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE */

View File

@@ -117,8 +117,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
if (err == -EBUSY) {
/* try non-parallel mode */
return crypto_aead_encrypt(creq);
}
return err;
}
@@ -166,8 +168,10 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
if (err == -EBUSY) {
/* try non-parallel mode */
return crypto_aead_decrypt(creq);
}
return err;
}

View File

@@ -16,101 +16,6 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
/*
* Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
*/
static const u8 rsa_digest_info_md5[] = {
0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
0x05, 0x00, 0x04, 0x10
};
static const u8 rsa_digest_info_sha1[] = {
0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
0x2b, 0x0e, 0x03, 0x02, 0x1a,
0x05, 0x00, 0x04, 0x14
};
static const u8 rsa_digest_info_rmd160[] = {
0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
0x2b, 0x24, 0x03, 0x02, 0x01,
0x05, 0x00, 0x04, 0x14
};
static const u8 rsa_digest_info_sha224[] = {
0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
0x05, 0x00, 0x04, 0x1c
};
static const u8 rsa_digest_info_sha256[] = {
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
0x05, 0x00, 0x04, 0x20
};
static const u8 rsa_digest_info_sha384[] = {
0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
0x05, 0x00, 0x04, 0x30
};
static const u8 rsa_digest_info_sha512[] = {
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
0x05, 0x00, 0x04, 0x40
};
static const u8 rsa_digest_info_sha3_256[] = {
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08,
0x05, 0x00, 0x04, 0x20
};
static const u8 rsa_digest_info_sha3_384[] = {
0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09,
0x05, 0x00, 0x04, 0x30
};
static const u8 rsa_digest_info_sha3_512[] = {
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0A,
0x05, 0x00, 0x04, 0x40
};
static const struct rsa_asn1_template {
const char *name;
const u8 *data;
size_t size;
} rsa_asn1_templates[] = {
#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
_(md5),
_(sha1),
_(rmd160),
_(sha256),
_(sha384),
_(sha512),
_(sha224),
#undef _
#define _(X) { "sha3-" #X, rsa_digest_info_sha3_##X, sizeof(rsa_digest_info_sha3_##X) }
_(256),
_(384),
_(512),
#undef _
{ NULL }
};
static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
{
const struct rsa_asn1_template *p;
for (p = rsa_asn1_templates; p->name; p++)
if (strcmp(name, p->name) == 0)
return p;
return NULL;
}
struct pkcs1pad_ctx {
struct crypto_akcipher *child;
unsigned int key_size;
@@ -118,7 +23,6 @@ struct pkcs1pad_ctx {
struct pkcs1pad_inst_ctx {
struct crypto_akcipher_spawn spawn;
const struct rsa_asn1_template *digest_info;
};
struct pkcs1pad_request {
@@ -131,42 +35,16 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
int err;
ctx->key_size = 0;
err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
if (err)
return err;
/* Find out new modulus size from rsa implementation */
err = crypto_akcipher_maxsize(ctx->child);
if (err > PAGE_SIZE)
return -ENOTSUPP;
ctx->key_size = err;
return 0;
return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
}
static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
int err;
ctx->key_size = 0;
err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
if (err)
return err;
/* Find out new modulus size from rsa implementation */
err = crypto_akcipher_maxsize(ctx->child);
if (err > PAGE_SIZE)
return -ENOTSUPP;
ctx->key_size = err;
return 0;
return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen);
}
static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
@@ -174,9 +52,9 @@ static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
/*
* The maximum destination buffer size for the encrypt/sign operations
* The maximum destination buffer size for the encrypt operation
* will be the same as for RSA, even though it's smaller for
* decrypt/verify.
* decrypt.
*/
return ctx->key_size;
@@ -194,7 +72,7 @@ static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
sg_chain(sg, nsegs, next);
}
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
static int pkcs1pad_encrypt_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
@@ -233,14 +111,14 @@ out:
return err;
}
static void pkcs1pad_encrypt_sign_complete_cb(void *data, int err)
static void pkcs1pad_encrypt_complete_cb(void *data, int err)
{
struct akcipher_request *req = data;
if (err == -EINPROGRESS)
goto out;
err = pkcs1pad_encrypt_sign_complete(req, err);
err = pkcs1pad_encrypt_complete(req, err);
out:
akcipher_request_complete(req, err);
@@ -281,7 +159,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
pkcs1pad_encrypt_complete_cb, req);
/* Reuse output buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
@@ -289,7 +167,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return pkcs1pad_encrypt_complete(req, err);
return err;
}
@@ -394,195 +272,6 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
return err;
}
static int pkcs1pad_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
const struct rsa_asn1_template *digest_info = ictx->digest_info;
int err;
unsigned int ps_end, digest_info_size = 0;
if (!ctx->key_size)
return -EINVAL;
if (digest_info)
digest_info_size = digest_info->size;
if (req->src_len + digest_info_size > ctx->key_size - 11)
return -EOVERFLOW;
if (req->dst_len < ctx->key_size) {
req->dst_len = ctx->key_size;
return -EOVERFLOW;
}
req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
GFP_KERNEL);
if (!req_ctx->in_buf)
return -ENOMEM;
ps_end = ctx->key_size - digest_info_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x01;
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
if (digest_info)
memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
digest_info->size);
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
/* Reuse output buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_decrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
}
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
const struct rsa_asn1_template *digest_info = ictx->digest_info;
const unsigned int sig_size = req->src_len;
const unsigned int digest_size = req->dst_len;
unsigned int dst_len;
unsigned int pos;
u8 *out_buf;
if (err)
goto done;
err = -EINVAL;
dst_len = req_ctx->child_req.dst_len;
if (dst_len < ctx->key_size - 1)
goto done;
out_buf = req_ctx->out_buf;
if (dst_len == ctx->key_size) {
if (out_buf[0] != 0x00)
/* Decrypted value had no leading 0 byte */
goto done;
dst_len--;
out_buf++;
}
err = -EBADMSG;
if (out_buf[0] != 0x01)
goto done;
for (pos = 1; pos < dst_len; pos++)
if (out_buf[pos] != 0xff)
break;
if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
goto done;
pos++;
if (digest_info) {
if (digest_info->size > dst_len - pos)
goto done;
if (crypto_memneq(out_buf + pos, digest_info->data,
digest_info->size))
goto done;
pos += digest_info->size;
}
err = 0;
if (digest_size != dst_len - pos) {
err = -EKEYREJECTED;
req->dst_len = dst_len - pos;
goto done;
}
/* Extract appended digest. */
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src, sig_size + digest_size),
req_ctx->out_buf + ctx->key_size,
digest_size, sig_size);
/* Do the actual verification step. */
if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
digest_size) != 0)
err = -EKEYREJECTED;
done:
kfree_sensitive(req_ctx->out_buf);
return err;
}
static void pkcs1pad_verify_complete_cb(void *data, int err)
{
struct akcipher_request *req = data;
if (err == -EINPROGRESS)
goto out;
err = pkcs1pad_verify_complete(req, err);
out:
akcipher_request_complete(req, err);
}
/*
* The verify operation is here for completeness similar to the verification
* defined in RFC2313 section 10.2 except that block type 0 is not accepted,
* as in RFC2437. RFC2437 section 9.2 doesn't define any operation to
* retrieve the DigestInfo from a signature, instead the user is expected
* to call the sign operation to generate the expected signature and compare
* signatures instead of the message-digests.
*/
static int pkcs1pad_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
const unsigned int sig_size = req->src_len;
const unsigned int digest_size = req->dst_len;
int err;
if (WARN_ON(req->dst) || WARN_ON(!digest_size) ||
!ctx->key_size || sig_size != ctx->key_size)
return -EINVAL;
req_ctx->out_buf = kmalloc(ctx->key_size + digest_size, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
ctx->key_size, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_verify_complete_cb, req);
/* Reuse input buffer, output to a new buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req->src,
req_ctx->out_sg, sig_size, ctx->key_size);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_verify_complete(req, err);
return err;
}
static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
@@ -624,7 +313,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
struct akcipher_alg *rsa_alg;
const char *hash_name;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
@@ -650,8 +338,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
}
err = -ENAMETOOLONG;
hash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(hash_name)) {
if (snprintf(inst->alg.base.cra_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
@@ -659,27 +345,8 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
if (snprintf(inst->alg.base.cra_driver_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
rsa_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
} else {
ctx->digest_info = rsa_lookup_asn1(hash_name);
if (!ctx->digest_info) {
err = -EINVAL;
goto err_free_inst;
}
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
rsa_alg->base.cra_driver_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
}
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
@@ -689,8 +356,6 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.encrypt = pkcs1pad_encrypt;
inst->alg.decrypt = pkcs1pad_decrypt;
inst->alg.sign = pkcs1pad_sign;
inst->alg.verify = pkcs1pad_verify;
inst->alg.set_pub_key = pkcs1pad_set_pub_key;
inst->alg.set_priv_key = pkcs1pad_set_priv_key;
inst->alg.max_size = pkcs1pad_get_max_size;

View File

@@ -407,16 +407,25 @@ static int __init rsa_init(void)
return err;
err = crypto_register_template(&rsa_pkcs1pad_tmpl);
if (err) {
if (err)
goto err_unregister_rsa;
err = crypto_register_template(&rsassa_pkcs1_tmpl);
if (err)
goto err_unregister_rsa_pkcs1pad;
return 0;
err_unregister_rsa_pkcs1pad:
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
err_unregister_rsa:
crypto_unregister_akcipher(&rsa);
return err;
}
return 0;
}
static void __exit rsa_exit(void)
{
crypto_unregister_template(&rsassa_pkcs1_tmpl);
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}

crypto/rsassa-pkcs1.c (new file, 454 lines)
View File

@@ -0,0 +1,454 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RSA Signature Scheme with Appendix - PKCS #1 v1.5 (RFC 8017 sec 8.2)
*
* https://www.rfc-editor.org/rfc/rfc8017#section-8.2
*
* Copyright (c) 2015 - 2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sig.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/sig.h>
/*
* Full Hash Prefix for EMSA-PKCS1-v1_5 encoding method (RFC 9580 table 24)
*
* RSA keys are usually much larger than the hash of the message to be signed.
* The hash is therefore prepended by the Full Hash Prefix and a 0xff padding.
* The Full Hash Prefix is an ASN.1 SEQUENCE containing the hash algorithm OID.
*
* https://www.rfc-editor.org/rfc/rfc9580#table-24
*/
static const u8 hash_prefix_none[] = { };
static const u8 hash_prefix_md5[] = {
0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, /* SEQUENCE (SEQUENCE (OID */
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* <algorithm>, */
0x05, 0x00, 0x04, 0x10 /* NULL), OCTET STRING <hash>) */
};
static const u8 hash_prefix_sha1[] = {
0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
0x2b, 0x0e, 0x03, 0x02, 0x1a,
0x05, 0x00, 0x04, 0x14
};
static const u8 hash_prefix_rmd160[] = {
0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
0x2b, 0x24, 0x03, 0x02, 0x01,
0x05, 0x00, 0x04, 0x14
};
static const u8 hash_prefix_sha224[] = {
0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
0x05, 0x00, 0x04, 0x1c
};
static const u8 hash_prefix_sha256[] = {
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
0x05, 0x00, 0x04, 0x20
};
static const u8 hash_prefix_sha384[] = {
0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
0x05, 0x00, 0x04, 0x30
};
static const u8 hash_prefix_sha512[] = {
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
0x05, 0x00, 0x04, 0x40
};
static const u8 hash_prefix_sha3_256[] = {
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08,
0x05, 0x00, 0x04, 0x20
};
static const u8 hash_prefix_sha3_384[] = {
0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09,
0x05, 0x00, 0x04, 0x30
};
static const u8 hash_prefix_sha3_512[] = {
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0a,
0x05, 0x00, 0x04, 0x40
};
static const struct hash_prefix {
const char *name;
const u8 *data;
size_t size;
} hash_prefixes[] = {
#define _(X) { #X, hash_prefix_##X, sizeof(hash_prefix_##X) }
_(none),
_(md5),
_(sha1),
_(rmd160),
_(sha256),
_(sha384),
_(sha512),
_(sha224),
#undef _
#define _(X) { "sha3-" #X, hash_prefix_sha3_##X, sizeof(hash_prefix_sha3_##X) }
_(256),
_(384),
_(512),
#undef _
{ NULL }
};
static const struct hash_prefix *rsassa_pkcs1_find_hash_prefix(const char *name)
{
const struct hash_prefix *p;
for (p = hash_prefixes; p->name; p++)
if (strcmp(name, p->name) == 0)
return p;
return NULL;
}
static bool rsassa_pkcs1_invalid_hash_len(unsigned int len,
const struct hash_prefix *p)
{
/*
* Legacy protocols such as TLS 1.1 or earlier and IKE version 1
* do not prepend a Full Hash Prefix to the hash. In that case,
* the size of the Full Hash Prefix is zero.
*/
if (p->data == hash_prefix_none)
return false;
/*
* The final byte of the Full Hash Prefix encodes the hash length.
*
* This needs to be revisited should hash algorithms with more than
* 1016 bits (127 bytes * 8) ever be added. The length would then
* be encoded into more than one byte by ASN.1.
*/
static_assert(HASH_MAX_DIGESTSIZE <= 127);
return len != p->data[p->size - 1];
}
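/*
 * Example of the check above (illustrative): hash_prefix_sha256 ends in
 * ... 0x04, 0x20, and 0x20 == 32 == SHA-256 digest size, so only a
 * 32-byte digest is accepted when the sha256 prefix is selected.
 */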
struct rsassa_pkcs1_ctx {
struct crypto_akcipher *child;
unsigned int key_size;
};
struct rsassa_pkcs1_inst_ctx {
struct crypto_akcipher_spawn spawn;
const struct hash_prefix *hash_prefix;
};
static int rsassa_pkcs1_sign(struct crypto_sig *tfm,
const void *src, unsigned int slen,
void *dst, unsigned int dlen)
{
struct sig_instance *inst = sig_alg_instance(tfm);
struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
const struct hash_prefix *hash_prefix = ictx->hash_prefix;
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
struct scatterlist in_sg[3], out_sg;
struct crypto_wait cwait;
unsigned int pad_len;
unsigned int ps_end;
unsigned int len;
u8 *in_buf;
int err;
if (!ctx->key_size)
return -EINVAL;
if (dlen < ctx->key_size)
return -EOVERFLOW;
if (rsassa_pkcs1_invalid_hash_len(slen, hash_prefix))
return -EINVAL;
if (slen + hash_prefix->size > ctx->key_size - 11)
return -EOVERFLOW;
pad_len = ctx->key_size - slen - hash_prefix->size - 1;
child_req = kmalloc(sizeof(*child_req) + child_reqsize + pad_len,
GFP_KERNEL);
if (!child_req)
return -ENOMEM;
/* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */
in_buf = (u8 *)(child_req + 1) + child_reqsize;
ps_end = pad_len - 1;
in_buf[0] = 0x01;
memset(in_buf + 1, 0xff, ps_end - 1);
in_buf[ps_end] = 0x00;
/* RFC 8017 sec 8.2.1 step 2 - RSA signature */
crypto_init_wait(&cwait);
sg_init_table(in_sg, 3);
sg_set_buf(&in_sg[0], in_buf, pad_len);
sg_set_buf(&in_sg[1], hash_prefix->data, hash_prefix->size);
sg_set_buf(&in_sg[2], src, slen);
sg_init_one(&out_sg, dst, dlen);
akcipher_request_set_tfm(child_req, ctx->child);
akcipher_request_set_crypt(child_req, in_sg, &out_sg,
ctx->key_size - 1, dlen);
akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);
err = crypto_akcipher_decrypt(child_req);
err = crypto_wait_req(err, &cwait);
if (err)
return err;
len = child_req->dst_len;
pad_len = ctx->key_size - len;
/* Four billion to one */
if (unlikely(pad_len)) {
memmove(dst + pad_len, dst, len);
memset(dst, 0, pad_len);
}
return 0;
}
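/*
 * Worked layout for the block built above (a sketch assuming a
 * 2048-bit key and SHA-256: key_size = 256, slen = 32, prefix = 19):
 *
 *	pad_len = 256 - 32 - 19 - 1 = 204
 *	EM      = 0x01 || 202 * 0xff || 0x00 || prefix(19) || digest(32)
 *
 * i.e. key_size - 1 = 255 bytes handed to the RSA primitive; the
 * leading 0x00 octet of the full RFC 8017 block is implicit because
 * the value is already smaller than the modulus.
 */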
static int rsassa_pkcs1_verify(struct crypto_sig *tfm,
const void *src, unsigned int slen,
const void *digest, unsigned int dlen)
{
struct sig_instance *inst = sig_alg_instance(tfm);
struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
const struct hash_prefix *hash_prefix = ictx->hash_prefix;
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child);
struct akcipher_request *child_req __free(kfree_sensitive) = NULL;
struct scatterlist in_sg, out_sg;
struct crypto_wait cwait;
unsigned int dst_len;
unsigned int pos;
u8 *out_buf;
int err;
/* RFC 8017 sec 8.2.2 step 1 - length checking */
if (!ctx->key_size ||
slen != ctx->key_size ||
rsassa_pkcs1_invalid_hash_len(dlen, hash_prefix))
return -EINVAL;
/* RFC 8017 sec 8.2.2 step 2 - RSA verification */
child_req = kmalloc(sizeof(*child_req) + child_reqsize + ctx->key_size,
GFP_KERNEL);
if (!child_req)
return -ENOMEM;
out_buf = (u8 *)(child_req + 1) + child_reqsize;
crypto_init_wait(&cwait);
sg_init_one(&in_sg, src, slen);
sg_init_one(&out_sg, out_buf, ctx->key_size);
akcipher_request_set_tfm(child_req, ctx->child);
akcipher_request_set_crypt(child_req, &in_sg, &out_sg,
slen, ctx->key_size);
akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);
err = crypto_akcipher_encrypt(child_req);
err = crypto_wait_req(err, &cwait);
if (err)
return err;
/* RFC 8017 sec 8.2.2 step 3 - EMSA-PKCS1-v1_5 encoding verification */
dst_len = child_req->dst_len;
if (dst_len < ctx->key_size - 1)
return -EINVAL;
if (dst_len == ctx->key_size) {
if (out_buf[0] != 0x00)
/* Encrypted value had no leading 0 byte */
return -EINVAL;
dst_len--;
out_buf++;
}
if (out_buf[0] != 0x01)
return -EBADMSG;
for (pos = 1; pos < dst_len; pos++)
if (out_buf[pos] != 0xff)
break;
if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
return -EBADMSG;
pos++;
if (hash_prefix->size > dst_len - pos)
return -EBADMSG;
if (crypto_memneq(out_buf + pos, hash_prefix->data, hash_prefix->size))
return -EBADMSG;
pos += hash_prefix->size;
/* RFC 8017 sec 8.2.2 step 4 - comparison of digest with out_buf */
if (dlen != dst_len - pos)
return -EKEYREJECTED;
if (memcmp(digest, out_buf + pos, dlen) != 0)
return -EKEYREJECTED;
return 0;
}
static unsigned int rsassa_pkcs1_key_size(struct crypto_sig *tfm)
{
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
return ctx->key_size;
}
static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
{
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
}
static int rsassa_pkcs1_set_priv_key(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
{
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen);
}
static int rsassa_pkcs1_init_tfm(struct crypto_sig *tfm)
{
struct sig_instance *inst = sig_alg_instance(tfm);
struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst);
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
struct crypto_akcipher *child_tfm;
child_tfm = crypto_spawn_akcipher(&ictx->spawn);
if (IS_ERR(child_tfm))
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
return 0;
}
static void rsassa_pkcs1_exit_tfm(struct crypto_sig *tfm)
{
struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm);
crypto_free_akcipher(ctx->child);
}
static void rsassa_pkcs1_free(struct sig_instance *inst)
{
struct rsassa_pkcs1_inst_ctx *ctx = sig_instance_ctx(inst);
struct crypto_akcipher_spawn *spawn = &ctx->spawn;
crypto_drop_akcipher(spawn);
kfree(inst);
}
static int rsassa_pkcs1_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct rsassa_pkcs1_inst_ctx *ctx;
struct akcipher_alg *rsa_alg;
struct sig_instance *inst;
const char *hash_name;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = sig_instance_ctx(inst);
err = crypto_grab_akcipher(&ctx->spawn, sig_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
err = -EINVAL;
goto err_free_inst;
}
hash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(hash_name)) {
err = PTR_ERR(hash_name);
goto err_free_inst;
}
ctx->hash_prefix = rsassa_pkcs1_find_hash_prefix(hash_name);
if (!ctx->hash_prefix) {
err = -EINVAL;
goto err_free_inst;
}
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"pkcs1(%s,%s)", rsa_alg->base.cra_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"pkcs1(%s,%s)", rsa_alg->base.cra_driver_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct rsassa_pkcs1_ctx);
inst->alg.init = rsassa_pkcs1_init_tfm;
inst->alg.exit = rsassa_pkcs1_exit_tfm;
inst->alg.sign = rsassa_pkcs1_sign;
inst->alg.verify = rsassa_pkcs1_verify;
inst->alg.key_size = rsassa_pkcs1_key_size;
inst->alg.set_pub_key = rsassa_pkcs1_set_pub_key;
inst->alg.set_priv_key = rsassa_pkcs1_set_priv_key;
inst->free = rsassa_pkcs1_free;
err = sig_register_instance(tmpl, inst);
if (err) {
err_free_inst:
rsassa_pkcs1_free(inst);
}
return err;
}
struct crypto_template rsassa_pkcs1_tmpl = {
.name = "pkcs1",
.create = rsassa_pkcs1_create,
.module = THIS_MODULE,
};
MODULE_ALIAS_CRYPTO("pkcs1");

View File

@@ -5,12 +5,10 @@
* Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
*/
#include <crypto/akcipher.h>
#include <crypto/internal/sig.h>
#include <linux/cryptouser.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>
@@ -19,16 +17,35 @@
#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e
static const struct crypto_type crypto_sig_type;
static void crypto_sig_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_sig *sig = __crypto_sig_tfm(tfm);
struct sig_alg *alg = crypto_sig_alg(sig);
alg->exit(sig);
}
static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
{
if (tfm->__crt_alg->cra_type != &crypto_sig_type)
return crypto_init_akcipher_ops_sig(tfm);
struct crypto_sig *sig = __crypto_sig_tfm(tfm);
struct sig_alg *alg = crypto_sig_alg(sig);
if (alg->exit)
sig->base.exit = crypto_sig_exit_tfm;
if (alg->init)
return alg->init(sig);
return 0;
}
static void crypto_sig_free_instance(struct crypto_instance *inst)
{
struct sig_instance *sig = sig_instance(inst);
sig->free(sig);
}
static void __maybe_unused crypto_sig_show(struct seq_file *m,
struct crypto_alg *alg)
{
@@ -38,16 +55,17 @@ static void __maybe_unused crypto_sig_show(struct seq_file *m,
static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
struct crypto_alg *alg)
{
struct crypto_report_akcipher rsig = {};
struct crypto_report_sig rsig = {};
strscpy(rsig.type, "sig", sizeof(rsig.type));
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
return nla_put(skb, CRYPTOCFGA_REPORT_SIG, sizeof(rsig), &rsig);
}
static const struct crypto_type crypto_sig_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_sig_init_tfm,
.free = crypto_sig_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_sig_show,
#endif
@@ -66,74 +84,95 @@ struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_sig);
int crypto_sig_maxsize(struct crypto_sig *tfm)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
return crypto_akcipher_maxsize(*ctx);
}
EXPORT_SYMBOL_GPL(crypto_sig_maxsize);
int crypto_sig_sign(struct crypto_sig *tfm,
static int sig_default_sign(struct crypto_sig *tfm,
const void *src, unsigned int slen,
void *dst, unsigned int dlen)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
struct crypto_akcipher_sync_data data = {
.tfm = *ctx,
.src = src,
.dst = dst,
.slen = slen,
.dlen = dlen,
};
return crypto_akcipher_sync_prep(&data) ?:
crypto_akcipher_sync_post(&data,
crypto_akcipher_sign(data.req));
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(crypto_sig_sign);
int crypto_sig_verify(struct crypto_sig *tfm,
static int sig_default_verify(struct crypto_sig *tfm,
const void *src, unsigned int slen,
const void *digest, unsigned int dlen)
const void *dst, unsigned int dlen)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
struct crypto_akcipher_sync_data data = {
.tfm = *ctx,
.src = src,
.slen = slen,
.dlen = dlen,
};
return -ENOSYS;
}
static int sig_default_set_key(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
{
return -ENOSYS;
}
static int sig_prepare_alg(struct sig_alg *alg)
{
struct crypto_alg *base = &alg->base;
if (!alg->sign)
alg->sign = sig_default_sign;
if (!alg->verify)
alg->verify = sig_default_verify;
if (!alg->set_priv_key)
alg->set_priv_key = sig_default_set_key;
if (!alg->set_pub_key)
return -EINVAL;
if (!alg->key_size)
return -EINVAL;
if (!alg->max_size)
alg->max_size = alg->key_size;
if (!alg->digest_size)
alg->digest_size = alg->key_size;
base->cra_type = &crypto_sig_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SIG;
return 0;
}
int crypto_register_sig(struct sig_alg *alg)
{
struct crypto_alg *base = &alg->base;
int err;
err = crypto_akcipher_sync_prep(&data);
err = sig_prepare_alg(alg);
if (err)
return err;
memcpy(data.buf + slen, digest, dlen);
return crypto_akcipher_sync_post(&data,
crypto_akcipher_verify(data.req));
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_sig_verify);
EXPORT_SYMBOL_GPL(crypto_register_sig);
int crypto_sig_set_pubkey(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
void crypto_unregister_sig(struct sig_alg *alg)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
return crypto_akcipher_set_pub_key(*ctx, key, keylen);
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_sig_set_pubkey);
EXPORT_SYMBOL_GPL(crypto_unregister_sig);
int crypto_sig_set_privkey(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
int sig_register_instance(struct crypto_template *tmpl,
struct sig_instance *inst)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
int err;
return crypto_akcipher_set_priv_key(*ctx, key, keylen);
if (WARN_ON(!inst->free))
return -EINVAL;
err = sig_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, sig_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(crypto_sig_set_privkey);
EXPORT_SYMBOL_GPL(sig_register_instance);
int crypto_grab_sig(struct crypto_sig_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_sig_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_sig);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Public Key Signature Algorithms");
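
For orientation, a minimal synchronous caller of this interface might look like the sketch below (error handling trimmed; the algorithm name is only an example, and the key/sig/digest buffers are assumed to come from the caller):

struct crypto_sig *tfm;
int err;

tfm = crypto_alloc_sig("x962(ecdsa-nist-p256)", 0, 0);
if (IS_ERR(tfm))
	return PTR_ERR(tfm);

err = crypto_sig_set_pubkey(tfm, key, keylen);
if (!err)
	err = crypto_sig_verify(tfm, sig, siglen, digest, digestlen);

crypto_free_sig(tfm);
return err;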

View File

@@ -33,6 +33,7 @@
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>
#include <crypto/sig.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
@@ -131,6 +132,11 @@ struct akcipher_test_suite {
unsigned int count;
};
struct sig_test_suite {
const struct sig_testvec *vecs;
unsigned int count;
};
struct kpp_test_suite {
const struct kpp_testvec *vecs;
unsigned int count;
@@ -151,6 +157,7 @@ struct alg_test_desc {
struct cprng_test_suite cprng;
struct drbg_test_suite drbg;
struct akcipher_test_suite akcipher;
struct sig_test_suite sig;
struct kpp_test_suite kpp;
} suite;
};
@@ -4123,11 +4130,9 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[3];
const char *m, *c;
unsigned int m_size, c_size;
const char *op;
u8 *key, *ptr;
struct scatterlist src, dst, src_tab[2];
const char *c;
unsigned int c_size;
if (testmgr_alloc_buf(xbuf))
return err;
@@ -4138,92 +4143,53 @@
crypto_init_wait(&wait);
key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
GFP_KERNEL);
if (!key)
goto free_req;
memcpy(key, vecs->key, vecs->key_len);
ptr = key + vecs->key_len;
ptr = test_pack_u32(ptr, vecs->algo);
ptr = test_pack_u32(ptr, vecs->param_len);
memcpy(ptr, vecs->params, vecs->param_len);
if (vecs->public_key_vec)
err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
err = crypto_akcipher_set_pub_key(tfm, vecs->key,
vecs->key_len);
else
err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
err = crypto_akcipher_set_priv_key(tfm, vecs->key,
vecs->key_len);
if (err)
goto free_key;
goto free_req;
/*
* First run test which do not require a private key, such as
* encrypt or verify.
*/
/* First run encrypt test which does not require a private key */
err = -ENOMEM;
out_len_max = crypto_akcipher_maxsize(tfm);
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
goto free_key;
goto free_req;
if (!vecs->siggen_sigver_test) {
m = vecs->m;
m_size = vecs->m_size;
c = vecs->c;
c_size = vecs->c_size;
op = "encrypt";
} else {
/* Swap args so we could keep plaintext (digest)
* in vecs->m, and cooked signature in vecs->c.
*/
m = vecs->c; /* signature */
m_size = vecs->c_size;
c = vecs->m; /* digest */
c_size = vecs->m_size;
op = "verify";
}
err = -E2BIG;
if (WARN_ON(m_size > PAGE_SIZE))
if (WARN_ON(vecs->m_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], m, m_size);
memcpy(xbuf[0], vecs->m, vecs->m_size);
sg_init_table(src_tab, 3);
sg_init_table(src_tab, 2);
sg_set_buf(&src_tab[0], xbuf[0], 8);
sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
if (vecs->siggen_sigver_test) {
if (WARN_ON(c_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[1], c, c_size);
sg_set_buf(&src_tab[2], xbuf[1], c_size);
akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
} else {
sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
sg_init_one(&dst, outbuf_enc, out_len_max);
akcipher_request_set_crypt(req, src_tab, &dst, m_size,
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
}
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
err = crypto_wait_req(vecs->siggen_sigver_test ?
/* Run asymmetric signature verification */
crypto_akcipher_verify(req) :
/* Run asymmetric encrypt */
crypto_akcipher_encrypt(req), &wait);
err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
}
if (!vecs->siggen_sigver_test && c) {
if (c) {
if (req->dst_len != c_size) {
pr_err("alg: akcipher: %s test failed. Invalid output len\n",
op);
pr_err("alg: akcipher: encrypt test failed. Invalid output len\n");
err = -EINVAL;
goto free_all;
}
/* verify that encrypted message is equal to expected */
if (memcmp(c, outbuf_enc, c_size) != 0) {
pr_err("alg: akcipher: %s test failed. Invalid output\n",
op);
pr_err("alg: akcipher: encrypt test failed. Invalid output\n");
hexdump(outbuf_enc, c_size);
err = -EINVAL;
goto free_all;
@@ -4231,7 +4197,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
}
/*
* Don't invoke (decrypt or sign) test which require a private key
* Don't invoke decrypt test which requires a private key
* for vectors with only a public key.
*/
if (vecs->public_key_vec) {
@@ -4244,13 +4210,12 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
goto free_all;
}
if (!vecs->siggen_sigver_test && !c) {
if (!c) {
c = outbuf_enc;
c_size = req->dst_len;
}
err = -E2BIG;
op = vecs->siggen_sigver_test ? "sign" : "decrypt";
if (WARN_ON(c_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], c, c_size);
@@ -4260,34 +4225,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
crypto_init_wait(&wait);
akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max);
err = crypto_wait_req(vecs->siggen_sigver_test ?
/* Run asymmetric signature generation */
crypto_akcipher_sign(req) :
/* Run asymmetric decrypt */
crypto_akcipher_decrypt(req), &wait);
err = crypto_wait_req(crypto_akcipher_decrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
}
out_len = req->dst_len;
if (out_len < m_size) {
pr_err("alg: akcipher: %s test failed. Invalid output len %u\n",
op, out_len);
if (out_len < vecs->m_size) {
pr_err("alg: akcipher: decrypt test failed. Invalid output len %u\n",
out_len);
err = -EINVAL;
goto free_all;
}
/* verify that decrypted message is equal to the original msg */
if (memchr_inv(outbuf_dec, 0, out_len - m_size) ||
memcmp(m, outbuf_dec + out_len - m_size, m_size)) {
pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) ||
memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size,
vecs->m_size)) {
pr_err("alg: akcipher: decrypt test failed. Invalid output\n");
hexdump(outbuf_dec, out_len);
err = -EINVAL;
}
free_all:
kfree(outbuf_dec);
kfree(outbuf_enc);
free_key:
kfree(key);
free_req:
akcipher_request_free(req);
free_xbuf:
@@ -4337,6 +4297,113 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
return err;
}
static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs)
{
u8 *ptr, *key __free(kfree);
int err, sig_size;
key = kmalloc(vecs->key_len + 2 * sizeof(u32) + vecs->param_len,
GFP_KERNEL);
if (!key)
return -ENOMEM;
/* ecrdsa expects additional parameters appended to the key */
memcpy(key, vecs->key, vecs->key_len);
ptr = key + vecs->key_len;
ptr = test_pack_u32(ptr, vecs->algo);
ptr = test_pack_u32(ptr, vecs->param_len);
memcpy(ptr, vecs->params, vecs->param_len);
if (vecs->public_key_vec)
err = crypto_sig_set_pubkey(tfm, key, vecs->key_len);
else
err = crypto_sig_set_privkey(tfm, key, vecs->key_len);
if (err)
return err;
/*
* Run asymmetric signature verification first
* (which does not require a private key)
*/
err = crypto_sig_verify(tfm, vecs->c, vecs->c_size,
vecs->m, vecs->m_size);
if (err) {
pr_err("alg: sig: verify test failed: err %d\n", err);
return err;
}
/*
* Don't invoke sign test (which requires a private key)
* for vectors with only a public key.
*/
if (vecs->public_key_vec)
return 0;
sig_size = crypto_sig_keysize(tfm);
if (sig_size < vecs->c_size) {
pr_err("alg: sig: invalid maxsize %u\n", sig_size);
return -EINVAL;
}
u8 *sig __free(kfree) = kzalloc(sig_size, GFP_KERNEL);
if (!sig)
return -ENOMEM;
/* Run asymmetric signature generation */
err = crypto_sig_sign(tfm, vecs->m, vecs->m_size, sig, sig_size);
if (err) {
pr_err("alg: sig: sign test failed: err %d\n", err);
return err;
}
/* Verify that generated signature equals cooked signature */
if (memcmp(sig, vecs->c, vecs->c_size) ||
memchr_inv(sig + vecs->c_size, 0, sig_size - vecs->c_size)) {
pr_err("alg: sig: sign test failed: invalid output\n");
hexdump(sig, sig_size);
return -EINVAL;
}
return 0;
}
static int test_sig(struct crypto_sig *tfm, const char *alg,
const struct sig_testvec *vecs, unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_sig_tfm(tfm));
int ret, i;
for (i = 0; i < tcount; i++) {
ret = test_sig_one(tfm, vecs++);
if (ret) {
pr_err("alg: sig: test %d failed for %s: err %d\n",
i + 1, algo, ret);
return ret;
}
}
return 0;
}
static int alg_test_sig(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
struct crypto_sig *tfm;
int err = 0;
tfm = crypto_alloc_sig(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: sig: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
if (desc->suite.sig.vecs)
err = test_sig(tfm, desc->alg, desc->suite.sig.vecs,
desc->suite.sig.count);
crypto_free_sig(tfm);
return err;
}
static int alg_test_null(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
@@ -5126,36 +5193,36 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ecdsa-nist-p192",
.test = alg_test_akcipher,
.test = alg_test_sig,
.suite = {
.akcipher = __VECS(ecdsa_nist_p192_tv_template)
.sig = __VECS(ecdsa_nist_p192_tv_template)
}
}, {
.alg = "ecdsa-nist-p256",
.test = alg_test_akcipher,
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(ecdsa_nist_p256_tv_template)
.sig = __VECS(ecdsa_nist_p256_tv_template)
}
}, {
.alg = "ecdsa-nist-p384",
.test = alg_test_akcipher,
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(ecdsa_nist_p384_tv_template)
.sig = __VECS(ecdsa_nist_p384_tv_template)
}
}, {
.alg = "ecdsa-nist-p521",
.test = alg_test_akcipher,
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(ecdsa_nist_p521_tv_template)
.sig = __VECS(ecdsa_nist_p521_tv_template)
}
}, {
.alg = "ecrdsa",
.test = alg_test_akcipher,
.test = alg_test_sig,
.suite = {
.akcipher = __VECS(ecrdsa_tv_template)
.sig = __VECS(ecrdsa_tv_template)
}
}, {
.alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)",
@ -5447,6 +5514,24 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(nhpoly1305_tv_template)
}
}, {
.alg = "p1363(ecdsa-nist-p192)",
.test = alg_test_null,
}, {
.alg = "p1363(ecdsa-nist-p256)",
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.sig = __VECS(p1363_ecdsa_nist_p256_tv_template)
}
}, {
.alg = "p1363(ecdsa-nist-p384)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "p1363(ecdsa-nist-p521)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pcbc(fcrypt)",
.test = alg_test_skcipher,
@ -5454,34 +5539,44 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(fcrypt_pcbc_tv_template)
}
}, {
.alg = "pkcs1pad(rsa,sha224)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha256)",
.test = alg_test_akcipher,
.fips_allowed = 1,
.alg = "pkcs1(rsa,none)",
.test = alg_test_sig,
.suite = {
.akcipher = __VECS(pkcs1pad_rsa_tv_template)
.sig = __VECS(pkcs1_rsa_none_tv_template)
}
}, {
.alg = "pkcs1pad(rsa,sha3-256)",
.alg = "pkcs1(rsa,sha224)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha3-384)",
.alg = "pkcs1(rsa,sha256)",
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.sig = __VECS(pkcs1_rsa_tv_template)
}
}, {
.alg = "pkcs1(rsa,sha3-256)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha3-512)",
.alg = "pkcs1(rsa,sha3-384)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha384)",
.alg = "pkcs1(rsa,sha3-512)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha512)",
.alg = "pkcs1(rsa,sha384)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1(rsa,sha512)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
@ -5678,6 +5773,33 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(wp512_tv_template)
}
}, {
.alg = "x962(ecdsa-nist-p192)",
.test = alg_test_sig,
.suite = {
.sig = __VECS(x962_ecdsa_nist_p192_tv_template)
}
}, {
.alg = "x962(ecdsa-nist-p256)",
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.sig = __VECS(x962_ecdsa_nist_p256_tv_template)
}
}, {
.alg = "x962(ecdsa-nist-p384)",
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.sig = __VECS(x962_ecdsa_nist_p384_tv_template)
}
}, {
.alg = "x962(ecdsa-nist-p521)",
.test = alg_test_sig,
.fips_allowed = 1,
.suite = {
.sig = __VECS(x962_ecdsa_nist_p521_tv_template)
}
}, {
.alg = "xcbc(aes)",
.test = alg_test_hash,
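
Note the renames in the table above: the ECDSA/ECRDSA vectors move from .akcipher to .sig suites, and the RSASSA-PKCS#1-v1_5 templates change name from "pkcs1pad(rsa,<hash>)" to "pkcs1(rsa,<hash>)", with the sha256 variant keeping real test vectors. Under the new naming a handle would be requested as, illustratively:

    /* the hash is folded into the template name */
    struct crypto_sig *tfm = crypto_alloc_sig("pkcs1(rsa,sha256)", 0, 0);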

File diff suppressed because it is too large

View File

@ -50,7 +50,7 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
depends on (X86 || PPC_MAPLE || COMPILE_TEST)
depends on (X86 || COMPILE_TEST)
depends on PCI && HAS_IOPORT_MAP
default HW_RANDOM
help
@ -62,6 +62,19 @@ config HW_RANDOM_AMD
If unsure, say Y.
config HW_RANDOM_AIROHA
tristate "Airoha True HW Random Number Generator support"
depends on ARCH_AIROHA || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the True Random Number
Generator hardware found on Airoha SoC.
To compile this driver as a module, choose M here: the
module will be called airoha-rng.
If unsure, say Y.
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
depends on (ARCH_AT91 || COMPILE_TEST)
@ -99,9 +112,22 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
config HW_RANDOM_BCM74110
tristate "Broadcom BCM74110 Random Number Generator support"
depends on ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
Generator hardware found on the Broadcom BCM74110 SoCs.
To compile this driver as a module, choose M here: the
module will be called bcm74110-rng
If unsure, say Y.
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200

View File

@ -8,6 +8,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_AIROHA) += airoha-trng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_BA431) += ba431-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
@ -31,6 +32,7 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o

View File

@ -0,0 +1,243 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2024 Christian Marangi */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#define TRNG_IP_RDY 0x800
#define CNT_TRANS GENMASK(15, 8)
#define SAMPLE_RDY BIT(0)
#define TRNG_NS_SEK_AND_DAT_EN 0x804
#define RNG_EN BIT(31) /* referenced as ring_en */
#define RAW_DATA_EN BIT(16)
#define TRNG_HEALTH_TEST_SW_RST 0x808
#define SW_RST BIT(0) /* Active High */
#define TRNG_INTR_EN 0x818
#define INTR_MASK BIT(16)
#define CONTINUOUS_HEALTH_INITR_EN BIT(2)
#define SW_STARTUP_INITR_EN BIT(1)
#define RST_STARTUP_INITR_EN BIT(0)
/* Notice that Health Test are done only out of Reset and with RNG_EN */
#define TRNG_HEALTH_TEST_STATUS 0x824
#define CONTINUOUS_HEALTH_AP_TEST_FAIL BIT(23)
#define CONTINUOUS_HEALTH_RC_TEST_FAIL BIT(22)
#define SW_STARTUP_TEST_DONE BIT(21)
#define SW_STARTUP_AP_TEST_FAIL BIT(20)
#define SW_STARTUP_RC_TEST_FAIL BIT(19)
#define RST_STARTUP_TEST_DONE BIT(18)
#define RST_STARTUP_AP_TEST_FAIL BIT(17)
#define RST_STARTUP_RC_TEST_FAIL BIT(16)
#define RAW_DATA_VALID BIT(7)
#define TRNG_RAW_DATA_OUT 0x828
#define TRNG_CNT_TRANS_VALID 0x80
#define BUSY_LOOP_SLEEP 10
#define BUSY_LOOP_TIMEOUT (BUSY_LOOP_SLEEP * 10000)
struct airoha_trng {
void __iomem *base;
struct hwrng rng;
struct device *dev;
struct completion rng_op_done;
};
static int airoha_trng_irq_mask(struct airoha_trng *trng)
{
u32 val;
val = readl(trng->base + TRNG_INTR_EN);
val |= INTR_MASK;
writel(val, trng->base + TRNG_INTR_EN);
return 0;
}
static int airoha_trng_irq_unmask(struct airoha_trng *trng)
{
u32 val;
val = readl(trng->base + TRNG_INTR_EN);
val &= ~INTR_MASK;
writel(val, trng->base + TRNG_INTR_EN);
return 0;
}
static int airoha_trng_init(struct hwrng *rng)
{
struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
int ret;
u32 val;
val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
val |= RNG_EN;
writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
/* Set out of SW Reset */
airoha_trng_irq_unmask(trng);
writel(0, trng->base + TRNG_HEALTH_TEST_SW_RST);
ret = wait_for_completion_timeout(&trng->rng_op_done, BUSY_LOOP_TIMEOUT);
if (ret <= 0) {
dev_err(trng->dev, "Timeout waiting for Health Check\n");
airoha_trng_irq_mask(trng);
return -ENODEV;
}
/* Check if Health Test Failed */
val = readl(trng->base + TRNG_HEALTH_TEST_STATUS);
if (val & (RST_STARTUP_AP_TEST_FAIL | RST_STARTUP_RC_TEST_FAIL)) {
dev_err(trng->dev, "Health Check fail: %s test fail\n",
val & RST_STARTUP_AP_TEST_FAIL ? "AP" : "RC");
return -ENODEV;
}
/* Check if IP is ready */
ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
val & SAMPLE_RDY, 10, 1000);
if (ret < 0) {
dev_err(trng->dev, "Timeout waiting for IP ready");
return -ENODEV;
}
/* CNT_TRANS must be 0x80 for IP to be considered ready */
ret = readl_poll_timeout(trng->base + TRNG_IP_RDY, val,
FIELD_GET(CNT_TRANS, val) == TRNG_CNT_TRANS_VALID,
10, 1000);
if (ret < 0) {
dev_err(trng->dev, "Timeout waiting for IP ready");
return -ENODEV;
}
return 0;
}
static void airoha_trng_cleanup(struct hwrng *rng)
{
struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
u32 val;
val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
val &= ~RNG_EN;
writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
/* Put it in SW Reset */
writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
}
static int airoha_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct airoha_trng *trng = container_of(rng, struct airoha_trng, rng);
u32 *data = buf;
u32 status;
int ret;
ret = readl_poll_timeout(trng->base + TRNG_HEALTH_TEST_STATUS, status,
status & RAW_DATA_VALID, 10, 1000);
if (ret < 0) {
dev_err(trng->dev, "Timeout waiting for TRNG RAW Data valid\n");
return ret;
}
*data = readl(trng->base + TRNG_RAW_DATA_OUT);
return 4;
}
static irqreturn_t airoha_trng_irq(int irq, void *priv)
{
struct airoha_trng *trng = (struct airoha_trng *)priv;
airoha_trng_irq_mask(trng);
/* Just complete the task, we will read the value later */
complete(&trng->rng_op_done);
return IRQ_HANDLED;
}
static int airoha_trng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct airoha_trng *trng;
int irq, ret;
u32 val;
trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
if (!trng)
return -ENOMEM;
trng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->base))
return PTR_ERR(trng->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
airoha_trng_irq_mask(trng);
ret = devm_request_irq(&pdev->dev, irq, airoha_trng_irq, 0,
pdev->name, (void *)trng);
if (ret) {
dev_err(dev, "Can't get interrupt working.\n");
return ret;
}
init_completion(&trng->rng_op_done);
/* Enable interrupt for SW reset Health Check */
val = readl(trng->base + TRNG_INTR_EN);
val |= RST_STARTUP_INITR_EN;
writel(val, trng->base + TRNG_INTR_EN);
/* Set output to raw data */
val = readl(trng->base + TRNG_NS_SEK_AND_DAT_EN);
val |= RAW_DATA_EN;
writel(val, trng->base + TRNG_NS_SEK_AND_DAT_EN);
/* Put it in SW Reset */
writel(SW_RST, trng->base + TRNG_HEALTH_TEST_SW_RST);
trng->dev = dev;
trng->rng.name = pdev->name;
trng->rng.init = airoha_trng_init;
trng->rng.cleanup = airoha_trng_cleanup;
trng->rng.read = airoha_trng_read;
ret = devm_hwrng_register(dev, &trng->rng);
if (ret) {
dev_err(dev, "failed to register rng device: %d\n", ret);
return ret;
}
return 0;
}
static const struct of_device_id airoha_trng_of_match[] = {
{ .compatible = "airoha,en7581-trng", },
{},
};
MODULE_DEVICE_TABLE(of, airoha_trng_of_match);
static struct platform_driver airoha_trng_driver = {
.driver = {
.name = "airoha-trng",
.of_match_table = airoha_trng_of_match,
},
.probe = airoha_trng_probe,
};
module_platform_driver(airoha_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
MODULE_DESCRIPTION("Airoha True Random Number Generator driver");

View File

@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
static struct platform_driver atmel_trng_driver = {
.probe = atmel_trng_probe,
.remove_new = atmel_trng_remove,
.remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
.pm = pm_ptr(&atmel_trng_pm_ops),

View File

@ -0,0 +1,125 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2024 Broadcom
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/hw_random.h>
#define HOST_REV_ID 0x00
#define HOST_FIFO_DEPTH 0x04
#define HOST_FIFO_COUNT 0x08
#define HOST_FIFO_THRESHOLD 0x0c
#define HOST_FIFO_DATA 0x10
#define HOST_FIFO_COUNT_MASK 0xffff
/* Delay range in microseconds */
#define FIFO_DELAY_MIN_US 3
#define FIFO_DELAY_MAX_US 7
#define FIFO_DELAY_MAX_COUNT 10
struct bcm74110_priv {
void __iomem *base;
};
static inline int bcm74110_rng_fifo_count(void __iomem *mem)
{
return readl_relaxed(mem) & HOST_FIFO_COUNT_MASK;
}
static int bcm74110_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
struct bcm74110_priv *priv = (struct bcm74110_priv *)rng->priv;
void __iomem *fc_addr = priv->base + HOST_FIFO_COUNT;
void __iomem *fd_addr = priv->base + HOST_FIFO_DATA;
unsigned underrun_count = 0;
u32 max_words = max / sizeof(u32);
u32 num_words;
unsigned i;
/*
* We need to check how many words are available in the RNG FIFO. If
* there aren't any, we need to wait for some to become available.
*/
while ((num_words = bcm74110_rng_fifo_count(fc_addr)) == 0) {
if (!wait)
return 0;
/*
* As a precaution, limit how long we wait. If the FIFO doesn't
* refill within the allotted time, return 0 (=no data) to the
* caller.
*/
if (likely(underrun_count < FIFO_DELAY_MAX_COUNT))
usleep_range(FIFO_DELAY_MIN_US, FIFO_DELAY_MAX_US);
else
return 0;
underrun_count++;
}
if (num_words > max_words)
num_words = max_words;
/* Bail early if we run out of random numbers unexpectedly */
for (i = 0; i < num_words && bcm74110_rng_fifo_count(fc_addr) > 0; i++)
((u32 *)buf)[i] = readl_relaxed(fd_addr);
return i * sizeof(u32);
}
static struct hwrng bcm74110_hwrng = {
.read = bcm74110_rng_read,
};
static int bcm74110_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bcm74110_priv *priv;
int rc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
bcm74110_hwrng.name = pdev->name;
bcm74110_hwrng.priv = (unsigned long)priv;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
rc = devm_hwrng_register(dev, &bcm74110_hwrng);
if (rc)
dev_err(dev, "hwrng registration failed (%d)\n", rc);
else
dev_info(dev, "hwrng registered\n");
return rc;
}
static const struct of_device_id bcm74110_rng_match[] = {
{ .compatible = "brcm,bcm74110-rng", },
{},
};
MODULE_DEVICE_TABLE(of, bcm74110_rng_match);
static struct platform_driver bcm74110_rng_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = bcm74110_rng_match,
},
.probe = bcm74110_rng_probe,
};
module_platform_driver(bcm74110_rng_driver);
MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("BCM 74110 Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL v2");

View File

@ -653,7 +653,7 @@ static struct platform_driver cctrng_driver = {
.pm = &cctrng_pm,
},
.probe = cctrng_probe,
.remove_new = cctrng_remove,
.remove = cctrng_remove,
};
module_platform_driver(cctrng_driver);

View File

@ -181,8 +181,15 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int present;
BUG_ON(!mutex_is_locked(&reading_mutex));
if (rng->read)
return rng->read(rng, (void *)buffer, size, wait);
if (rng->read) {
int err;
err = rng->read(rng, buffer, size, wait);
if (WARN_ON_ONCE(err > 0 && err > size))
err = size;
return err;
}
if (rng->data_present)
present = rng->data_present(rng, wait);
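
The WARN_ON_ONCE() clamp added above implements the "Add WARN_ON for return values from driver that indicates memory corruption" item from the cover letter: a ->read() hook that reports more bytes than requested has necessarily overrun the caller's buffer. The contract a driver must honour, as a hypothetical example:

    /*
     * Returns the number of bytes written to @buf (never more than @max),
     * 0 if no data is available and @wait is false, or a negative errno.
     */
    static int example_rng_read(struct hwrng *rng, void *buf, size_t max,
                                bool wait)
    {
            size_t n = min_t(size_t, max, sizeof(u32));

            memset(buf, 0, n);      /* placeholder for real entropy */
            return n;
    }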

View File

@ -335,7 +335,7 @@ static struct platform_driver exynos_trng_driver = {
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
.remove_new = exynos_trng_remove,
.remove = exynos_trng_remove,
};
module_platform_driver(exynos_trng_driver);

View File

@ -89,7 +89,7 @@ depth_show(struct device *dev, struct device_attribute *attr, char *buf)
struct histb_rng_priv *priv = dev_get_drvdata(dev);
void __iomem *base = priv->base;
return sprintf(buf, "%d\n", histb_rng_get_depth(base));
return sprintf(buf, "%u\n", histb_rng_get_depth(base));
}
static ssize_t

View File

@ -132,7 +132,7 @@ MODULE_DEVICE_TABLE(of, ingenic_rng_of_match);
static struct platform_driver ingenic_rng_driver = {
.probe = ingenic_rng_probe,
.remove_new = ingenic_rng_remove,
.remove = ingenic_rng_remove,
.driver = {
.name = "ingenic-rng",
.of_match_table = ingenic_rng_of_match,

View File

@ -261,7 +261,7 @@ static struct platform_driver ks_sa_rng_driver = {
.of_match_table = ks_sa_rng_dt_match,
},
.probe = ks_sa_rng_probe,
.remove_new = ks_sa_rng_remove,
.remove = ks_sa_rng_remove,
};
module_platform_driver(ks_sa_rng_driver);

View File

@ -188,7 +188,7 @@ static struct platform_driver mxc_rnga_driver = {
.of_match_table = mxc_rnga_of_match,
},
.probe = mxc_rnga_probe,
.remove_new = mxc_rnga_remove,
.remove = mxc_rnga_remove,
};
module_platform_driver(mxc_rnga_driver);

View File

@ -858,7 +858,7 @@ static struct platform_driver n2rng_driver = {
.of_match_table = n2rng_match,
},
.probe = n2rng_probe,
.remove_new = n2rng_remove,
.remove = n2rng_remove,
};
module_platform_driver(n2rng_driver);

View File

@ -176,7 +176,7 @@ static struct platform_driver npcm_rng_driver = {
.of_match_table = of_match_ptr(rng_dt_id),
},
.probe = npcm_rng_probe,
.remove_new = npcm_rng_remove,
.remove = npcm_rng_remove,
};
module_platform_driver(npcm_rng_driver);

View File

@ -558,7 +558,7 @@ static struct platform_driver omap_rng_driver = {
.of_match_table = of_match_ptr(omap_rng_of_match),
},
.probe = omap_rng_probe,
.remove_new = omap_rng_remove,
.remove = omap_rng_remove,
};
module_platform_driver(omap_rng_driver);

View File

@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
@ -49,6 +50,7 @@
struct stm32_rng_data {
uint max_clock_rate;
uint nb_clock;
u32 cr;
u32 nscr;
u32 htcr;
@ -72,7 +74,7 @@ struct stm32_rng_private {
struct hwrng rng;
struct device *dev;
void __iomem *base;
struct clk *clk;
struct clk_bulk_data *clk_bulk;
struct reset_control *rst;
struct stm32_rng_config pm_conf;
const struct stm32_rng_data *data;
@ -266,7 +268,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
unsigned long clock_rate = 0;
uint clock_div = 0;
clock_rate = clk_get_rate(priv->clk);
clock_rate = clk_get_rate(priv->clk_bulk[0].clk);
/*
* Get the exponent to apply on the CLKDIV field in RNG_CR register
@ -276,7 +278,7 @@ static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
clock_div++;
pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk) >> clock_div);
pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div);
return clock_div;
}
@ -288,7 +290,7 @@ static int stm32_rng_init(struct hwrng *rng)
int err;
u32 reg;
err = clk_prepare_enable(priv->clk);
err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@ -328,7 +330,7 @@ static int stm32_rng_init(struct hwrng *rng)
(!(reg & RNG_CR_CONDRST)),
10, 50000);
if (err) {
clk_disable_unprepare(priv->clk);
clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
return -EINVAL;
}
@ -356,12 +358,13 @@ static int stm32_rng_init(struct hwrng *rng)
reg & RNG_SR_DRDY,
10, 100000);
if (err || (reg & ~RNG_SR_DRDY)) {
clk_disable_unprepare(priv->clk);
clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);
return -EINVAL;
}
clk_disable_unprepare(priv->clk);
clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@ -379,7 +382,8 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
reg = readl_relaxed(priv->base + RNG_CR);
reg &= ~RNG_CR_RNGEN;
writel_relaxed(reg, priv->base + RNG_CR);
clk_disable_unprepare(priv->clk);
clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@ -389,7 +393,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
struct stm32_rng_private *priv = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(priv->clk);
err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@ -403,7 +407,7 @@ static int __maybe_unused stm32_rng_suspend(struct device *dev)
writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);
clk_disable_unprepare(priv->clk);
clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@ -414,7 +418,7 @@ static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
int err;
u32 reg;
err = clk_prepare_enable(priv->clk);
err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@ -434,7 +438,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
int err;
u32 reg;
err = clk_prepare_enable(priv->clk);
err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
if (err)
return err;
@ -462,7 +466,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
reg & ~RNG_CR_CONDRST, 10, 100000);
if (err) {
clk_disable_unprepare(priv->clk);
clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
return -EINVAL;
}
@ -472,7 +476,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
clk_disable_unprepare(priv->clk);
clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
return 0;
}
@ -484,9 +488,19 @@ static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
stm32_rng_resume)
};
static const struct stm32_rng_data stm32mp25_rng_data = {
.has_cond_reset = true,
.max_clock_rate = 48000000,
.nb_clock = 2,
.cr = 0x00F00D00,
.nscr = 0x2B5BB,
.htcr = 0x969D,
};
static const struct stm32_rng_data stm32mp13_rng_data = {
.has_cond_reset = true,
.max_clock_rate = 48000000,
.nb_clock = 1,
.cr = 0x00F00D00,
.nscr = 0x2B5BB,
.htcr = 0x969D,
@ -494,10 +508,15 @@ static const struct stm32_rng_data stm32mp13_rng_data = {
static const struct stm32_rng_data stm32_rng_data = {
.has_cond_reset = false,
.max_clock_rate = 3000000,
.max_clock_rate = 48000000,
.nb_clock = 1,
};
static const struct of_device_id stm32_rng_match[] = {
{
.compatible = "st,stm32mp25-rng",
.data = &stm32mp25_rng_data,
},
{
.compatible = "st,stm32mp13-rng",
.data = &stm32mp13_rng_data,
@ -516,6 +535,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
struct device_node *np = ofdev->dev.of_node;
struct stm32_rng_private *priv;
struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@ -525,10 +545,6 @@ static int stm32_rng_probe(struct platform_device *ofdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->clk = devm_clk_get(&ofdev->dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@ -551,6 +567,28 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->rng.read = stm32_rng_read;
priv->rng.quality = 900;
if (!priv->data->nb_clock || priv->data->nb_clock > 2)
return -EINVAL;
ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
if (ret != priv->data->nb_clock)
return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret);
if (priv->data->nb_clock == 2) {
const char *id = priv->clk_bulk[1].id;
struct clk *clk = priv->clk_bulk[1].clk;
if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id)
return dev_err_probe(dev, -EINVAL, "Missing clock name\n");
if (strcmp(priv->clk_bulk[0].id, "core")) {
priv->clk_bulk[1].id = priv->clk_bulk[0].id;
priv->clk_bulk[1].clk = priv->clk_bulk[0].clk;
priv->clk_bulk[0].id = id;
priv->clk_bulk[0].clk = clk;
}
}
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
@ -565,7 +603,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
.remove_new = stm32_rng_remove,
.remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
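
The stm32 conversion above swaps a single struct clk for a clk_bulk array so that STM32MP25x parts, which need two clocks, share one code path with the single-clock platforms; the probe hunk also reorders the array so the "core" clock always sits at index 0. The bulk API mirrors the single-clock calls one-for-one; a minimal sketch, assuming a dev pointer inside probe:

    struct clk_bulk_data *clks;
    int nb, err;

    nb = devm_clk_bulk_get_all(dev, &clks); /* clock count, or -errno */
    if (nb < 0)
            return nb;

    err = clk_bulk_prepare_enable(nb, clks); /* was clk_prepare_enable() */
    if (err)
            return err;

    /* ... clks[0].clk now plays the old priv->clk role ... */

    clk_bulk_disable_unprepare(nb, clks);   /* was clk_disable_unprepare() */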

View File

@ -193,7 +193,7 @@ static struct platform_driver timeriomem_rng_driver = {
.of_match_table = timeriomem_rng_match,
},
.probe = timeriomem_rng_probe,
.remove_new = timeriomem_rng_remove,
.remove = timeriomem_rng_remove,
};
module_platform_driver(timeriomem_rng_driver);

View File

@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(of, xgene_rng_of_match);
static struct platform_driver xgene_rng_driver = {
.probe = xgene_rng_probe,
.remove_new = xgene_rng_remove,
.remove = xgene_rng_remove,
.driver = {
.name = "xgene-rng",
.of_match_table = xgene_rng_of_match,

View File

@ -542,7 +542,7 @@ MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
static struct platform_driver sun4i_ss_driver = {
.probe = sun4i_ss_probe,
.remove_new = sun4i_ss_remove,
.remove = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
.pm = &sun4i_ss_pm_ops,

View File

@ -1129,7 +1129,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
static struct platform_driver sun8i_ce_driver = {
.probe = sun8i_ce_probe,
.remove_new = sun8i_ce_remove,
.remove = sun8i_ce_remove,
.driver = {
.name = "sun8i-ce",
.pm = &sun8i_ce_pm_ops,

View File

@ -929,7 +929,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
static struct platform_driver sun8i_ss_driver = {
.probe = sun8i_ss_probe,
.remove_new = sun8i_ss_remove,
.remove = sun8i_ss_remove,
.driver = {
.name = "sun8i-ss",
.pm = &sun8i_ss_pm_ops,

View File

@ -653,9 +653,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
crypto4xx_destroy_pdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_sdr(core_dev->dev);
iounmap(core_dev->dev->ce_base);
kfree(core_dev->dev);
kfree(core_dev);
}
static u32 get_next_gd(u32 current)
@ -1333,17 +1330,12 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
static int crypto4xx_probe(struct platform_device *ofdev)
{
int rc;
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
struct device_node *np;
u32 pvr;
bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
return -ENODEV;
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto");
if (np) {
mtdcri(SDR0, PPC460EX_SDR0_SRST,
@ -1374,16 +1366,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
of_node_put(np);
core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
core_dev = devm_kzalloc(
&ofdev->dev, sizeof(struct crypto4xx_core_device), GFP_KERNEL);
if (!core_dev)
return -ENOMEM;
dev_set_drvdata(dev, core_dev);
core_dev->ofdev = ofdev;
core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
rc = -ENOMEM;
core_dev->dev = devm_kzalloc(
&ofdev->dev, sizeof(struct crypto4xx_device), GFP_KERNEL);
if (!core_dev->dev)
goto err_alloc_dev;
return -ENOMEM;
/*
* Older version of 460EX/GT have a hardware bug.
@ -1402,7 +1395,9 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
mutex_init(&core_dev->rng_lock);
rc = devm_mutex_init(&ofdev->dev, &core_dev->rng_lock);
if (rc)
return rc;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@ -1421,21 +1416,21 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
rc = -ENOMEM;
goto err_iomap;
core_dev->dev->ce_base = devm_platform_ioremap_resource(ofdev, 0);
if (IS_ERR(core_dev->dev->ce_base)) {
dev_err(&ofdev->dev, "failed to ioremap resource");
rc = PTR_ERR(core_dev->dev->ce_base);
goto err_build_sdr;
}
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
rc = request_irq(core_dev->irq, is_revb ?
crypto4xx_ce_interrupt_handler_revb :
crypto4xx_ce_interrupt_handler, 0,
KBUILD_MODNAME, dev);
rc = devm_request_irq(&ofdev->dev, core_dev->irq,
is_revb ? crypto4xx_ce_interrupt_handler_revb :
crypto4xx_ce_interrupt_handler,
0, KBUILD_MODNAME, dev);
if (rc)
goto err_request_irq;
goto err_iomap;
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@ -1444,26 +1439,17 @@ static int crypto4xx_probe(struct platform_device *ofdev)
rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
ARRAY_SIZE(crypto4xx_alg));
if (rc)
goto err_start_dev;
goto err_iomap;
ppc4xx_trng_probe(core_dev);
return 0;
err_start_dev:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
iounmap(core_dev->dev->ce_base);
err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
kfree(core_dev);
return rc;
}
@ -1474,13 +1460,9 @@ static void crypto4xx_remove(struct platform_device *ofdev)
ppc4xx_trng_remove(core_dev);
free_irq(core_dev->irq, dev);
irq_dispose_mapping(core_dev->irq);
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
}
@ -1497,7 +1479,7 @@ static struct platform_driver crypto4xx_driver = {
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
.remove_new = crypto4xx_remove,
.remove = crypto4xx_remove,
};
module_platform_driver(crypto4xx_driver);
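
The crypto4xx rework above is a straight conversion to device-managed resources: devm_kzalloc(), devm_platform_ioremap_resource(), devm_request_irq() and devm_mutex_init() are all undone automatically on probe failure or unbind, which is why most of the hand-rolled error labels and the iounmap()/kfree() calls in crypto4xx_stop_all() disappear. The general shape of such a probe, as a hedged standalone sketch:

    static irqreturn_t example_irq_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int example_probe(struct platform_device *pdev)
    {
            void __iomem *base;
            int irq;

            base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* nothing to unwind */

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;             /* mapping released for us */

            /* IRQ is freed automatically on unbind or probe failure */
            return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
                                    KBUILD_MODNAME, pdev);
    }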

View File

@ -240,11 +240,9 @@ static int meson_crypto_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
mc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mc->base)) {
err = PTR_ERR(mc->base);
dev_err(&pdev->dev, "Cannot request MMIO err=%d\n", err);
return err;
}
if (IS_ERR(mc->base))
return PTR_ERR(mc->base);
mc->busclk = devm_clk_get(&pdev->dev, "blkmv");
if (IS_ERR(mc->busclk)) {
err = PTR_ERR(mc->busclk);
@ -322,7 +320,7 @@ MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
static struct platform_driver meson_crypto_driver = {
.probe = meson_crypto_probe,
.remove_new = meson_crypto_remove,
.remove = meson_crypto_remove,
.driver = {
.name = "gxl-crypto",
.of_match_table = meson_crypto_of_match_table,

View File

@ -601,8 +601,6 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
.akcipher.base = {
.encrypt = aspeed_acry_rsa_enc,
.decrypt = aspeed_acry_rsa_dec,
.sign = aspeed_acry_rsa_dec,
.verify = aspeed_acry_rsa_enc,
.set_pub_key = aspeed_acry_rsa_set_pub_key,
.set_priv_key = aspeed_acry_rsa_set_priv_key,
.max_size = aspeed_acry_rsa_max_size,
@ -808,7 +806,7 @@ MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
static struct platform_driver aspeed_acry_driver = {
.probe = aspeed_acry_probe,
.remove_new = aspeed_acry_remove,
.remove = aspeed_acry_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_acry_of_matches,

View File

@ -266,7 +266,7 @@ MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
static struct platform_driver aspeed_hace_driver = {
.probe = aspeed_hace_probe,
.remove_new = aspeed_hace_remove,
.remove = aspeed_hace_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_hace_of_matches,

View File

@ -2453,7 +2453,7 @@ static void atmel_aes_remove(struct platform_device *pdev)
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
.remove_new = atmel_aes_remove,
.remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.of_match_table = atmel_aes_dt_ids,

View File

@ -379,7 +379,7 @@ MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
#endif
static const struct i2c_device_id atmel_ecc_id[] = {
{ "atecc508a", 0 },
{ "atecc508a" },
{ }
};
MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);

View File

@ -2691,7 +2691,7 @@ static void atmel_sha_remove(struct platform_device *pdev)
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
.remove_new = atmel_sha_remove,
.remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.of_match_table = atmel_sha_dt_ids,

View File

@ -202,8 +202,8 @@ static const struct of_device_id atmel_sha204a_dt_ids[] __maybe_unused = {
MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids);
static const struct i2c_device_id atmel_sha204a_id[] = {
{ "atsha204", 0 },
{ "atsha204a", 0 },
{ "atsha204" },
{ "atsha204a" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id);

View File

@ -872,7 +872,7 @@ static void atmel_tdes_done_task(unsigned long data)
if (!err)
err = atmel_tdes_crypt_start(dd);
if (!err)
return; /* DMA started. Not fininishing. */
return; /* DMA started. Not finishing. */
}
atmel_tdes_finish_req(dd, err);
@ -1074,7 +1074,7 @@ static void atmel_tdes_remove(struct platform_device *pdev)
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
.remove_new = atmel_tdes_remove,
.remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.of_match_table = atmel_tdes_dt_ids,

View File

@ -2975,7 +2975,7 @@ static void artpec6_crypto_remove(struct platform_device *pdev)
static struct platform_driver artpec6_crypto_driver = {
.probe = artpec6_crypto_probe,
.remove_new = artpec6_crypto_remove,
.remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
.of_match_table = artpec6_crypto_of_match,

View File

@ -2415,6 +2415,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
static int ahash_hmac_init(struct ahash_request *req)
{
int ret;
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
@ -2424,7 +2425,9 @@ static int ahash_hmac_init(struct ahash_request *req)
flow_log("ahash_hmac_init()\n");
/* init the context as a hash */
ahash_init(req);
ret = ahash_init(req);
if (ret)
return ret;
if (!spu_no_incr_hash(ctx)) {
/* SPU-M can do incr hashing but needs sw for outer HMAC */
@ -4704,7 +4707,7 @@ static struct platform_driver bcm_spu_pdriver = {
.of_match_table = of_match_ptr(bcm_spu_dt_ids),
},
.probe = bcm_spu_probe,
.remove_new = bcm_spu_remove,
.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

View File

@ -984,7 +984,7 @@ err:
return -ENOMEM;
}
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct rsa_key *raw_key)
{
struct caam_rsa_key *rsa_key = &ctx->key;
@ -994,7 +994,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
return;
return -ENOMEM;
rsa_key->p_sz = p_sz;
rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
@ -1029,7 +1029,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
rsa_key->priv_form = FORM3;
return;
return 0;
free_dq:
kfree_sensitive(rsa_key->dq);
@ -1043,6 +1043,7 @@ free_q:
kfree_sensitive(rsa_key->q);
free_p:
kfree_sensitive(rsa_key->p);
return -ENOMEM;
}
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
@ -1088,7 +1089,9 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
rsa_key->e_sz = raw_key.e_sz;
rsa_key->n_sz = raw_key.n_sz;
caam_rsa_set_priv_key_form(ctx, &raw_key);
ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
if (ret)
goto err;
return 0;

View File

@ -819,7 +819,7 @@ static struct platform_driver caam_jr_driver = {
.pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
.remove_new = caam_jr_remove,
.remove = caam_jr_remove,
.shutdown = caam_jr_remove,
};

View File

@ -733,7 +733,7 @@ static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
int caam_qi_init(struct platform_device *caam_pdev)
{
int err, i;
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct device *qidev = &caam_pdev->dev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
cpumask_var_t clean_mask;
@ -742,8 +742,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
goto fail_cpumask;
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = ctrldev;
ctrlpriv = dev_get_drvdata(qidev);
/* Initialize the congestion detection */
err = init_cgr(qidev);
@ -794,7 +793,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
caam_debugfs_qi_init(ctrlpriv);
err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
if (err)
goto fail2;
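
The caam/qi hunk above leans on devm_add_action_or_reset(), which binds a teardown callback to the device and, if the registration itself fails, runs the callback immediately; that is what lets caam_qi_init() bail out without leaking. Schematically, with hypothetical names:

    static void example_shutdown(void *data)
    {
            struct device *dev = data;

            dev_info(dev, "tearing down\n"); /* undo setup bound to @dev */
    }

    /* runs example_shutdown(dev) at unbind, or right away on failure */
    err = devm_add_action_or_reset(dev, example_shutdown, dev);
    if (err)
            return err;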

View File

@ -44,7 +44,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
dev_err(dev, "Cores still busy %llx", coremask);
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
if (timeout--)
if (!timeout--)
break;
udelay(CSR_DELAY);
@ -302,6 +302,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode);
if (ret) {
dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
mcode->code, mcode->phys_base);
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
goto fw_release;
}
@ -394,7 +396,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
dev_err(dev, "Cores still busy");
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
if (timeout--)
if (!timeout--)
break;
udelay(CSR_DELAY);
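
The two cavium hunks above fix an inverted timeout test: with the old "if (timeout--)", the loop broke out on the very first pass while the counter was still nonzero, so the busy-wait never actually waited. The corrected countdown idiom, as a generic sketch with hypothetical helpers:

    int timeout = 100;                      /* iteration budget */

    while (hw_still_busy()) {               /* hypothetical status poll */
            if (!timeout--)                 /* give up once budget is spent */
                    break;
            udelay(CSR_DELAY);
    }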

View File

@ -238,7 +238,7 @@ static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
qinfo = &cptvf->cqinfo;
queue = &qinfo->queue[qno];
/* lock commad queue */
/* lock command queue */
spin_lock(&queue->lock);
ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
memcpy(ent, (void *)cmd, qinfo->cmd_size);
@ -510,7 +510,7 @@ get_pending_entry:
info->time_in = jiffies;
info->req = req;
/* Create the CPT_INST_S type command for HW intrepretation */
/* Create the CPT_INST_S type command for HW interpretation */
cptinst.s.doneint = true;
cptinst.s.res_addr = (u64)info->comp_baddr;
cptinst.s.tag = 0;

View File

@ -17,7 +17,7 @@
#define CRYPTO_CTX_SIZE 256
/* packet inuput ring alignments */
/* packet input ring alignments */
#define PKTIN_Q_ALIGN_BYTES 16
/* AQM Queue input alignments */
#define AQM_Q_ALIGN_BYTES 32

View File

@ -210,7 +210,7 @@ static struct platform_driver sp_platform_driver = {
.of_match_table = sp_of_match,
},
.probe = sp_platform_probe,
.remove_new = sp_platform_remove,
.remove = sp_platform_remove,
#ifdef CONFIG_PM
.suspend = sp_platform_suspend,
.resume = sp_platform_resume,

View File

@ -2226,7 +2226,7 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
//plaintext is not encryped with rfc4543
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
@ -2277,7 +2277,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
memset(areq_ctx, 0, sizeof(*areq_ctx));
//plaintext is not decryped with rfc4543
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */

View File

@ -179,7 +179,7 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
}
max_key_buf_size <<= 1;
/* Alloc fallabck tfm or essiv when key size != 256 bit */
/* Alloc fallback tfm or essiv when key size != 256 bit */
ctx_p->fallback_tfm =
crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

View File

@ -643,7 +643,7 @@ static struct platform_driver ccree_driver = {
#endif
},
.probe = ccree_probe,
.remove_new = ccree_remove,
.remove = ccree_remove,
};
static int __init ccree_init(void)

View File

@ -1577,7 +1577,7 @@ struct cc_hash_template {
/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
//Asynchronize hash template
//Asynchronous hash template
{
.name = "sha1",
.driver_name = "sha1-ccree",

View File

@ -1186,7 +1186,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
else
bytes = rounddown(bytes, 16);
} else {
/*CTR mode counter overfloa*/
/*CTR mode counter overflow*/
bytes = req->cryptlen - reqctx->processed;
}
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);

View File

@ -389,7 +389,7 @@ static struct platform_driver exynos_rng_driver = {
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
.remove_new = exynos_rng_remove,
.remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);

View File

@ -528,7 +528,7 @@ MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
static struct platform_driver sl3516_ce_driver = {
.probe = sl3516_ce_probe,
.remove_new = sl3516_ce_remove,
.remove = sl3516_ce_remove,
.driver = {
.name = "sl3516-crypto",
.pm = &sl3516_ce_pm_ops,

View File

@ -100,6 +100,29 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
enum hpre_cap_table_type {
QM_RAS_NFE_TYPE = 0x0,
QM_RAS_NFE_RESET,
QM_RAS_CE_TYPE,
HPRE_RAS_NFE_TYPE,
HPRE_RAS_NFE_RESET,
HPRE_RAS_CE_TYPE,
HPRE_CORE_INFO,
HPRE_CORE_EN,
HPRE_DRV_ALG_BITMAP,
HPRE_ALG_BITMAP,
HPRE_CORE1_BITMAP_CAP,
HPRE_CORE2_BITMAP_CAP,
HPRE_CORE3_BITMAP_CAP,
HPRE_CORE4_BITMAP_CAP,
HPRE_CORE5_BITMAP_CAP,
HPRE_CORE6_BITMAP_CAP,
HPRE_CORE7_BITMAP_CAP,
HPRE_CORE8_BITMAP_CAP,
HPRE_CORE9_BITMAP_CAP,
HPRE_CORE10_BITMAP_CAP,
};
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);

View File

@ -2006,8 +2006,6 @@ static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
}
static struct akcipher_alg rsa = {
.sign = hpre_rsa_dec,
.verify = hpre_rsa_enc,
.encrypt = hpre_rsa_enc,
.decrypt = hpre_rsa_dec,
.set_pub_key = hpre_rsa_setpubkey,

View File

@ -13,6 +13,7 @@
#include <linux/uacce.h>
#include "hpre.h"
#define CAP_FILE_PERMISSION 0444
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
@ -222,18 +223,27 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};
enum hpre_pre_store_cap_idx {
HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
HPRE_DRV_ALG_BITMAP_CAP_IDX,
HPRE_DEV_ALG_BITMAP_CAP_IDX,
};
static const u32 hpre_pre_store_caps[] = {
HPRE_CLUSTER_NUM_CAP,
HPRE_CORE_ENABLE_BITMAP_CAP,
HPRE_DRV_ALG_BITMAP_CAP,
HPRE_DEV_ALG_BITMAP_CAP,
static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {
{QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},
{QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
{QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
{HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E},
{HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},
{HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
{HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},
{HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},
{HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},
{HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},
{HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},
{HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},
{HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},
{HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},
{HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},
{HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},
{HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},
{HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},
{HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},
{HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},
};
static const struct hpre_hw_error hpre_hw_errors[] = {
@ -360,7 +370,7 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
if (alg & cap_val)
return true;
@ -415,7 +425,7 @@ static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
pf_q_num_flag = true;
return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}
static const struct kernel_param_ops hpre_pf_q_num_ops = {
@ -503,14 +513,17 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
static int hpre_set_cluster(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
unsigned long offset;
u32 cluster_core_mask;
unsigned long offset;
u32 hpre_core_info;
u8 clusters_num;
u32 val = 0;
int ret, i;
cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = i * HPRE_CLSTR_ADDR_INTRVL;
@ -593,6 +606,9 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
unsigned long offset;
u8 clusters_num, i;
u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@ -606,17 +622,26 @@ static void hpre_enable_clock_gate(struct hisi_qm *qm)
val |= HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
val |= HPRE_CLUSTER_DYN_CTL_EN;
writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
val |= HPRE_CORE_GATE_EN;
writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
}
}
static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
unsigned long offset;
u8 clusters_num, i;
u32 hpre_core_info;
u32 val;
if (qm->ver < QM_HW_V3)
@ -630,13 +655,19 @@ static void hpre_disable_clock_gate(struct hisi_qm *qm)
val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL);
hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
val &= ~HPRE_CLUSTER_DYN_CTL_EN;
writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL);
writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG);
val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
val &= ~HPRE_CORE_GATE_EN;
writel(val, qm->io_base + HPRE_CORE_SHB_CFG);
writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
}
}
static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
@ -699,11 +730,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
unsigned long offset;
u32 hpre_core_info;
u8 clusters_num;
int i;
/* clear clusterX/cluster_ctrl */
clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@ -995,10 +1029,13 @@ static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
u32 hpre_core_info;
u8 clusters_num;
int i, ret;
clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@ -1041,6 +1078,26 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
return hpre_cluster_debugfs_init(qm);
}
static int hpre_cap_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
u32 i, size;
size = qm->cap_tables.qm_cap_size;
for (i = 0; i < size; i++)
seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
qm->cap_tables.qm_cap_table[i].cap_val);
size = qm->cap_tables.dev_cap_size;
for (i = 0; i < size; i++)
seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
qm->cap_tables.dev_cap_table[i].cap_val);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);
static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
@ -1059,6 +1116,9 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm)
if (qm->fun_type == QM_HW_PF && hpre_regs)
debugfs_create_file("diff_regs", 0444, parent,
qm, &hpre_diff_regs_fops);
debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
qm->debug.debug_root, qm, &hpre_cap_regs_fops);
}
static int hpre_debugfs_init(struct hisi_qm *qm)
@ -1106,26 +1166,33 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
struct hisi_qm_cap_record *hpre_cap;
struct device *dev = &qm->pdev->dev;
u32 hpre_core_info;
u8 clusters_num;
size_t i, size;
size = ARRAY_SIZE(hpre_pre_store_caps);
size = ARRAY_SIZE(hpre_cap_query_info);
hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
for (i = 0; i < size; i++) {
hpre_cap[i].type = hpre_pre_store_caps[i];
hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
hpre_pre_store_caps[i], qm->cap_ver);
hpre_cap[i].type = hpre_cap_query_info[i].type;
hpre_cap[i].name = hpre_cap_query_info[i].name;
hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
i, qm->cap_ver);
}
if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
clusters_num, HPRE_CLUSTERS_NUM_MAX);
return -EINVAL;
}
qm->cap_tables.dev_cap_table = hpre_cap;
qm->cap_tables.dev_cap_size = size;
return 0;
}
@ -1172,7 +1239,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
if (ret) {
pci_err(pdev, "Failed to set hpre algs!\n");
@ -1188,10 +1255,13 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
@ -1231,6 +1301,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
struct qm_debug *debug = &qm->debug;
struct pci_dev *pdev = qm->pdev;
void __iomem *io_base;
u32 hpre_core_info;
u8 clusters_num;
int i, j, idx;
u32 val;
@ -1246,7 +1317,9 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
hpre_com_dfx_regs[i].name, debug->last_words[i], val);
}
clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
for (i = 0; i < clusters_num; i++) {
io_base = qm->io_base + hpre_cluster_offsets[i];
for (j = 0; j < cluster_dfx_regs_num; j++) {
@ -1280,11 +1353,15 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
u32 nfe;
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
u32 nfe_mask;
nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@ -1298,6 +1375,27 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
{
u32 err_status;
err_status = hpre_get_hw_err_status(qm);
if (err_status) {
if (err_status & qm->err_info.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
hpre_log_hw_error(qm, err_status);
if (err_status & qm->err_info.dev_reset_mask) {
/* Disable the same error reporting until device is recovered. */
hpre_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
hpre_clear_hw_err_status(qm, err_status);
}
return ACC_ERR_RECOVERED;
}
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
@ -1324,12 +1422,12 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.hw_err_disable = hpre_hw_error_disable,
.get_dev_hw_err_status = hpre_get_hw_err_status,
.clear_dev_hw_err_status = hpre_clear_hw_err_status,
.log_dev_hw_err = hpre_log_hw_error,
.open_axi_master_ooo = hpre_open_axi_master_ooo,
.open_sva_prefetch = hpre_open_sva_prefetch,
.close_sva_prefetch = hpre_close_sva_prefetch,
.show_last_dfx_regs = hpre_show_last_dfx_regs,
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
};
static int hpre_pf_probe_init(struct hpre *hpre)

Some files were not shown because too many files have changed in this diff Show More