mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 04:02:20 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu: - CTR(AES) optimisation on x86_64 using "by8" AVX. - arm64 support to ccp - Intel QAT crypto driver - Qualcomm crypto engine driver - x86-64 assembly optimisation for 3DES - CTR(3DES) speed test - move FIPS panic from module.c so that it only triggers on crypto modules - SP800-90A Deterministic Random Bit Generator (drbg). - more test vectors for ghash. - tweak self tests to catch partial block bugs. - misc fixes. * git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (94 commits) crypto: drbg - fix failure of generating multiple of 2**16 bytes crypto: ccp - Do not sign extend input data to CCP crypto: testmgr - add missing spaces to drbg error strings crypto: atmel-tdes - Switch to managed version of kzalloc crypto: atmel-sha - Switch to managed version of kzalloc crypto: testmgr - use chunks smaller than algo block size in chunk tests crypto: qat - Fixed SKU1 dev issue crypto: qat - Use hweight for bit counting crypto: qat - Updated print outputs crypto: qat - change ae_num to ae_id crypto: qat - change slice->regions to slice->region crypto: qat - use min_t macro crypto: qat - remove unnecessary parentheses crypto: qat - remove unneeded header crypto: qat - checkpatch blank lines crypto: qat - remove unnecessary return codes crypto: Resolve shadow warnings crypto: ccp - Remove "select OF" from Kconfig crypto: caam - fix DECO RSR polling crypto: qce - Let 'DEV_QCE' depend on both HAS_DMA and HAS_IOMEM ...
This commit is contained in:
commit
3e7a716a92
19
Documentation/devicetree/bindings/crypto/amd-ccp.txt
Normal file
19
Documentation/devicetree/bindings/crypto/amd-ccp.txt
Normal file
@ -0,0 +1,19 @@
|
||||
* AMD Cryptographic Coprocessor driver (ccp)
|
||||
|
||||
Required properties:
|
||||
- compatible: Should be "amd,ccp-seattle-v1a"
|
||||
- reg: Address and length of the register set for the device
|
||||
- interrupt-parent: Should be the phandle for the interrupt controller
|
||||
that services interrupts for this device
|
||||
- interrupts: Should contain the CCP interrupt
|
||||
|
||||
Optional properties:
|
||||
- dma-coherent: Present if dma operations are coherent
|
||||
|
||||
Example:
|
||||
ccp@e0100000 {
|
||||
compatible = "amd,ccp-seattle-v1a";
|
||||
reg = <0 0xe0100000 0 0x10000>;
|
||||
interrupt-parent = <&gic>;
|
||||
interrupts = <0 3 4>;
|
||||
};
|
25
Documentation/devicetree/bindings/crypto/qcom-qce.txt
Normal file
25
Documentation/devicetree/bindings/crypto/qcom-qce.txt
Normal file
@ -0,0 +1,25 @@
|
||||
Qualcomm crypto engine driver
|
||||
|
||||
Required properties:
|
||||
|
||||
- compatible : should be "qcom,crypto-v5.1"
|
||||
- reg : specifies base physical address and size of the registers map
|
||||
- clocks : phandle to clock-controller plus clock-specifier pair
|
||||
- clock-names : "iface" clocks register interface
|
||||
"bus" clocks data transfer interface
|
||||
"core" clocks rest of the crypto block
|
||||
- dmas : DMA specifiers for tx and rx dma channels. For more see
|
||||
Documentation/devicetree/bindings/dma/dma.txt
|
||||
- dma-names : DMA request names should be "rx" and "tx"
|
||||
|
||||
Example:
|
||||
crypto@fd45a000 {
|
||||
compatible = "qcom,crypto-v5.1";
|
||||
reg = <0xfd45a000 0x6000>;
|
||||
clocks = <&gcc GCC_CE2_AHB_CLK>,
|
||||
<&gcc GCC_CE2_AXI_CLK>,
|
||||
<&gcc GCC_CE2_CLK>;
|
||||
clock-names = "iface", "bus", "core";
|
||||
dmas = <&cryptobam 2>, <&cryptobam 3>;
|
||||
dma-names = "rx", "tx";
|
||||
};
|
@ -197,6 +197,7 @@ Code Seq#(hex) Include File Comments
|
||||
<mailto:gregkh@linuxfoundation.org>
|
||||
'a' all linux/atm*.h, linux/sonet.h ATM on linux
|
||||
<http://lrcwww.epfl.ch/>
|
||||
'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver
|
||||
'b' 00-FF conflict! bit3 vme host bridge
|
||||
<mailto:natalia@nikhefk.nikhef.nl>
|
||||
'c' all linux/cm4000_cs.h conflict!
|
||||
|
@ -7250,6 +7250,12 @@ M: Robert Jarzmik <robert.jarzmik@free.fr>
|
||||
L: rtc-linux@googlegroups.com
|
||||
S: Maintained
|
||||
|
||||
QAT DRIVER
|
||||
M: Tadeusz Struk <tadeusz.struk@intel.com>
|
||||
L: qat-linux@intel.com
|
||||
S: Supported
|
||||
F: drivers/crypto/qat/
|
||||
|
||||
QIB DRIVER
|
||||
M: Mike Marciniszyn <infinipath@intel.com>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
|
@ -32,7 +32,8 @@
|
||||
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
compatible = "fsl,sec-v6.0";
|
||||
compatible = "fsl,sec-v6.0", "fsl,sec-v5.0",
|
||||
"fsl,sec-v4.0";
|
||||
fsl,sec-era = <6>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
@ -14,6 +14,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
|
||||
obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
|
||||
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
|
||||
@ -52,6 +53,7 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
|
||||
serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
|
||||
|
||||
aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
|
||||
des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
|
||||
camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
|
||||
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
|
||||
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
|
||||
@ -76,7 +78,7 @@ ifeq ($(avx2_supported),yes)
|
||||
endif
|
||||
|
||||
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
|
||||
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o
|
||||
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
|
||||
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
|
||||
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
|
||||
ifeq ($(avx2_supported),yes)
|
||||
|
546
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
Normal file
546
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
Normal file
@ -0,0 +1,546 @@
|
||||
/*
|
||||
* Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
|
||||
*
|
||||
* This is AES128/192/256 CTR mode optimization implementation. It requires
|
||||
* the support of Intel(R) AESNI and AVX instructions.
|
||||
*
|
||||
* This work was inspired by the AES CTR mode optimization published
|
||||
* in Intel Optimized IPSEC Cryptograhpic library.
|
||||
* Additional information on it can be found at:
|
||||
* http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2014 Intel Corporation.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* Contact Information:
|
||||
* James Guilford <james.guilford@intel.com>
|
||||
* Sean Gulley <sean.m.gulley@intel.com>
|
||||
* Chandramouli Narayanan <mouli@linux.intel.com>
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2014 Intel Corporation.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/inst.h>
|
||||
|
||||
#define CONCAT(a,b) a##b
|
||||
#define VMOVDQ vmovdqu
|
||||
|
||||
#define xdata0 %xmm0
|
||||
#define xdata1 %xmm1
|
||||
#define xdata2 %xmm2
|
||||
#define xdata3 %xmm3
|
||||
#define xdata4 %xmm4
|
||||
#define xdata5 %xmm5
|
||||
#define xdata6 %xmm6
|
||||
#define xdata7 %xmm7
|
||||
#define xcounter %xmm8
|
||||
#define xbyteswap %xmm9
|
||||
#define xkey0 %xmm10
|
||||
#define xkey3 %xmm11
|
||||
#define xkey6 %xmm12
|
||||
#define xkey9 %xmm13
|
||||
#define xkey4 %xmm11
|
||||
#define xkey8 %xmm12
|
||||
#define xkey12 %xmm13
|
||||
#define xkeyA %xmm14
|
||||
#define xkeyB %xmm15
|
||||
|
||||
#define p_in %rdi
|
||||
#define p_iv %rsi
|
||||
#define p_keys %rdx
|
||||
#define p_out %rcx
|
||||
#define num_bytes %r8
|
||||
|
||||
#define tmp %r10
|
||||
#define DDQ(i) CONCAT(ddq_add_,i)
|
||||
#define XMM(i) CONCAT(%xmm, i)
|
||||
#define DDQ_DATA 0
|
||||
#define XDATA 1
|
||||
#define KEY_128 1
|
||||
#define KEY_192 2
|
||||
#define KEY_256 3
|
||||
|
||||
.section .rodata
|
||||
.align 16
|
||||
|
||||
byteswap_const:
|
||||
.octa 0x000102030405060708090A0B0C0D0E0F
|
||||
ddq_add_1:
|
||||
.octa 0x00000000000000000000000000000001
|
||||
ddq_add_2:
|
||||
.octa 0x00000000000000000000000000000002
|
||||
ddq_add_3:
|
||||
.octa 0x00000000000000000000000000000003
|
||||
ddq_add_4:
|
||||
.octa 0x00000000000000000000000000000004
|
||||
ddq_add_5:
|
||||
.octa 0x00000000000000000000000000000005
|
||||
ddq_add_6:
|
||||
.octa 0x00000000000000000000000000000006
|
||||
ddq_add_7:
|
||||
.octa 0x00000000000000000000000000000007
|
||||
ddq_add_8:
|
||||
.octa 0x00000000000000000000000000000008
|
||||
|
||||
.text
|
||||
|
||||
/* generate a unique variable for ddq_add_x */
|
||||
|
||||
.macro setddq n
|
||||
var_ddq_add = DDQ(\n)
|
||||
.endm
|
||||
|
||||
/* generate a unique variable for xmm register */
|
||||
.macro setxdata n
|
||||
var_xdata = XMM(\n)
|
||||
.endm
|
||||
|
||||
/* club the numeric 'id' to the symbol 'name' */
|
||||
|
||||
.macro club name, id
|
||||
.altmacro
|
||||
.if \name == DDQ_DATA
|
||||
setddq %\id
|
||||
.elseif \name == XDATA
|
||||
setxdata %\id
|
||||
.endif
|
||||
.noaltmacro
|
||||
.endm
|
||||
|
||||
/*
|
||||
* do_aes num_in_par load_keys key_len
|
||||
* This increments p_in, but not p_out
|
||||
*/
|
||||
.macro do_aes b, k, key_len
|
||||
.set by, \b
|
||||
.set load_keys, \k
|
||||
.set klen, \key_len
|
||||
|
||||
.if (load_keys)
|
||||
vmovdqa 0*16(p_keys), xkey0
|
||||
.endif
|
||||
|
||||
vpshufb xbyteswap, xcounter, xdata0
|
||||
|
||||
.set i, 1
|
||||
.rept (by - 1)
|
||||
club DDQ_DATA, i
|
||||
club XDATA, i
|
||||
vpaddd var_ddq_add(%rip), xcounter, var_xdata
|
||||
vpshufb xbyteswap, var_xdata, var_xdata
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
vmovdqa 1*16(p_keys), xkeyA
|
||||
|
||||
vpxor xkey0, xdata0, xdata0
|
||||
club DDQ_DATA, by
|
||||
vpaddd var_ddq_add(%rip), xcounter, xcounter
|
||||
|
||||
.set i, 1
|
||||
.rept (by - 1)
|
||||
club XDATA, i
|
||||
vpxor xkey0, var_xdata, var_xdata
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
vmovdqa 2*16(p_keys), xkeyB
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 1 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_128)
|
||||
.if (load_keys)
|
||||
vmovdqa 3*16(p_keys), xkeyA
|
||||
.endif
|
||||
.else
|
||||
vmovdqa 3*16(p_keys), xkeyA
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyB, var_xdata, var_xdata /* key 2 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
add $(16*by), p_in
|
||||
|
||||
.if (klen == KEY_128)
|
||||
vmovdqa 4*16(p_keys), xkey4
|
||||
.else
|
||||
.if (load_keys)
|
||||
vmovdqa 4*16(p_keys), xkey4
|
||||
.endif
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 3 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
vmovdqa 5*16(p_keys), xkeyA
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkey4, var_xdata, var_xdata /* key 4 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_128)
|
||||
.if (load_keys)
|
||||
vmovdqa 6*16(p_keys), xkeyB
|
||||
.endif
|
||||
.else
|
||||
vmovdqa 6*16(p_keys), xkeyB
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 5 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
vmovdqa 7*16(p_keys), xkeyA
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyB, var_xdata, var_xdata /* key 6 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_128)
|
||||
vmovdqa 8*16(p_keys), xkey8
|
||||
.else
|
||||
.if (load_keys)
|
||||
vmovdqa 8*16(p_keys), xkey8
|
||||
.endif
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 7 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_128)
|
||||
.if (load_keys)
|
||||
vmovdqa 9*16(p_keys), xkeyA
|
||||
.endif
|
||||
.else
|
||||
vmovdqa 9*16(p_keys), xkeyA
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkey8, var_xdata, var_xdata /* key 8 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
vmovdqa 10*16(p_keys), xkeyB
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 9 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen != KEY_128)
|
||||
vmovdqa 11*16(p_keys), xkeyA
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
/* key 10 */
|
||||
.if (klen == KEY_128)
|
||||
vaesenclast xkeyB, var_xdata, var_xdata
|
||||
.else
|
||||
vaesenc xkeyB, var_xdata, var_xdata
|
||||
.endif
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen != KEY_128)
|
||||
.if (load_keys)
|
||||
vmovdqa 12*16(p_keys), xkey12
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
vaesenc xkeyA, var_xdata, var_xdata /* key 11 */
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_256)
|
||||
vmovdqa 13*16(p_keys), xkeyA
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
.if (klen == KEY_256)
|
||||
/* key 12 */
|
||||
vaesenc xkey12, var_xdata, var_xdata
|
||||
.else
|
||||
vaesenclast xkey12, var_xdata, var_xdata
|
||||
.endif
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.if (klen == KEY_256)
|
||||
vmovdqa 14*16(p_keys), xkeyB
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
/* key 13 */
|
||||
vaesenc xkeyA, var_xdata, var_xdata
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
/* key 14 */
|
||||
vaesenclast xkeyB, var_xdata, var_xdata
|
||||
.set i, (i +1)
|
||||
.endr
|
||||
.endif
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept (by / 2)
|
||||
.set j, (i+1)
|
||||
VMOVDQ (i*16 - 16*by)(p_in), xkeyA
|
||||
VMOVDQ (j*16 - 16*by)(p_in), xkeyB
|
||||
club XDATA, i
|
||||
vpxor xkeyA, var_xdata, var_xdata
|
||||
club XDATA, j
|
||||
vpxor xkeyB, var_xdata, var_xdata
|
||||
.set i, (i+2)
|
||||
.endr
|
||||
|
||||
.if (i < by)
|
||||
VMOVDQ (i*16 - 16*by)(p_in), xkeyA
|
||||
club XDATA, i
|
||||
vpxor xkeyA, var_xdata, var_xdata
|
||||
.endif
|
||||
|
||||
.set i, 0
|
||||
.rept by
|
||||
club XDATA, i
|
||||
VMOVDQ var_xdata, i*16(p_out)
|
||||
.set i, (i+1)
|
||||
.endr
|
||||
.endm
|
||||
|
||||
.macro do_aes_load val, key_len
|
||||
do_aes \val, 1, \key_len
|
||||
.endm
|
||||
|
||||
.macro do_aes_noload val, key_len
|
||||
do_aes \val, 0, \key_len
|
||||
.endm
|
||||
|
||||
/* main body of aes ctr load */
|
||||
|
||||
.macro do_aes_ctrmain key_len
|
||||
|
||||
cmp $16, num_bytes
|
||||
jb .Ldo_return2\key_len
|
||||
|
||||
vmovdqa byteswap_const(%rip), xbyteswap
|
||||
vmovdqu (p_iv), xcounter
|
||||
vpshufb xbyteswap, xcounter, xcounter
|
||||
|
||||
mov num_bytes, tmp
|
||||
and $(7*16), tmp
|
||||
jz .Lmult_of_8_blks\key_len
|
||||
|
||||
/* 1 <= tmp <= 7 */
|
||||
cmp $(4*16), tmp
|
||||
jg .Lgt4\key_len
|
||||
je .Leq4\key_len
|
||||
|
||||
.Llt4\key_len:
|
||||
cmp $(2*16), tmp
|
||||
jg .Leq3\key_len
|
||||
je .Leq2\key_len
|
||||
|
||||
.Leq1\key_len:
|
||||
do_aes_load 1, \key_len
|
||||
add $(1*16), p_out
|
||||
and $(~7*16), num_bytes
|
||||
jz .Ldo_return2\key_len
|
||||
jmp .Lmain_loop2\key_len
|
||||
|
||||
.Leq2\key_len:
|
||||
do_aes_load 2, \key_len
|
||||
add $(2*16), p_out
|
||||
and $(~7*16), num_bytes
|
||||
jz .Ldo_return2\key_len
|
||||
jmp .Lmain_loop2\key_len
|
||||
|
||||
|
||||
.Leq3\key_len:
|
||||
do_aes_load 3, \key_len
|
||||
add $(3*16), p_out
|
||||
and $(~7*16), num_bytes
|
||||
jz .Ldo_return2\key_len
|
||||
jmp .Lmain_loop2\key_len
|
||||
|
||||
.Leq4\key_len:
|
||||
do_aes_load 4, \key_len
|
||||
add $(4*16), p_out
|
||||
and $(~7*16), num_bytes
|
||||
jz .Ldo_return2\key_len
|
||||
jmp .Lmain_loop2\key_len
|
||||
|
||||
.Lgt4\key_len:
|
||||
cmp $(6*16), tmp
|
||||
jg .Leq7\key_len
|
||||
je .Leq6\key_len
|
||||
|
||||
.Leq5\key_len:
|
||||
do_aes_load 5, \key_len
|
||||
add $(5*16), p_out
|
||||
and $(~7*16), num_bytes
|
||||
jz .Ldo_return2\key_len
|
||||
jmp .Lmain_loop2\key_len
|
||||
|
||||
.Leq6\key_len:
|
||||
do_aes_load 6, \key_len
|
||||
add $(6*16), p_out
|
||||
and $(~7*16), num_bytes
|
||||
jz .Ldo_return2\key_len
|
||||
jmp .Lmain_loop2\key_len
|
||||
|
||||
.Leq7\key_len:
|
||||
do_aes_load 7, \key_len
|
||||
add $(7*16), p_out
|
||||
and $(~7*16), num_bytes
|
||||
jz .Ldo_return2\key_len
|
||||
jmp .Lmain_loop2\key_len
|
||||
|
||||
.Lmult_of_8_blks\key_len:
|
||||
.if (\key_len != KEY_128)
|
||||
vmovdqa 0*16(p_keys), xkey0
|
||||
vmovdqa 4*16(p_keys), xkey4
|
||||
vmovdqa 8*16(p_keys), xkey8
|
||||
vmovdqa 12*16(p_keys), xkey12
|
||||
.else
|
||||
vmovdqa 0*16(p_keys), xkey0
|
||||
vmovdqa 3*16(p_keys), xkey4
|
||||
vmovdqa 6*16(p_keys), xkey8
|
||||
vmovdqa 9*16(p_keys), xkey12
|
||||
.endif
|
||||
.align 16
|
||||
.Lmain_loop2\key_len:
|
||||
/* num_bytes is a multiple of 8 and >0 */
|
||||
do_aes_noload 8, \key_len
|
||||
add $(8*16), p_out
|
||||
sub $(8*16), num_bytes
|
||||
jne .Lmain_loop2\key_len
|
||||
|
||||
.Ldo_return2\key_len:
|
||||
/* return updated IV */
|
||||
vpshufb xbyteswap, xcounter, xcounter
|
||||
vmovdqu xcounter, (p_iv)
|
||||
ret
|
||||
.endm
|
||||
|
||||
/*
|
||||
* routine to do AES128 CTR enc/decrypt "by8"
|
||||
* XMM registers are clobbered.
|
||||
* Saving/restoring must be done at a higher level
|
||||
* aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out,
|
||||
* unsigned int num_bytes)
|
||||
*/
|
||||
ENTRY(aes_ctr_enc_128_avx_by8)
|
||||
/* call the aes main loop */
|
||||
do_aes_ctrmain KEY_128
|
||||
|
||||
ENDPROC(aes_ctr_enc_128_avx_by8)
|
||||
|
||||
/*
|
||||
* routine to do AES192 CTR enc/decrypt "by8"
|
||||
* XMM registers are clobbered.
|
||||
* Saving/restoring must be done at a higher level
|
||||
* aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out,
|
||||
* unsigned int num_bytes)
|
||||
*/
|
||||
ENTRY(aes_ctr_enc_192_avx_by8)
|
||||
/* call the aes main loop */
|
||||
do_aes_ctrmain KEY_192
|
||||
|
||||
ENDPROC(aes_ctr_enc_192_avx_by8)
|
||||
|
||||
/*
|
||||
* routine to do AES256 CTR enc/decrypt "by8"
|
||||
* XMM registers are clobbered.
|
||||
* Saving/restoring must be done at a higher level
|
||||
* aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out,
|
||||
* unsigned int num_bytes)
|
||||
*/
|
||||
ENTRY(aes_ctr_enc_256_avx_by8)
|
||||
/* call the aes main loop */
|
||||
do_aes_ctrmain KEY_256
|
||||
|
||||
ENDPROC(aes_ctr_enc_256_avx_by8)
|
@ -105,6 +105,9 @@ void crypto_fpu_exit(void);
|
||||
#define AVX_GEN4_OPTSIZE 4096
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
|
||||
const u8 *in, unsigned int len, u8 *iv);
|
||||
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
|
||||
const u8 *in, unsigned int len, u8 *iv);
|
||||
|
||||
@ -155,6 +158,12 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
|
||||
|
||||
|
||||
#ifdef CONFIG_AS_AVX
|
||||
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
|
||||
void *keys, u8 *out, unsigned int num_bytes);
|
||||
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
|
||||
void *keys, u8 *out, unsigned int num_bytes);
|
||||
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
|
||||
void *keys, u8 *out, unsigned int num_bytes);
|
||||
/*
|
||||
* asmlinkage void aesni_gcm_precomp_avx_gen2()
|
||||
* gcm_data *my_ctx_data, context data
|
||||
@ -472,6 +481,25 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
|
||||
crypto_inc(ctrblk, AES_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_AS_AVX
|
||||
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
|
||||
const u8 *in, unsigned int len, u8 *iv)
|
||||
{
|
||||
/*
|
||||
* based on key length, override with the by8 version
|
||||
* of ctr mode encryption/decryption for improved performance
|
||||
* aes_set_key_common() ensures that key length is one of
|
||||
* {128,192,256}
|
||||
*/
|
||||
if (ctx->key_length == AES_KEYSIZE_128)
|
||||
aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
|
||||
else if (ctx->key_length == AES_KEYSIZE_192)
|
||||
aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
|
||||
else
|
||||
aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
@ -486,8 +514,8 @@ static int ctr_crypt(struct blkcipher_desc *desc,
|
||||
|
||||
kernel_fpu_begin();
|
||||
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
|
||||
aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes & AES_BLOCK_MASK, walk.iv);
|
||||
aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
|
||||
nbytes & AES_BLOCK_MASK, walk.iv);
|
||||
nbytes &= AES_BLOCK_SIZE - 1;
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
@ -1493,6 +1521,14 @@ static int __init aesni_init(void)
|
||||
aesni_gcm_enc_tfm = aesni_gcm_enc;
|
||||
aesni_gcm_dec_tfm = aesni_gcm_dec;
|
||||
}
|
||||
aesni_ctr_enc_tfm = aesni_ctr_enc;
|
||||
#ifdef CONFIG_AS_AVX
|
||||
if (cpu_has_avx) {
|
||||
/* optimize performance of ctr mode encryption transform */
|
||||
aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
|
||||
pr_info("AES CTR mode by8 optimization enabled\n");
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
err = crypto_fpu_init();
|
||||
|
@ -72,6 +72,7 @@
|
||||
|
||||
# unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
|
||||
|
||||
.text
|
||||
ENTRY(crc_pcl)
|
||||
#define bufp %rdi
|
||||
#define bufp_dw %edi
|
||||
@ -216,15 +217,11 @@ LABEL crc_ %i
|
||||
## 4) Combine three results:
|
||||
################################################################
|
||||
|
||||
lea (K_table-16)(%rip), bufp # first entry is for idx 1
|
||||
lea (K_table-8)(%rip), bufp # first entry is for idx 1
|
||||
shlq $3, %rax # rax *= 8
|
||||
subq %rax, tmp # tmp -= rax*8
|
||||
shlq $1, %rax
|
||||
subq %rax, tmp # tmp -= rax*16
|
||||
# (total tmp -= rax*24)
|
||||
addq %rax, bufp
|
||||
|
||||
movdqa (bufp), %xmm0 # 2 consts: K1:K2
|
||||
pmovzxdq (bufp,%rax), %xmm0 # 2 consts: K1:K2
|
||||
leal (%eax,%eax,2), %eax # rax *= 3 (total *24)
|
||||
subq %rax, tmp # tmp -= rax*24
|
||||
|
||||
movq crc_init, %xmm1 # CRC for block 1
|
||||
PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2
|
||||
@ -238,9 +235,9 @@ LABEL crc_ %i
|
||||
mov crc2, crc_init
|
||||
crc32 %rax, crc_init
|
||||
|
||||
################################################################
|
||||
## 5) Check for end:
|
||||
################################################################
|
||||
################################################################
|
||||
## 5) Check for end:
|
||||
################################################################
|
||||
|
||||
LABEL crc_ 0
|
||||
mov tmp, len
|
||||
@ -331,136 +328,136 @@ ENDPROC(crc_pcl)
|
||||
|
||||
################################################################
|
||||
## PCLMULQDQ tables
|
||||
## Table is 128 entries x 2 quad words each
|
||||
## Table is 128 entries x 2 words (8 bytes) each
|
||||
################################################################
|
||||
.data
|
||||
.align 64
|
||||
.section .rotata, "a", %progbits
|
||||
.align 8
|
||||
K_table:
|
||||
.quad 0x14cd00bd6,0x105ec76f0
|
||||
.quad 0x0ba4fc28e,0x14cd00bd6
|
||||
.quad 0x1d82c63da,0x0f20c0dfe
|
||||
.quad 0x09e4addf8,0x0ba4fc28e
|
||||
.quad 0x039d3b296,0x1384aa63a
|
||||
.quad 0x102f9b8a2,0x1d82c63da
|
||||
.quad 0x14237f5e6,0x01c291d04
|
||||
.quad 0x00d3b6092,0x09e4addf8
|
||||
.quad 0x0c96cfdc0,0x0740eef02
|
||||
.quad 0x18266e456,0x039d3b296
|
||||
.quad 0x0daece73e,0x0083a6eec
|
||||
.quad 0x0ab7aff2a,0x102f9b8a2
|
||||
.quad 0x1248ea574,0x1c1733996
|
||||
.quad 0x083348832,0x14237f5e6
|
||||
.quad 0x12c743124,0x02ad91c30
|
||||
.quad 0x0b9e02b86,0x00d3b6092
|
||||
.quad 0x018b33a4e,0x06992cea2
|
||||
.quad 0x1b331e26a,0x0c96cfdc0
|
||||
.quad 0x17d35ba46,0x07e908048
|
||||
.quad 0x1bf2e8b8a,0x18266e456
|
||||
.quad 0x1a3e0968a,0x11ed1f9d8
|
||||
.quad 0x0ce7f39f4,0x0daece73e
|
||||
.quad 0x061d82e56,0x0f1d0f55e
|
||||
.quad 0x0d270f1a2,0x0ab7aff2a
|
||||
.quad 0x1c3f5f66c,0x0a87ab8a8
|
||||
.quad 0x12ed0daac,0x1248ea574
|
||||
.quad 0x065863b64,0x08462d800
|
||||
.quad 0x11eef4f8e,0x083348832
|
||||
.quad 0x1ee54f54c,0x071d111a8
|
||||
.quad 0x0b3e32c28,0x12c743124
|
||||
.quad 0x0064f7f26,0x0ffd852c6
|
||||
.quad 0x0dd7e3b0c,0x0b9e02b86
|
||||
.quad 0x0f285651c,0x0dcb17aa4
|
||||
.quad 0x010746f3c,0x018b33a4e
|
||||
.quad 0x1c24afea4,0x0f37c5aee
|
||||
.quad 0x0271d9844,0x1b331e26a
|
||||
.quad 0x08e766a0c,0x06051d5a2
|
||||
.quad 0x093a5f730,0x17d35ba46
|
||||
.quad 0x06cb08e5c,0x11d5ca20e
|
||||
.quad 0x06b749fb2,0x1bf2e8b8a
|
||||
.quad 0x1167f94f2,0x021f3d99c
|
||||
.quad 0x0cec3662e,0x1a3e0968a
|
||||
.quad 0x19329634a,0x08f158014
|
||||
.quad 0x0e6fc4e6a,0x0ce7f39f4
|
||||
.quad 0x08227bb8a,0x1a5e82106
|
||||
.quad 0x0b0cd4768,0x061d82e56
|
||||
.quad 0x13c2b89c4,0x188815ab2
|
||||
.quad 0x0d7a4825c,0x0d270f1a2
|
||||
.quad 0x10f5ff2ba,0x105405f3e
|
||||
.quad 0x00167d312,0x1c3f5f66c
|
||||
.quad 0x0f6076544,0x0e9adf796
|
||||
.quad 0x026f6a60a,0x12ed0daac
|
||||
.quad 0x1a2adb74e,0x096638b34
|
||||
.quad 0x19d34af3a,0x065863b64
|
||||
.quad 0x049c3cc9c,0x1e50585a0
|
||||
.quad 0x068bce87a,0x11eef4f8e
|
||||
.quad 0x1524fa6c6,0x19f1c69dc
|
||||
.quad 0x16cba8aca,0x1ee54f54c
|
||||
.quad 0x042d98888,0x12913343e
|
||||
.quad 0x1329d9f7e,0x0b3e32c28
|
||||
.quad 0x1b1c69528,0x088f25a3a
|
||||
.quad 0x02178513a,0x0064f7f26
|
||||
.quad 0x0e0ac139e,0x04e36f0b0
|
||||
.quad 0x0170076fa,0x0dd7e3b0c
|
||||
.quad 0x141a1a2e2,0x0bd6f81f8
|
||||
.quad 0x16ad828b4,0x0f285651c
|
||||
.quad 0x041d17b64,0x19425cbba
|
||||
.quad 0x1fae1cc66,0x010746f3c
|
||||
.quad 0x1a75b4b00,0x18db37e8a
|
||||
.quad 0x0f872e54c,0x1c24afea4
|
||||
.quad 0x01e41e9fc,0x04c144932
|
||||
.quad 0x086d8e4d2,0x0271d9844
|
||||
.quad 0x160f7af7a,0x052148f02
|
||||
.quad 0x05bb8f1bc,0x08e766a0c
|
||||
.quad 0x0a90fd27a,0x0a3c6f37a
|
||||
.quad 0x0b3af077a,0x093a5f730
|
||||
.quad 0x04984d782,0x1d22c238e
|
||||
.quad 0x0ca6ef3ac,0x06cb08e5c
|
||||
.quad 0x0234e0b26,0x063ded06a
|
||||
.quad 0x1d88abd4a,0x06b749fb2
|
||||
.quad 0x04597456a,0x04d56973c
|
||||
.quad 0x0e9e28eb4,0x1167f94f2
|
||||
.quad 0x07b3ff57a,0x19385bf2e
|
||||
.quad 0x0c9c8b782,0x0cec3662e
|
||||
.quad 0x13a9cba9e,0x0e417f38a
|
||||
.quad 0x093e106a4,0x19329634a
|
||||
.quad 0x167001a9c,0x14e727980
|
||||
.quad 0x1ddffc5d4,0x0e6fc4e6a
|
||||
.quad 0x00df04680,0x0d104b8fc
|
||||
.quad 0x02342001e,0x08227bb8a
|
||||
.quad 0x00a2a8d7e,0x05b397730
|
||||
.quad 0x168763fa6,0x0b0cd4768
|
||||
.quad 0x1ed5a407a,0x0e78eb416
|
||||
.quad 0x0d2c3ed1a,0x13c2b89c4
|
||||
.quad 0x0995a5724,0x1641378f0
|
||||
.quad 0x19b1afbc4,0x0d7a4825c
|
||||
.quad 0x109ffedc0,0x08d96551c
|
||||
.quad 0x0f2271e60,0x10f5ff2ba
|
||||
.quad 0x00b0bf8ca,0x00bf80dd2
|
||||
.quad 0x123888b7a,0x00167d312
|
||||
.quad 0x1e888f7dc,0x18dcddd1c
|
||||
.quad 0x002ee03b2,0x0f6076544
|
||||
.quad 0x183e8d8fe,0x06a45d2b2
|
||||
.quad 0x133d7a042,0x026f6a60a
|
||||
.quad 0x116b0f50c,0x1dd3e10e8
|
||||
.quad 0x05fabe670,0x1a2adb74e
|
||||
.quad 0x130004488,0x0de87806c
|
||||
.quad 0x000bcf5f6,0x19d34af3a
|
||||
.quad 0x18f0c7078,0x014338754
|
||||
.quad 0x017f27698,0x049c3cc9c
|
||||
.quad 0x058ca5f00,0x15e3e77ee
|
||||
.quad 0x1af900c24,0x068bce87a
|
||||
.quad 0x0b5cfca28,0x0dd07448e
|
||||
.quad 0x0ded288f8,0x1524fa6c6
|
||||
.quad 0x059f229bc,0x1d8048348
|
||||
.quad 0x06d390dec,0x16cba8aca
|
||||
.quad 0x037170390,0x0a3e3e02c
|
||||
.quad 0x06353c1cc,0x042d98888
|
||||
.quad 0x0c4584f5c,0x0d73c7bea
|
||||
.quad 0x1f16a3418,0x1329d9f7e
|
||||
.quad 0x0531377e2,0x185137662
|
||||
.quad 0x1d8d9ca7c,0x1b1c69528
|
||||
.quad 0x0b25b29f2,0x18a08b5bc
|
||||
.quad 0x19fb2a8b0,0x02178513a
|
||||
.quad 0x1a08fe6ac,0x1da758ae0
|
||||
.quad 0x045cddf4e,0x0e0ac139e
|
||||
.quad 0x1a91647f2,0x169cf9eb0
|
||||
.quad 0x1a0f717c4,0x0170076fa
|
||||
.long 0x493c7d27, 0x00000001
|
||||
.long 0xba4fc28e, 0x493c7d27
|
||||
.long 0xddc0152b, 0xf20c0dfe
|
||||
.long 0x9e4addf8, 0xba4fc28e
|
||||
.long 0x39d3b296, 0x3da6d0cb
|
||||
.long 0x0715ce53, 0xddc0152b
|
||||
.long 0x47db8317, 0x1c291d04
|
||||
.long 0x0d3b6092, 0x9e4addf8
|
||||
.long 0xc96cfdc0, 0x740eef02
|
||||
.long 0x878a92a7, 0x39d3b296
|
||||
.long 0xdaece73e, 0x083a6eec
|
||||
.long 0xab7aff2a, 0x0715ce53
|
||||
.long 0x2162d385, 0xc49f4f67
|
||||
.long 0x83348832, 0x47db8317
|
||||
.long 0x299847d5, 0x2ad91c30
|
||||
.long 0xb9e02b86, 0x0d3b6092
|
||||
.long 0x18b33a4e, 0x6992cea2
|
||||
.long 0xb6dd949b, 0xc96cfdc0
|
||||
.long 0x78d9ccb7, 0x7e908048
|
||||
.long 0xbac2fd7b, 0x878a92a7
|
||||
.long 0xa60ce07b, 0x1b3d8f29
|
||||
.long 0xce7f39f4, 0xdaece73e
|
||||
.long 0x61d82e56, 0xf1d0f55e
|
||||
.long 0xd270f1a2, 0xab7aff2a
|
||||
.long 0xc619809d, 0xa87ab8a8
|
||||
.long 0x2b3cac5d, 0x2162d385
|
||||
.long 0x65863b64, 0x8462d800
|
||||
.long 0x1b03397f, 0x83348832
|
||||
.long 0xebb883bd, 0x71d111a8
|
||||
.long 0xb3e32c28, 0x299847d5
|
||||
.long 0x064f7f26, 0xffd852c6
|
||||
.long 0xdd7e3b0c, 0xb9e02b86
|
||||
.long 0xf285651c, 0xdcb17aa4
|
||||
.long 0x10746f3c, 0x18b33a4e
|
||||
.long 0xc7a68855, 0xf37c5aee
|
||||
.long 0x271d9844, 0xb6dd949b
|
||||
.long 0x8e766a0c, 0x6051d5a2
|
||||
.long 0x93a5f730, 0x78d9ccb7
|
||||
.long 0x6cb08e5c, 0x18b0d4ff
|
||||
.long 0x6b749fb2, 0xbac2fd7b
|
||||
.long 0x1393e203, 0x21f3d99c
|
||||
.long 0xcec3662e, 0xa60ce07b
|
||||
.long 0x96c515bb, 0x8f158014
|
||||
.long 0xe6fc4e6a, 0xce7f39f4
|
||||
.long 0x8227bb8a, 0xa00457f7
|
||||
.long 0xb0cd4768, 0x61d82e56
|
||||
.long 0x39c7ff35, 0x8d6d2c43
|
||||
.long 0xd7a4825c, 0xd270f1a2
|
||||
.long 0x0ab3844b, 0x00ac29cf
|
||||
.long 0x0167d312, 0xc619809d
|
||||
.long 0xf6076544, 0xe9adf796
|
||||
.long 0x26f6a60a, 0x2b3cac5d
|
||||
.long 0xa741c1bf, 0x96638b34
|
||||
.long 0x98d8d9cb, 0x65863b64
|
||||
.long 0x49c3cc9c, 0xe0e9f351
|
||||
.long 0x68bce87a, 0x1b03397f
|
||||
.long 0x57a3d037, 0x9af01f2d
|
||||
.long 0x6956fc3b, 0xebb883bd
|
||||
.long 0x42d98888, 0x2cff42cf
|
||||
.long 0x3771e98f, 0xb3e32c28
|
||||
.long 0xb42ae3d9, 0x88f25a3a
|
||||
.long 0x2178513a, 0x064f7f26
|
||||
.long 0xe0ac139e, 0x4e36f0b0
|
||||
.long 0x170076fa, 0xdd7e3b0c
|
||||
.long 0x444dd413, 0xbd6f81f8
|
||||
.long 0x6f345e45, 0xf285651c
|
||||
.long 0x41d17b64, 0x91c9bd4b
|
||||
.long 0xff0dba97, 0x10746f3c
|
||||
.long 0xa2b73df1, 0x885f087b
|
||||
.long 0xf872e54c, 0xc7a68855
|
||||
.long 0x1e41e9fc, 0x4c144932
|
||||
.long 0x86d8e4d2, 0x271d9844
|
||||
.long 0x651bd98b, 0x52148f02
|
||||
.long 0x5bb8f1bc, 0x8e766a0c
|
||||
.long 0xa90fd27a, 0xa3c6f37a
|
||||
.long 0xb3af077a, 0x93a5f730
|
||||
.long 0x4984d782, 0xd7c0557f
|
||||
.long 0xca6ef3ac, 0x6cb08e5c
|
||||
.long 0x234e0b26, 0x63ded06a
|
||||
.long 0xdd66cbbb, 0x6b749fb2
|
||||
.long 0x4597456a, 0x4d56973c
|
||||
.long 0xe9e28eb4, 0x1393e203
|
||||
.long 0x7b3ff57a, 0x9669c9df
|
||||
.long 0xc9c8b782, 0xcec3662e
|
||||
.long 0x3f70cc6f, 0xe417f38a
|
||||
.long 0x93e106a4, 0x96c515bb
|
||||
.long 0x62ec6c6d, 0x4b9e0f71
|
||||
.long 0xd813b325, 0xe6fc4e6a
|
||||
.long 0x0df04680, 0xd104b8fc
|
||||
.long 0x2342001e, 0x8227bb8a
|
||||
.long 0x0a2a8d7e, 0x5b397730
|
||||
.long 0x6d9a4957, 0xb0cd4768
|
||||
.long 0xe8b6368b, 0xe78eb416
|
||||
.long 0xd2c3ed1a, 0x39c7ff35
|
||||
.long 0x995a5724, 0x61ff0e01
|
||||
.long 0x9ef68d35, 0xd7a4825c
|
||||
.long 0x0c139b31, 0x8d96551c
|
||||
.long 0xf2271e60, 0x0ab3844b
|
||||
.long 0x0b0bf8ca, 0x0bf80dd2
|
||||
.long 0x2664fd8b, 0x0167d312
|
||||
.long 0xed64812d, 0x8821abed
|
||||
.long 0x02ee03b2, 0xf6076544
|
||||
.long 0x8604ae0f, 0x6a45d2b2
|
||||
.long 0x363bd6b3, 0x26f6a60a
|
||||
.long 0x135c83fd, 0xd8d26619
|
||||
.long 0x5fabe670, 0xa741c1bf
|
||||
.long 0x35ec3279, 0xde87806c
|
||||
.long 0x00bcf5f6, 0x98d8d9cb
|
||||
.long 0x8ae00689, 0x14338754
|
||||
.long 0x17f27698, 0x49c3cc9c
|
||||
.long 0x58ca5f00, 0x5bd2011f
|
||||
.long 0xaa7c7ad5, 0x68bce87a
|
||||
.long 0xb5cfca28, 0xdd07448e
|
||||
.long 0xded288f8, 0x57a3d037
|
||||
.long 0x59f229bc, 0xdde8f5b9
|
||||
.long 0x6d390dec, 0x6956fc3b
|
||||
.long 0x37170390, 0xa3e3e02c
|
||||
.long 0x6353c1cc, 0x42d98888
|
||||
.long 0xc4584f5c, 0xd73c7bea
|
||||
.long 0xf48642e9, 0x3771e98f
|
||||
.long 0x531377e2, 0x80ff0093
|
||||
.long 0xdd35bc8d, 0xb42ae3d9
|
||||
.long 0xb25b29f2, 0x8fe4c34d
|
||||
.long 0x9a5ede41, 0x2178513a
|
||||
.long 0xa563905d, 0xdf99fc11
|
||||
.long 0x45cddf4e, 0xe0ac139e
|
||||
.long 0xacfa3103, 0x6c23e841
|
||||
.long 0xa51b6135, 0x170076fa
|
||||
|
805
arch/x86/crypto/des3_ede-asm_64.S
Normal file
805
arch/x86/crypto/des3_ede-asm_64.S
Normal file
@ -0,0 +1,805 @@
|
||||
/*
|
||||
* des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher
|
||||
*
|
||||
* Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
.file "des3_ede-asm_64.S"
|
||||
.text
|
||||
|
||||
#define s1 .L_s1
|
||||
#define s2 ((s1) + (64*8))
|
||||
#define s3 ((s2) + (64*8))
|
||||
#define s4 ((s3) + (64*8))
|
||||
#define s5 ((s4) + (64*8))
|
||||
#define s6 ((s5) + (64*8))
|
||||
#define s7 ((s6) + (64*8))
|
||||
#define s8 ((s7) + (64*8))
|
||||
|
||||
/* register macros */
|
||||
#define CTX %rdi
|
||||
|
||||
#define RL0 %r8
|
||||
#define RL1 %r9
|
||||
#define RL2 %r10
|
||||
|
||||
#define RL0d %r8d
|
||||
#define RL1d %r9d
|
||||
#define RL2d %r10d
|
||||
|
||||
#define RR0 %r11
|
||||
#define RR1 %r12
|
||||
#define RR2 %r13
|
||||
|
||||
#define RR0d %r11d
|
||||
#define RR1d %r12d
|
||||
#define RR2d %r13d
|
||||
|
||||
#define RW0 %rax
|
||||
#define RW1 %rbx
|
||||
#define RW2 %rcx
|
||||
|
||||
#define RW0d %eax
|
||||
#define RW1d %ebx
|
||||
#define RW2d %ecx
|
||||
|
||||
#define RW0bl %al
|
||||
#define RW1bl %bl
|
||||
#define RW2bl %cl
|
||||
|
||||
#define RW0bh %ah
|
||||
#define RW1bh %bh
|
||||
#define RW2bh %ch
|
||||
|
||||
#define RT0 %r15
|
||||
#define RT1 %rbp
|
||||
#define RT2 %r14
|
||||
#define RT3 %rdx
|
||||
|
||||
#define RT0d %r15d
|
||||
#define RT1d %ebp
|
||||
#define RT2d %r14d
|
||||
#define RT3d %edx
|
||||
|
||||
/***********************************************************************
|
||||
* 1-way 3DES
|
||||
***********************************************************************/
|
||||
#define do_permutation(a, b, offset, mask) \
|
||||
movl a, RT0d; \
|
||||
shrl $(offset), RT0d; \
|
||||
xorl b, RT0d; \
|
||||
andl $(mask), RT0d; \
|
||||
xorl RT0d, b; \
|
||||
shll $(offset), RT0d; \
|
||||
xorl RT0d, a;
|
||||
|
||||
#define expand_to_64bits(val, mask) \
|
||||
movl val##d, RT0d; \
|
||||
rorl $4, RT0d; \
|
||||
shlq $32, RT0; \
|
||||
orq RT0, val; \
|
||||
andq mask, val;
|
||||
|
||||
#define compress_to_64bits(val) \
|
||||
movq val, RT0; \
|
||||
shrq $32, RT0; \
|
||||
roll $4, RT0d; \
|
||||
orl RT0d, val##d;
|
||||
|
||||
#define initial_permutation(left, right) \
|
||||
do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \
|
||||
do_permutation(left##d, right##d, 16, 0x0000ffff); \
|
||||
do_permutation(right##d, left##d, 2, 0x33333333); \
|
||||
do_permutation(right##d, left##d, 8, 0x00ff00ff); \
|
||||
movabs $0x3f3f3f3f3f3f3f3f, RT3; \
|
||||
movl left##d, RW0d; \
|
||||
roll $1, right##d; \
|
||||
xorl right##d, RW0d; \
|
||||
andl $0xaaaaaaaa, RW0d; \
|
||||
xorl RW0d, left##d; \
|
||||
xorl RW0d, right##d; \
|
||||
roll $1, left##d; \
|
||||
expand_to_64bits(right, RT3); \
|
||||
expand_to_64bits(left, RT3);
|
||||
|
||||
#define final_permutation(left, right) \
|
||||
compress_to_64bits(right); \
|
||||
compress_to_64bits(left); \
|
||||
movl right##d, RW0d; \
|
||||
rorl $1, left##d; \
|
||||
xorl left##d, RW0d; \
|
||||
andl $0xaaaaaaaa, RW0d; \
|
||||
xorl RW0d, right##d; \
|
||||
xorl RW0d, left##d; \
|
||||
rorl $1, right##d; \
|
||||
do_permutation(right##d, left##d, 8, 0x00ff00ff); \
|
||||
do_permutation(right##d, left##d, 2, 0x33333333); \
|
||||
do_permutation(left##d, right##d, 16, 0x0000ffff); \
|
||||
do_permutation(left##d, right##d, 4, 0x0f0f0f0f);
|
||||
|
||||
#define round1(n, from, to, load_next_key) \
|
||||
xorq from, RW0; \
|
||||
\
|
||||
movzbl RW0bl, RT0d; \
|
||||
movzbl RW0bh, RT1d; \
|
||||
shrq $16, RW0; \
|
||||
movzbl RW0bl, RT2d; \
|
||||
movzbl RW0bh, RT3d; \
|
||||
shrq $16, RW0; \
|
||||
movq s8(, RT0, 8), RT0; \
|
||||
xorq s6(, RT1, 8), to; \
|
||||
movzbl RW0bl, RL1d; \
|
||||
movzbl RW0bh, RT1d; \
|
||||
shrl $16, RW0d; \
|
||||
xorq s4(, RT2, 8), RT0; \
|
||||
xorq s2(, RT3, 8), to; \
|
||||
movzbl RW0bl, RT2d; \
|
||||
movzbl RW0bh, RT3d; \
|
||||
xorq s7(, RL1, 8), RT0; \
|
||||
xorq s5(, RT1, 8), to; \
|
||||
xorq s3(, RT2, 8), RT0; \
|
||||
load_next_key(n, RW0); \
|
||||
xorq RT0, to; \
|
||||
xorq s1(, RT3, 8), to; \
|
||||
|
||||
#define load_next_key(n, RWx) \
|
||||
movq (((n) + 1) * 8)(CTX), RWx;
|
||||
|
||||
#define dummy2(a, b) /*_*/
|
||||
|
||||
#define read_block(io, left, right) \
|
||||
movl (io), left##d; \
|
||||
movl 4(io), right##d; \
|
||||
bswapl left##d; \
|
||||
bswapl right##d;
|
||||
|
||||
#define write_block(io, left, right) \
|
||||
bswapl left##d; \
|
||||
bswapl right##d; \
|
||||
movl left##d, (io); \
|
||||
movl right##d, 4(io);
|
||||
|
||||
ENTRY(des3_ede_x86_64_crypt_blk)
|
||||
/* input:
|
||||
* %rdi: round keys, CTX
|
||||
* %rsi: dst
|
||||
* %rdx: src
|
||||
*/
|
||||
pushq %rbp;
|
||||
pushq %rbx;
|
||||
pushq %r12;
|
||||
pushq %r13;
|
||||
pushq %r14;
|
||||
pushq %r15;
|
||||
|
||||
read_block(%rdx, RL0, RR0);
|
||||
initial_permutation(RL0, RR0);
|
||||
|
||||
movq (CTX), RW0;
|
||||
|
||||
round1(0, RR0, RL0, load_next_key);
|
||||
round1(1, RL0, RR0, load_next_key);
|
||||
round1(2, RR0, RL0, load_next_key);
|
||||
round1(3, RL0, RR0, load_next_key);
|
||||
round1(4, RR0, RL0, load_next_key);
|
||||
round1(5, RL0, RR0, load_next_key);
|
||||
round1(6, RR0, RL0, load_next_key);
|
||||
round1(7, RL0, RR0, load_next_key);
|
||||
round1(8, RR0, RL0, load_next_key);
|
||||
round1(9, RL0, RR0, load_next_key);
|
||||
round1(10, RR0, RL0, load_next_key);
|
||||
round1(11, RL0, RR0, load_next_key);
|
||||
round1(12, RR0, RL0, load_next_key);
|
||||
round1(13, RL0, RR0, load_next_key);
|
||||
round1(14, RR0, RL0, load_next_key);
|
||||
round1(15, RL0, RR0, load_next_key);
|
||||
|
||||
round1(16+0, RL0, RR0, load_next_key);
|
||||
round1(16+1, RR0, RL0, load_next_key);
|
||||
round1(16+2, RL0, RR0, load_next_key);
|
||||
round1(16+3, RR0, RL0, load_next_key);
|
||||
round1(16+4, RL0, RR0, load_next_key);
|
||||
round1(16+5, RR0, RL0, load_next_key);
|
||||
round1(16+6, RL0, RR0, load_next_key);
|
||||
round1(16+7, RR0, RL0, load_next_key);
|
||||
round1(16+8, RL0, RR0, load_next_key);
|
||||
round1(16+9, RR0, RL0, load_next_key);
|
||||
round1(16+10, RL0, RR0, load_next_key);
|
||||
round1(16+11, RR0, RL0, load_next_key);
|
||||
round1(16+12, RL0, RR0, load_next_key);
|
||||
round1(16+13, RR0, RL0, load_next_key);
|
||||
round1(16+14, RL0, RR0, load_next_key);
|
||||
round1(16+15, RR0, RL0, load_next_key);
|
||||
|
||||
round1(32+0, RR0, RL0, load_next_key);
|
||||
round1(32+1, RL0, RR0, load_next_key);
|
||||
round1(32+2, RR0, RL0, load_next_key);
|
||||
round1(32+3, RL0, RR0, load_next_key);
|
||||
round1(32+4, RR0, RL0, load_next_key);
|
||||
round1(32+5, RL0, RR0, load_next_key);
|
||||
round1(32+6, RR0, RL0, load_next_key);
|
||||
round1(32+7, RL0, RR0, load_next_key);
|
||||
round1(32+8, RR0, RL0, load_next_key);
|
||||
round1(32+9, RL0, RR0, load_next_key);
|
||||
round1(32+10, RR0, RL0, load_next_key);
|
||||
round1(32+11, RL0, RR0, load_next_key);
|
||||
round1(32+12, RR0, RL0, load_next_key);
|
||||
round1(32+13, RL0, RR0, load_next_key);
|
||||
round1(32+14, RR0, RL0, load_next_key);
|
||||
round1(32+15, RL0, RR0, dummy2);
|
||||
|
||||
final_permutation(RR0, RL0);
|
||||
write_block(%rsi, RR0, RL0);
|
||||
|
||||
popq %r15;
|
||||
popq %r14;
|
||||
popq %r13;
|
||||
popq %r12;
|
||||
popq %rbx;
|
||||
popq %rbp;
|
||||
|
||||
ret;
|
||||
ENDPROC(des3_ede_x86_64_crypt_blk)
|
||||
|
||||
/***********************************************************************
|
||||
* 3-way 3DES
|
||||
***********************************************************************/
|
||||
#define expand_to_64bits(val, mask) \
|
||||
movl val##d, RT0d; \
|
||||
rorl $4, RT0d; \
|
||||
shlq $32, RT0; \
|
||||
orq RT0, val; \
|
||||
andq mask, val;
|
||||
|
||||
#define compress_to_64bits(val) \
|
||||
movq val, RT0; \
|
||||
shrq $32, RT0; \
|
||||
roll $4, RT0d; \
|
||||
orl RT0d, val##d;
|
||||
|
||||
#define initial_permutation3(left, right) \
|
||||
do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
|
||||
do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
|
||||
do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
|
||||
do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
|
||||
do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \
|
||||
do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
|
||||
\
|
||||
do_permutation(right##0d, left##0d, 2, 0x33333333); \
|
||||
do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
|
||||
do_permutation(right##1d, left##1d, 2, 0x33333333); \
|
||||
do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
|
||||
do_permutation(right##2d, left##2d, 2, 0x33333333); \
|
||||
do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
|
||||
\
|
||||
movabs $0x3f3f3f3f3f3f3f3f, RT3; \
|
||||
\
|
||||
movl left##0d, RW0d; \
|
||||
roll $1, right##0d; \
|
||||
xorl right##0d, RW0d; \
|
||||
andl $0xaaaaaaaa, RW0d; \
|
||||
xorl RW0d, left##0d; \
|
||||
xorl RW0d, right##0d; \
|
||||
roll $1, left##0d; \
|
||||
expand_to_64bits(right##0, RT3); \
|
||||
expand_to_64bits(left##0, RT3); \
|
||||
movl left##1d, RW1d; \
|
||||
roll $1, right##1d; \
|
||||
xorl right##1d, RW1d; \
|
||||
andl $0xaaaaaaaa, RW1d; \
|
||||
xorl RW1d, left##1d; \
|
||||
xorl RW1d, right##1d; \
|
||||
roll $1, left##1d; \
|
||||
expand_to_64bits(right##1, RT3); \
|
||||
expand_to_64bits(left##1, RT3); \
|
||||
movl left##2d, RW2d; \
|
||||
roll $1, right##2d; \
|
||||
xorl right##2d, RW2d; \
|
||||
andl $0xaaaaaaaa, RW2d; \
|
||||
xorl RW2d, left##2d; \
|
||||
xorl RW2d, right##2d; \
|
||||
roll $1, left##2d; \
|
||||
expand_to_64bits(right##2, RT3); \
|
||||
expand_to_64bits(left##2, RT3);
|
||||
|
||||
#define final_permutation3(left, right) \
|
||||
compress_to_64bits(right##0); \
|
||||
compress_to_64bits(left##0); \
|
||||
movl right##0d, RW0d; \
|
||||
rorl $1, left##0d; \
|
||||
xorl left##0d, RW0d; \
|
||||
andl $0xaaaaaaaa, RW0d; \
|
||||
xorl RW0d, right##0d; \
|
||||
xorl RW0d, left##0d; \
|
||||
rorl $1, right##0d; \
|
||||
compress_to_64bits(right##1); \
|
||||
compress_to_64bits(left##1); \
|
||||
movl right##1d, RW1d; \
|
||||
rorl $1, left##1d; \
|
||||
xorl left##1d, RW1d; \
|
||||
andl $0xaaaaaaaa, RW1d; \
|
||||
xorl RW1d, right##1d; \
|
||||
xorl RW1d, left##1d; \
|
||||
rorl $1, right##1d; \
|
||||
compress_to_64bits(right##2); \
|
||||
compress_to_64bits(left##2); \
|
||||
movl right##2d, RW2d; \
|
||||
rorl $1, left##2d; \
|
||||
xorl left##2d, RW2d; \
|
||||
andl $0xaaaaaaaa, RW2d; \
|
||||
xorl RW2d, right##2d; \
|
||||
xorl RW2d, left##2d; \
|
||||
rorl $1, right##2d; \
|
||||
\
|
||||
do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
|
||||
do_permutation(right##0d, left##0d, 2, 0x33333333); \
|
||||
do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
|
||||
do_permutation(right##1d, left##1d, 2, 0x33333333); \
|
||||
do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
|
||||
do_permutation(right##2d, left##2d, 2, 0x33333333); \
|
||||
\
|
||||
do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
|
||||
do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
|
||||
do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
|
||||
do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
|
||||
do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
|
||||
do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f);
|
||||
|
||||
#define round3(n, from, to, load_next_key, do_movq) \
|
||||
xorq from##0, RW0; \
|
||||
movzbl RW0bl, RT3d; \
|
||||
movzbl RW0bh, RT1d; \
|
||||
shrq $16, RW0; \
|
||||
xorq s8(, RT3, 8), to##0; \
|
||||
xorq s6(, RT1, 8), to##0; \
|
||||
movzbl RW0bl, RT3d; \
|
||||
movzbl RW0bh, RT1d; \
|
||||
shrq $16, RW0; \
|
||||
xorq s4(, RT3, 8), to##0; \
|
||||
xorq s2(, RT1, 8), to##0; \
|
||||
movzbl RW0bl, RT3d; \
|
||||
movzbl RW0bh, RT1d; \
|
||||
shrl $16, RW0d; \
|
||||
xorq s7(, RT3, 8), to##0; \
|
||||
xorq s5(, RT1, 8), to##0; \
|
||||
movzbl RW0bl, RT3d; \
|
||||
movzbl RW0bh, RT1d; \
|
||||
load_next_key(n, RW0); \
|
||||
xorq s3(, RT3, 8), to##0; \
|
||||
xorq s1(, RT1, 8), to##0; \
|
||||
xorq from##1, RW1; \
|
||||
movzbl RW1bl, RT3d; \
|
||||
movzbl RW1bh, RT1d; \
|
||||
shrq $16, RW1; \
|
||||
xorq s8(, RT3, 8), to##1; \
|
||||
xorq s6(, RT1, 8), to##1; \
|
||||
movzbl RW1bl, RT3d; \
|
||||
movzbl RW1bh, RT1d; \
|
||||
shrq $16, RW1; \
|
||||
xorq s4(, RT3, 8), to##1; \
|
||||
xorq s2(, RT1, 8), to##1; \
|
||||
movzbl RW1bl, RT3d; \
|
||||
movzbl RW1bh, RT1d; \
|
||||
shrl $16, RW1d; \
|
||||
xorq s7(, RT3, 8), to##1; \
|
||||
xorq s5(, RT1, 8), to##1; \
|
||||
movzbl RW1bl, RT3d; \
|
||||
movzbl RW1bh, RT1d; \
|
||||
do_movq(RW0, RW1); \
|
||||
xorq s3(, RT3, 8), to##1; \
|
||||
xorq s1(, RT1, 8), to##1; \
|
||||
xorq from##2, RW2; \
|
||||
movzbl RW2bl, RT3d; \
|
||||
movzbl RW2bh, RT1d; \
|
||||
shrq $16, RW2; \
|
||||
xorq s8(, RT3, 8), to##2; \
|
||||
xorq s6(, RT1, 8), to##2; \
|
||||
movzbl RW2bl, RT3d; \
|
||||
movzbl RW2bh, RT1d; \
|
||||
shrq $16, RW2; \
|
||||
xorq s4(, RT3, 8), to##2; \
|
||||
xorq s2(, RT1, 8), to##2; \
|
||||
movzbl RW2bl, RT3d; \
|
||||
movzbl RW2bh, RT1d; \
|
||||
shrl $16, RW2d; \
|
||||
xorq s7(, RT3, 8), to##2; \
|
||||
xorq s5(, RT1, 8), to##2; \
|
||||
movzbl RW2bl, RT3d; \
|
||||
movzbl RW2bh, RT1d; \
|
||||
do_movq(RW0, RW2); \
|
||||
xorq s3(, RT3, 8), to##2; \
|
||||
xorq s1(, RT1, 8), to##2;
|
||||
|
||||
#define __movq(src, dst) \
|
||||
movq src, dst;
|
||||
|
||||
ENTRY(des3_ede_x86_64_crypt_blk_3way)
|
||||
/* input:
|
||||
* %rdi: ctx, round keys
|
||||
* %rsi: dst (3 blocks)
|
||||
* %rdx: src (3 blocks)
|
||||
*/
|
||||
|
||||
pushq %rbp;
|
||||
pushq %rbx;
|
||||
pushq %r12;
|
||||
pushq %r13;
|
||||
pushq %r14;
|
||||
pushq %r15;
|
||||
|
||||
/* load input */
|
||||
movl 0 * 4(%rdx), RL0d;
|
||||
movl 1 * 4(%rdx), RR0d;
|
||||
movl 2 * 4(%rdx), RL1d;
|
||||
movl 3 * 4(%rdx), RR1d;
|
||||
movl 4 * 4(%rdx), RL2d;
|
||||
movl 5 * 4(%rdx), RR2d;
|
||||
|
||||
bswapl RL0d;
|
||||
bswapl RR0d;
|
||||
bswapl RL1d;
|
||||
bswapl RR1d;
|
||||
bswapl RL2d;
|
||||
bswapl RR2d;
|
||||
|
||||
initial_permutation3(RL, RR);
|
||||
|
||||
movq 0(CTX), RW0;
|
||||
movq RW0, RW1;
|
||||
movq RW0, RW2;
|
||||
|
||||
round3(0, RR, RL, load_next_key, __movq);
|
||||
round3(1, RL, RR, load_next_key, __movq);
|
||||
round3(2, RR, RL, load_next_key, __movq);
|
||||
round3(3, RL, RR, load_next_key, __movq);
|
||||
round3(4, RR, RL, load_next_key, __movq);
|
||||
round3(5, RL, RR, load_next_key, __movq);
|
||||
round3(6, RR, RL, load_next_key, __movq);
|
||||
round3(7, RL, RR, load_next_key, __movq);
|
||||
round3(8, RR, RL, load_next_key, __movq);
|
||||
round3(9, RL, RR, load_next_key, __movq);
|
||||
round3(10, RR, RL, load_next_key, __movq);
|
||||
round3(11, RL, RR, load_next_key, __movq);
|
||||
round3(12, RR, RL, load_next_key, __movq);
|
||||
round3(13, RL, RR, load_next_key, __movq);
|
||||
round3(14, RR, RL, load_next_key, __movq);
|
||||
round3(15, RL, RR, load_next_key, __movq);
|
||||
|
||||
round3(16+0, RL, RR, load_next_key, __movq);
|
||||
round3(16+1, RR, RL, load_next_key, __movq);
|
||||
round3(16+2, RL, RR, load_next_key, __movq);
|
||||
round3(16+3, RR, RL, load_next_key, __movq);
|
||||
round3(16+4, RL, RR, load_next_key, __movq);
|
||||
round3(16+5, RR, RL, load_next_key, __movq);
|
||||
round3(16+6, RL, RR, load_next_key, __movq);
|
||||
round3(16+7, RR, RL, load_next_key, __movq);
|
||||
round3(16+8, RL, RR, load_next_key, __movq);
|
||||
round3(16+9, RR, RL, load_next_key, __movq);
|
||||
round3(16+10, RL, RR, load_next_key, __movq);
|
||||
round3(16+11, RR, RL, load_next_key, __movq);
|
||||
round3(16+12, RL, RR, load_next_key, __movq);
|
||||
round3(16+13, RR, RL, load_next_key, __movq);
|
||||
round3(16+14, RL, RR, load_next_key, __movq);
|
||||
round3(16+15, RR, RL, load_next_key, __movq);
|
||||
|
||||
round3(32+0, RR, RL, load_next_key, __movq);
|
||||
round3(32+1, RL, RR, load_next_key, __movq);
|
||||
round3(32+2, RR, RL, load_next_key, __movq);
|
||||
round3(32+3, RL, RR, load_next_key, __movq);
|
||||
round3(32+4, RR, RL, load_next_key, __movq);
|
||||
round3(32+5, RL, RR, load_next_key, __movq);
|
||||
round3(32+6, RR, RL, load_next_key, __movq);
|
||||
round3(32+7, RL, RR, load_next_key, __movq);
|
||||
round3(32+8, RR, RL, load_next_key, __movq);
|
||||
round3(32+9, RL, RR, load_next_key, __movq);
|
||||
round3(32+10, RR, RL, load_next_key, __movq);
|
||||
round3(32+11, RL, RR, load_next_key, __movq);
|
||||
round3(32+12, RR, RL, load_next_key, __movq);
|
||||
round3(32+13, RL, RR, load_next_key, __movq);
|
||||
round3(32+14, RR, RL, load_next_key, __movq);
|
||||
round3(32+15, RL, RR, dummy2, dummy2);
|
||||
|
||||
final_permutation3(RR, RL);
|
||||
|
||||
bswapl RR0d;
|
||||
bswapl RL0d;
|
||||
bswapl RR1d;
|
||||
bswapl RL1d;
|
||||
bswapl RR2d;
|
||||
bswapl RL2d;
|
||||
|
||||
movl RR0d, 0 * 4(%rsi);
|
||||
movl RL0d, 1 * 4(%rsi);
|
||||
movl RR1d, 2 * 4(%rsi);
|
||||
movl RL1d, 3 * 4(%rsi);
|
||||
movl RR2d, 4 * 4(%rsi);
|
||||
movl RL2d, 5 * 4(%rsi);
|
||||
|
||||
popq %r15;
|
||||
popq %r14;
|
||||
popq %r13;
|
||||
popq %r12;
|
||||
popq %rbx;
|
||||
popq %rbp;
|
||||
|
||||
ret;
|
||||
ENDPROC(des3_ede_x86_64_crypt_blk_3way)
|
||||
|
||||
.data
|
||||
.align 16
|
||||
.L_s1:
|
||||
.quad 0x0010100001010400, 0x0000000000000000
|
||||
.quad 0x0000100000010000, 0x0010100001010404
|
||||
.quad 0x0010100001010004, 0x0000100000010404
|
||||
.quad 0x0000000000000004, 0x0000100000010000
|
||||
.quad 0x0000000000000400, 0x0010100001010400
|
||||
.quad 0x0010100001010404, 0x0000000000000400
|
||||
.quad 0x0010000001000404, 0x0010100001010004
|
||||
.quad 0x0010000001000000, 0x0000000000000004
|
||||
.quad 0x0000000000000404, 0x0010000001000400
|
||||
.quad 0x0010000001000400, 0x0000100000010400
|
||||
.quad 0x0000100000010400, 0x0010100001010000
|
||||
.quad 0x0010100001010000, 0x0010000001000404
|
||||
.quad 0x0000100000010004, 0x0010000001000004
|
||||
.quad 0x0010000001000004, 0x0000100000010004
|
||||
.quad 0x0000000000000000, 0x0000000000000404
|
||||
.quad 0x0000100000010404, 0x0010000001000000
|
||||
.quad 0x0000100000010000, 0x0010100001010404
|
||||
.quad 0x0000000000000004, 0x0010100001010000
|
||||
.quad 0x0010100001010400, 0x0010000001000000
|
||||
.quad 0x0010000001000000, 0x0000000000000400
|
||||
.quad 0x0010100001010004, 0x0000100000010000
|
||||
.quad 0x0000100000010400, 0x0010000001000004
|
||||
.quad 0x0000000000000400, 0x0000000000000004
|
||||
.quad 0x0010000001000404, 0x0000100000010404
|
||||
.quad 0x0010100001010404, 0x0000100000010004
|
||||
.quad 0x0010100001010000, 0x0010000001000404
|
||||
.quad 0x0010000001000004, 0x0000000000000404
|
||||
.quad 0x0000100000010404, 0x0010100001010400
|
||||
.quad 0x0000000000000404, 0x0010000001000400
|
||||
.quad 0x0010000001000400, 0x0000000000000000
|
||||
.quad 0x0000100000010004, 0x0000100000010400
|
||||
.quad 0x0000000000000000, 0x0010100001010004
|
||||
.L_s2:
|
||||
.quad 0x0801080200100020, 0x0800080000000000
|
||||
.quad 0x0000080000000000, 0x0001080200100020
|
||||
.quad 0x0001000000100000, 0x0000000200000020
|
||||
.quad 0x0801000200100020, 0x0800080200000020
|
||||
.quad 0x0800000200000020, 0x0801080200100020
|
||||
.quad 0x0801080000100000, 0x0800000000000000
|
||||
.quad 0x0800080000000000, 0x0001000000100000
|
||||
.quad 0x0000000200000020, 0x0801000200100020
|
||||
.quad 0x0001080000100000, 0x0001000200100020
|
||||
.quad 0x0800080200000020, 0x0000000000000000
|
||||
.quad 0x0800000000000000, 0x0000080000000000
|
||||
.quad 0x0001080200100020, 0x0801000000100000
|
||||
.quad 0x0001000200100020, 0x0800000200000020
|
||||
.quad 0x0000000000000000, 0x0001080000100000
|
||||
.quad 0x0000080200000020, 0x0801080000100000
|
||||
.quad 0x0801000000100000, 0x0000080200000020
|
||||
.quad 0x0000000000000000, 0x0001080200100020
|
||||
.quad 0x0801000200100020, 0x0001000000100000
|
||||
.quad 0x0800080200000020, 0x0801000000100000
|
||||
.quad 0x0801080000100000, 0x0000080000000000
|
||||
.quad 0x0801000000100000, 0x0800080000000000
|
||||
.quad 0x0000000200000020, 0x0801080200100020
|
||||
.quad 0x0001080200100020, 0x0000000200000020
|
||||
.quad 0x0000080000000000, 0x0800000000000000
|
||||
.quad 0x0000080200000020, 0x0801080000100000
|
||||
.quad 0x0001000000100000, 0x0800000200000020
|
||||
.quad 0x0001000200100020, 0x0800080200000020
|
||||
.quad 0x0800000200000020, 0x0001000200100020
|
||||
.quad 0x0001080000100000, 0x0000000000000000
|
||||
.quad 0x0800080000000000, 0x0000080200000020
|
||||
.quad 0x0800000000000000, 0x0801000200100020
|
||||
.quad 0x0801080200100020, 0x0001080000100000
|
||||
.L_s3:
|
||||
.quad 0x0000002000000208, 0x0000202008020200
|
||||
.quad 0x0000000000000000, 0x0000200008020008
|
||||
.quad 0x0000002008000200, 0x0000000000000000
|
||||
.quad 0x0000202000020208, 0x0000002008000200
|
||||
.quad 0x0000200000020008, 0x0000000008000008
|
||||
.quad 0x0000000008000008, 0x0000200000020000
|
||||
.quad 0x0000202008020208, 0x0000200000020008
|
||||
.quad 0x0000200008020000, 0x0000002000000208
|
||||
.quad 0x0000000008000000, 0x0000000000000008
|
||||
.quad 0x0000202008020200, 0x0000002000000200
|
||||
.quad 0x0000202000020200, 0x0000200008020000
|
||||
.quad 0x0000200008020008, 0x0000202000020208
|
||||
.quad 0x0000002008000208, 0x0000202000020200
|
||||
.quad 0x0000200000020000, 0x0000002008000208
|
||||
.quad 0x0000000000000008, 0x0000202008020208
|
||||
.quad 0x0000002000000200, 0x0000000008000000
|
||||
.quad 0x0000202008020200, 0x0000000008000000
|
||||
.quad 0x0000200000020008, 0x0000002000000208
|
||||
.quad 0x0000200000020000, 0x0000202008020200
|
||||
.quad 0x0000002008000200, 0x0000000000000000
|
||||
.quad 0x0000002000000200, 0x0000200000020008
|
||||
.quad 0x0000202008020208, 0x0000002008000200
|
||||
.quad 0x0000000008000008, 0x0000002000000200
|
||||
.quad 0x0000000000000000, 0x0000200008020008
|
||||
.quad 0x0000002008000208, 0x0000200000020000
|
||||
.quad 0x0000000008000000, 0x0000202008020208
|
||||
.quad 0x0000000000000008, 0x0000202000020208
|
||||
.quad 0x0000202000020200, 0x0000000008000008
|
||||
.quad 0x0000200008020000, 0x0000002008000208
|
||||
.quad 0x0000002000000208, 0x0000200008020000
|
||||
.quad 0x0000202000020208, 0x0000000000000008
|
||||
.quad 0x0000200008020008, 0x0000202000020200
|
||||
.L_s4:
|
||||
.quad 0x1008020000002001, 0x1000020800002001
|
||||
.quad 0x1000020800002001, 0x0000000800000000
|
||||
.quad 0x0008020800002000, 0x1008000800000001
|
||||
.quad 0x1008000000000001, 0x1000020000002001
|
||||
.quad 0x0000000000000000, 0x0008020000002000
|
||||
.quad 0x0008020000002000, 0x1008020800002001
|
||||
.quad 0x1000000800000001, 0x0000000000000000
|
||||
.quad 0x0008000800000000, 0x1008000000000001
|
||||
.quad 0x1000000000000001, 0x0000020000002000
|
||||
.quad 0x0008000000000000, 0x1008020000002001
|
||||
.quad 0x0000000800000000, 0x0008000000000000
|
||||
.quad 0x1000020000002001, 0x0000020800002000
|
||||
.quad 0x1008000800000001, 0x1000000000000001
|
||||
.quad 0x0000020800002000, 0x0008000800000000
|
||||
.quad 0x0000020000002000, 0x0008020800002000
|
||||
.quad 0x1008020800002001, 0x1000000800000001
|
||||
.quad 0x0008000800000000, 0x1008000000000001
|
||||
.quad 0x0008020000002000, 0x1008020800002001
|
||||
.quad 0x1000000800000001, 0x0000000000000000
|
||||
.quad 0x0000000000000000, 0x0008020000002000
|
||||
.quad 0x0000020800002000, 0x0008000800000000
|
||||
.quad 0x1008000800000001, 0x1000000000000001
|
||||
.quad 0x1008020000002001, 0x1000020800002001
|
||||
.quad 0x1000020800002001, 0x0000000800000000
|
||||
.quad 0x1008020800002001, 0x1000000800000001
|
||||
.quad 0x1000000000000001, 0x0000020000002000
|
||||
.quad 0x1008000000000001, 0x1000020000002001
|
||||
.quad 0x0008020800002000, 0x1008000800000001
|
||||
.quad 0x1000020000002001, 0x0000020800002000
|
||||
.quad 0x0008000000000000, 0x1008020000002001
|
||||
.quad 0x0000000800000000, 0x0008000000000000
|
||||
.quad 0x0000020000002000, 0x0008020800002000
|
||||
.L_s5:
|
||||
.quad 0x0000001000000100, 0x0020001002080100
|
||||
.quad 0x0020000002080000, 0x0420001002000100
|
||||
.quad 0x0000000000080000, 0x0000001000000100
|
||||
.quad 0x0400000000000000, 0x0020000002080000
|
||||
.quad 0x0400001000080100, 0x0000000000080000
|
||||
.quad 0x0020001002000100, 0x0400001000080100
|
||||
.quad 0x0420001002000100, 0x0420000002080000
|
||||
.quad 0x0000001000080100, 0x0400000000000000
|
||||
.quad 0x0020000002000000, 0x0400000000080000
|
||||
.quad 0x0400000000080000, 0x0000000000000000
|
||||
.quad 0x0400001000000100, 0x0420001002080100
|
||||
.quad 0x0420001002080100, 0x0020001002000100
|
||||
.quad 0x0420000002080000, 0x0400001000000100
|
||||
.quad 0x0000000000000000, 0x0420000002000000
|
||||
.quad 0x0020001002080100, 0x0020000002000000
|
||||
.quad 0x0420000002000000, 0x0000001000080100
|
||||
.quad 0x0000000000080000, 0x0420001002000100
|
||||
.quad 0x0000001000000100, 0x0020000002000000
|
||||
.quad 0x0400000000000000, 0x0020000002080000
|
||||
.quad 0x0420001002000100, 0x0400001000080100
|
||||
.quad 0x0020001002000100, 0x0400000000000000
|
||||
.quad 0x0420000002080000, 0x0020001002080100
|
||||
.quad 0x0400001000080100, 0x0000001000000100
|
||||
.quad 0x0020000002000000, 0x0420000002080000
|
||||
.quad 0x0420001002080100, 0x0000001000080100
|
||||
.quad 0x0420000002000000, 0x0420001002080100
|
||||
.quad 0x0020000002080000, 0x0000000000000000
|
||||
.quad 0x0400000000080000, 0x0420000002000000
|
||||
.quad 0x0000001000080100, 0x0020001002000100
|
||||
.quad 0x0400001000000100, 0x0000000000080000
|
||||
.quad 0x0000000000000000, 0x0400000000080000
|
||||
.quad 0x0020001002080100, 0x0400001000000100
|
||||
.L_s6:
|
||||
.quad 0x0200000120000010, 0x0204000020000000
|
||||
.quad 0x0000040000000000, 0x0204040120000010
|
||||
.quad 0x0204000020000000, 0x0000000100000010
|
||||
.quad 0x0204040120000010, 0x0004000000000000
|
||||
.quad 0x0200040020000000, 0x0004040100000010
|
||||
.quad 0x0004000000000000, 0x0200000120000010
|
||||
.quad 0x0004000100000010, 0x0200040020000000
|
||||
.quad 0x0200000020000000, 0x0000040100000010
|
||||
.quad 0x0000000000000000, 0x0004000100000010
|
||||
.quad 0x0200040120000010, 0x0000040000000000
|
||||
.quad 0x0004040000000000, 0x0200040120000010
|
||||
.quad 0x0000000100000010, 0x0204000120000010
|
||||
.quad 0x0204000120000010, 0x0000000000000000
|
||||
.quad 0x0004040100000010, 0x0204040020000000
|
||||
.quad 0x0000040100000010, 0x0004040000000000
|
||||
.quad 0x0204040020000000, 0x0200000020000000
|
||||
.quad 0x0200040020000000, 0x0000000100000010
|
||||
.quad 0x0204000120000010, 0x0004040000000000
|
||||
.quad 0x0204040120000010, 0x0004000000000000
|
||||
.quad 0x0000040100000010, 0x0200000120000010
|
||||
.quad 0x0004000000000000, 0x0200040020000000
|
||||
.quad 0x0200000020000000, 0x0000040100000010
|
||||
.quad 0x0200000120000010, 0x0204040120000010
|
||||
.quad 0x0004040000000000, 0x0204000020000000
|
||||
.quad 0x0004040100000010, 0x0204040020000000
|
||||
.quad 0x0000000000000000, 0x0204000120000010
|
||||
.quad 0x0000000100000010, 0x0000040000000000
|
||||
.quad 0x0204000020000000, 0x0004040100000010
|
||||
.quad 0x0000040000000000, 0x0004000100000010
|
||||
.quad 0x0200040120000010, 0x0000000000000000
|
||||
.quad 0x0204040020000000, 0x0200000020000000
|
||||
.quad 0x0004000100000010, 0x0200040120000010
|
||||
.L_s7:
|
||||
.quad 0x0002000000200000, 0x2002000004200002
|
||||
.quad 0x2000000004000802, 0x0000000000000000
|
||||
.quad 0x0000000000000800, 0x2000000004000802
|
||||
.quad 0x2002000000200802, 0x0002000004200800
|
||||
.quad 0x2002000004200802, 0x0002000000200000
|
||||
.quad 0x0000000000000000, 0x2000000004000002
|
||||
.quad 0x2000000000000002, 0x0000000004000000
|
||||
.quad 0x2002000004200002, 0x2000000000000802
|
||||
.quad 0x0000000004000800, 0x2002000000200802
|
||||
.quad 0x2002000000200002, 0x0000000004000800
|
||||
.quad 0x2000000004000002, 0x0002000004200000
|
||||
.quad 0x0002000004200800, 0x2002000000200002
|
||||
.quad 0x0002000004200000, 0x0000000000000800
|
||||
.quad 0x2000000000000802, 0x2002000004200802
|
||||
.quad 0x0002000000200800, 0x2000000000000002
|
||||
.quad 0x0000000004000000, 0x0002000000200800
|
||||
.quad 0x0000000004000000, 0x0002000000200800
|
||||
.quad 0x0002000000200000, 0x2000000004000802
|
||||
.quad 0x2000000004000802, 0x2002000004200002
|
||||
.quad 0x2002000004200002, 0x2000000000000002
|
||||
.quad 0x2002000000200002, 0x0000000004000000
|
||||
.quad 0x0000000004000800, 0x0002000000200000
|
||||
.quad 0x0002000004200800, 0x2000000000000802
|
||||
.quad 0x2002000000200802, 0x0002000004200800
|
||||
.quad 0x2000000000000802, 0x2000000004000002
|
||||
.quad 0x2002000004200802, 0x0002000004200000
|
||||
.quad 0x0002000000200800, 0x0000000000000000
|
||||
.quad 0x2000000000000002, 0x2002000004200802
|
||||
.quad 0x0000000000000000, 0x2002000000200802
|
||||
.quad 0x0002000004200000, 0x0000000000000800
|
||||
.quad 0x2000000004000002, 0x0000000004000800
|
||||
.quad 0x0000000000000800, 0x2002000000200002
|
||||
.L_s8:
|
||||
.quad 0x0100010410001000, 0x0000010000001000
|
||||
.quad 0x0000000000040000, 0x0100010410041000
|
||||
.quad 0x0100000010000000, 0x0100010410001000
|
||||
.quad 0x0000000400000000, 0x0100000010000000
|
||||
.quad 0x0000000400040000, 0x0100000010040000
|
||||
.quad 0x0100010410041000, 0x0000010000041000
|
||||
.quad 0x0100010010041000, 0x0000010400041000
|
||||
.quad 0x0000010000001000, 0x0000000400000000
|
||||
.quad 0x0100000010040000, 0x0100000410000000
|
||||
.quad 0x0100010010001000, 0x0000010400001000
|
||||
.quad 0x0000010000041000, 0x0000000400040000
|
||||
.quad 0x0100000410040000, 0x0100010010041000
|
||||
.quad 0x0000010400001000, 0x0000000000000000
|
||||
.quad 0x0000000000000000, 0x0100000410040000
|
||||
.quad 0x0100000410000000, 0x0100010010001000
|
||||
.quad 0x0000010400041000, 0x0000000000040000
|
||||
.quad 0x0000010400041000, 0x0000000000040000
|
||||
.quad 0x0100010010041000, 0x0000010000001000
|
||||
.quad 0x0000000400000000, 0x0100000410040000
|
||||
.quad 0x0000010000001000, 0x0000010400041000
|
||||
.quad 0x0100010010001000, 0x0000000400000000
|
||||
.quad 0x0100000410000000, 0x0100000010040000
|
||||
.quad 0x0100000410040000, 0x0100000010000000
|
||||
.quad 0x0000000000040000, 0x0100010410001000
|
||||
.quad 0x0000000000000000, 0x0100010410041000
|
||||
.quad 0x0000000400040000, 0x0100000410000000
|
||||
.quad 0x0100000010040000, 0x0100010010001000
|
||||
.quad 0x0100010410001000, 0x0000000000000000
|
||||
.quad 0x0100010410041000, 0x0000010000041000
|
||||
.quad 0x0000010000041000, 0x0000010400001000
|
||||
.quad 0x0000010400001000, 0x0000000400040000
|
||||
.quad 0x0100000010000000, 0x0100010010041000
|
509
arch/x86/crypto/des3_ede_glue.c
Normal file
509
arch/x86/crypto/des3_ede_glue.c
Normal file
@ -0,0 +1,509 @@
|
||||
/*
|
||||
* Glue Code for assembler optimized version of 3DES
|
||||
*
|
||||
* Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
|
||||
*
|
||||
* CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
|
||||
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
* CTR part based on code (crypto/ctr.c) by:
|
||||
* (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <crypto/des.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/algapi.h>
|
||||
|
||||
struct des3_ede_x86_ctx {
|
||||
u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
|
||||
u32 dec_expkey[DES3_EDE_EXPKEY_WORDS];
|
||||
};
|
||||
|
||||
/* regular block cipher functions */
|
||||
asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst,
|
||||
const u8 *src);
|
||||
|
||||
/* 3-way parallel cipher functions */
|
||||
asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
|
||||
const u8 *src);
|
||||
|
||||
static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
|
||||
const u8 *src)
|
||||
{
|
||||
u32 *enc_ctx = ctx->enc_expkey;
|
||||
|
||||
des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
|
||||
}
|
||||
|
||||
static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
|
||||
const u8 *src)
|
||||
{
|
||||
u32 *dec_ctx = ctx->dec_expkey;
|
||||
|
||||
des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
|
||||
}
|
||||
|
||||
static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
|
||||
const u8 *src)
|
||||
{
|
||||
u32 *enc_ctx = ctx->enc_expkey;
|
||||
|
||||
des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src);
|
||||
}
|
||||
|
||||
static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
|
||||
const u8 *src)
|
||||
{
|
||||
u32 *dec_ctx = ctx->dec_expkey;
|
||||
|
||||
des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
|
||||
}
|
||||
|
||||
static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src);
|
||||
}
|
||||
|
||||
static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
|
||||
}
|
||||
|
||||
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
|
||||
const u32 *expkey)
|
||||
{
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
unsigned int nbytes;
|
||||
int err;
|
||||
|
||||
err = blkcipher_walk_virt(desc, walk);
|
||||
|
||||
while ((nbytes = walk->nbytes)) {
|
||||
u8 *wsrc = walk->src.virt.addr;
|
||||
u8 *wdst = walk->dst.virt.addr;
|
||||
|
||||
/* Process four block batch */
|
||||
if (nbytes >= bsize * 3) {
|
||||
do {
|
||||
des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
|
||||
wsrc);
|
||||
|
||||
wsrc += bsize * 3;
|
||||
wdst += bsize * 3;
|
||||
nbytes -= bsize * 3;
|
||||
} while (nbytes >= bsize * 3);
|
||||
|
||||
if (nbytes < bsize)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Handle leftovers */
|
||||
do {
|
||||
des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);
|
||||
|
||||
wsrc += bsize;
|
||||
wdst += bsize;
|
||||
nbytes -= bsize;
|
||||
} while (nbytes >= bsize);
|
||||
|
||||
done:
|
||||
err = blkcipher_walk_done(desc, walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, ctx->enc_expkey);
|
||||
}
|
||||
|
||||
static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
struct blkcipher_walk walk;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
return ecb_crypt(desc, &walk, ctx->dec_expkey);
|
||||
}
|
||||
|
||||
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
u64 *dst = (u64 *)walk->dst.virt.addr;
|
||||
u64 *iv = (u64 *)walk->iv;
|
||||
|
||||
do {
|
||||
*dst = *src ^ *iv;
|
||||
des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
|
||||
iv = dst;
|
||||
|
||||
src += 1;
|
||||
dst += 1;
|
||||
nbytes -= bsize;
|
||||
} while (nbytes >= bsize);
|
||||
|
||||
*(u64 *)walk->iv = *iv;
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
nbytes = __cbc_encrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
u64 *src = (u64 *)walk->src.virt.addr;
|
||||
u64 *dst = (u64 *)walk->dst.virt.addr;
|
||||
u64 ivs[3 - 1];
|
||||
u64 last_iv;
|
||||
|
||||
/* Start of the last block. */
|
||||
src += nbytes / bsize - 1;
|
||||
dst += nbytes / bsize - 1;
|
||||
|
||||
last_iv = *src;
|
||||
|
||||
/* Process four block batch */
|
||||
if (nbytes >= bsize * 3) {
|
||||
do {
|
||||
nbytes -= bsize * 3 - bsize;
|
||||
src -= 3 - 1;
|
||||
dst -= 3 - 1;
|
||||
|
||||
ivs[0] = src[0];
|
||||
ivs[1] = src[1];
|
||||
|
||||
des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
|
||||
|
||||
dst[1] ^= ivs[0];
|
||||
dst[2] ^= ivs[1];
|
||||
|
||||
nbytes -= bsize;
|
||||
if (nbytes < bsize)
|
||||
goto done;
|
||||
|
||||
*dst ^= *(src - 1);
|
||||
src -= 1;
|
||||
dst -= 1;
|
||||
} while (nbytes >= bsize * 3);
|
||||
}
|
||||
|
||||
/* Handle leftovers */
|
||||
for (;;) {
|
||||
des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src);
|
||||
|
||||
nbytes -= bsize;
|
||||
if (nbytes < bsize)
|
||||
break;
|
||||
|
||||
*dst ^= *(src - 1);
|
||||
src -= 1;
|
||||
dst -= 1;
|
||||
}
|
||||
|
||||
done:
|
||||
*dst ^= *(u64 *)walk->iv;
|
||||
*(u64 *)walk->iv = last_iv;
|
||||
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt(desc, &walk);
|
||||
|
||||
while ((nbytes = walk.nbytes)) {
|
||||
nbytes = __cbc_decrypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
u8 *ctrblk = walk->iv;
|
||||
u8 keystream[DES3_EDE_BLOCK_SIZE];
|
||||
u8 *src = walk->src.virt.addr;
|
||||
u8 *dst = walk->dst.virt.addr;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
|
||||
des3_ede_enc_blk(ctx, keystream, ctrblk);
|
||||
crypto_xor(keystream, src, nbytes);
|
||||
memcpy(dst, keystream, nbytes);
|
||||
|
||||
crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
|
||||
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
|
||||
unsigned int nbytes = walk->nbytes;
|
||||
__be64 *src = (__be64 *)walk->src.virt.addr;
|
||||
__be64 *dst = (__be64 *)walk->dst.virt.addr;
|
||||
u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
|
||||
__be64 ctrblocks[3];
|
||||
|
||||
/* Process four block batch */
|
||||
if (nbytes >= bsize * 3) {
|
||||
do {
|
||||
/* create ctrblks for parallel encrypt */
|
||||
ctrblocks[0] = cpu_to_be64(ctrblk++);
|
||||
ctrblocks[1] = cpu_to_be64(ctrblk++);
|
||||
ctrblocks[2] = cpu_to_be64(ctrblk++);
|
||||
|
||||
des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks,
|
||||
(u8 *)ctrblocks);
|
||||
|
||||
dst[0] = src[0] ^ ctrblocks[0];
|
||||
dst[1] = src[1] ^ ctrblocks[1];
|
||||
dst[2] = src[2] ^ ctrblocks[2];
|
||||
|
||||
src += 3;
|
||||
dst += 3;
|
||||
} while ((nbytes -= bsize * 3) >= bsize * 3);
|
||||
|
||||
if (nbytes < bsize)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Handle leftovers */
|
||||
do {
|
||||
ctrblocks[0] = cpu_to_be64(ctrblk++);
|
||||
|
||||
des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
|
||||
|
||||
dst[0] = src[0] ^ ctrblocks[0];
|
||||
|
||||
src += 1;
|
||||
dst += 1;
|
||||
} while ((nbytes -= bsize) >= bsize);
|
||||
|
||||
done:
|
||||
*(__be64 *)walk->iv = cpu_to_be64(ctrblk);
|
||||
return nbytes;
|
||||
}
|
||||
|
||||
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
|
||||
struct scatterlist *src, unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
|
||||
|
||||
while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
|
||||
nbytes = __ctr_crypt(desc, &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, nbytes);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
u32 i, j, tmp;
|
||||
int err;
|
||||
|
||||
/* Generate encryption context using generic implementation. */
|
||||
err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* Fix encryption context for this implementation and form decryption
|
||||
* context. */
|
||||
j = DES3_EDE_EXPKEY_WORDS - 2;
|
||||
for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
|
||||
tmp = ror32(ctx->enc_expkey[i + 1], 4);
|
||||
ctx->enc_expkey[i + 1] = tmp;
|
||||
|
||||
ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0];
|
||||
ctx->dec_expkey[j + 1] = tmp;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct crypto_alg des3_ede_algs[4] = { {
|
||||
.cra_name = "des3_ede",
|
||||
.cra_driver_name = "des3_ede-asm",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.cipher = {
|
||||
.cia_min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.cia_max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.cia_setkey = des3_ede_x86_setkey,
|
||||
.cia_encrypt = des3_ede_x86_encrypt,
|
||||
.cia_decrypt = des3_ede_x86_decrypt,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.cra_name = "ecb(des3_ede)",
|
||||
.cra_driver_name = "ecb-des3_ede-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.setkey = des3_ede_x86_setkey,
|
||||
.encrypt = ecb_encrypt,
|
||||
.decrypt = ecb_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "cbc(des3_ede)",
|
||||
.cra_driver_name = "cbc-des3_ede-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setkey = des3_ede_x86_setkey,
|
||||
.encrypt = cbc_encrypt,
|
||||
.decrypt = cbc_decrypt,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
.cra_name = "ctr(des3_ede)",
|
||||
.cra_driver_name = "ctr-des3_ede-asm",
|
||||
.cra_priority = 300,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
|
||||
.cra_alignmask = 0,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.min_keysize = DES3_EDE_KEY_SIZE,
|
||||
.max_keysize = DES3_EDE_KEY_SIZE,
|
||||
.ivsize = DES3_EDE_BLOCK_SIZE,
|
||||
.setkey = des3_ede_x86_setkey,
|
||||
.encrypt = ctr_crypt,
|
||||
.decrypt = ctr_crypt,
|
||||
},
|
||||
},
|
||||
} };
|
||||
|
||||
static bool is_blacklisted_cpu(void)
|
||||
{
|
||||
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
|
||||
return false;
|
||||
|
||||
if (boot_cpu_data.x86 == 0x0f) {
|
||||
/*
|
||||
* On Pentium 4, des3_ede-x86_64 is slower than generic C
|
||||
* implementation because use of 64bit rotates (which are really
|
||||
* slow on P4). Therefore blacklist P4s.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int force;
|
||||
module_param(force, int, 0);
|
||||
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
|
||||
|
||||
static int __init des3_ede_x86_init(void)
|
||||
{
|
||||
if (!force && is_blacklisted_cpu()) {
|
||||
pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
|
||||
}
|
||||
|
||||
static void __exit des3_ede_x86_fini(void)
|
||||
{
|
||||
crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
|
||||
}
|
||||
|
||||
module_init(des3_ede_x86_init);
|
||||
module_exit(des3_ede_x86_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
|
||||
MODULE_ALIAS("des3_ede");
|
||||
MODULE_ALIAS("des3_ede-asm");
|
||||
MODULE_ALIAS("des");
|
||||
MODULE_ALIAS("des-asm");
|
||||
MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
|
@ -23,7 +23,8 @@ comment "Crypto core or helper"
|
||||
|
||||
config CRYPTO_FIPS
|
||||
bool "FIPS 200 compliance"
|
||||
depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
|
||||
depends on MODULE_SIG
|
||||
help
|
||||
This options enables the fips boot option which is
|
||||
required if you want to system to operate in a FIPS 200
|
||||
@ -1019,6 +1020,19 @@ config CRYPTO_DES_SPARC64
|
||||
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
|
||||
optimized using SPARC64 crypto opcodes.
|
||||
|
||||
config CRYPTO_DES3_EDE_X86_64
|
||||
tristate "Triple DES EDE cipher algorithm (x86-64)"
|
||||
depends on X86 && 64BIT
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_DES
|
||||
help
|
||||
Triple DES EDE (FIPS 46-3) algorithm.
|
||||
|
||||
This module provides implementation of the Triple DES EDE cipher
|
||||
algorithm that is optimized for x86-64 processors. Two versions of
|
||||
algorithm are provided; regular processing one input block and
|
||||
one that processes three blocks parallel.
|
||||
|
||||
config CRYPTO_FCRYPT
|
||||
tristate "FCrypt cipher algorithm"
|
||||
select CRYPTO_ALGAPI
|
||||
@ -1380,6 +1394,40 @@ config CRYPTO_ANSI_CPRNG
|
||||
ANSI X9.31 A.2.4. Note that this option must be enabled if
|
||||
CRYPTO_FIPS is selected
|
||||
|
||||
menuconfig CRYPTO_DRBG_MENU
|
||||
tristate "NIST SP800-90A DRBG"
|
||||
help
|
||||
NIST SP800-90A compliant DRBG. In the following submenu, one or
|
||||
more of the DRBG types must be selected.
|
||||
|
||||
if CRYPTO_DRBG_MENU
|
||||
|
||||
config CRYPTO_DRBG_HMAC
|
||||
bool "Enable HMAC DRBG"
|
||||
default y
|
||||
select CRYPTO_HMAC
|
||||
help
|
||||
Enable the HMAC DRBG variant as defined in NIST SP800-90A.
|
||||
|
||||
config CRYPTO_DRBG_HASH
|
||||
bool "Enable Hash DRBG"
|
||||
select CRYPTO_HASH
|
||||
help
|
||||
Enable the Hash DRBG variant as defined in NIST SP800-90A.
|
||||
|
||||
config CRYPTO_DRBG_CTR
|
||||
bool "Enable CTR DRBG"
|
||||
select CRYPTO_AES
|
||||
help
|
||||
Enable the CTR DRBG variant as defined in NIST SP800-90A.
|
||||
|
||||
config CRYPTO_DRBG
|
||||
tristate
|
||||
default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR)
|
||||
select CRYPTO_RNG
|
||||
|
||||
endif # if CRYPTO_DRBG_MENU
|
||||
|
||||
config CRYPTO_USER_API
|
||||
tristate
|
||||
|
||||
|
@ -92,6 +92,7 @@ obj-$(CONFIG_CRYPTO_842) += 842.o
|
||||
obj-$(CONFIG_CRYPTO_RNG2) += rng.o
|
||||
obj-$(CONFIG_CRYPTO_RNG2) += krng.o
|
||||
obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
|
||||
obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
|
||||
obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
|
||||
obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
|
||||
obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
|
||||
|
@ -41,8 +41,20 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void crypto_check_module_sig(struct module *mod)
|
||||
{
|
||||
#ifdef CONFIG_CRYPTO_FIPS
|
||||
if (fips_enabled && mod && !mod->sig_ok)
|
||||
panic("Module %s signature verification failed in FIPS mode\n",
|
||||
mod->name);
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
static int crypto_check_alg(struct crypto_alg *alg)
|
||||
{
|
||||
crypto_check_module_sig(alg->cra_module);
|
||||
|
||||
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
|
||||
return -EINVAL;
|
||||
|
||||
@ -430,6 +442,8 @@ int crypto_register_template(struct crypto_template *tmpl)
|
||||
|
||||
down_write(&crypto_alg_sem);
|
||||
|
||||
crypto_check_module_sig(tmpl->module);
|
||||
|
||||
list_for_each_entry(q, &crypto_template_list, list) {
|
||||
if (q == tmpl)
|
||||
goto out;
|
||||
|
@ -233,7 +233,7 @@ static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
|
||||
}
|
||||
|
||||
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
|
||||
crypto_completion_t complete)
|
||||
crypto_completion_t compl)
|
||||
{
|
||||
struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
|
||||
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
|
||||
@ -241,7 +241,7 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
|
||||
|
||||
queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
|
||||
rctx->complete = req->base.complete;
|
||||
req->base.complete = complete;
|
||||
req->base.complete = compl;
|
||||
|
||||
return cryptd_enqueue_request(queue, &req->base);
|
||||
}
|
||||
@ -414,7 +414,7 @@ static int cryptd_hash_setkey(struct crypto_ahash *parent,
|
||||
}
|
||||
|
||||
static int cryptd_hash_enqueue(struct ahash_request *req,
|
||||
crypto_completion_t complete)
|
||||
crypto_completion_t compl)
|
||||
{
|
||||
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
@ -422,7 +422,7 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
|
||||
cryptd_get_queue(crypto_ahash_tfm(tfm));
|
||||
|
||||
rctx->complete = req->base.complete;
|
||||
req->base.complete = complete;
|
||||
req->base.complete = compl;
|
||||
|
||||
return cryptd_enqueue_request(queue, &req->base);
|
||||
}
|
||||
@ -667,14 +667,14 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
|
||||
}
|
||||
|
||||
static int cryptd_aead_enqueue(struct aead_request *req,
|
||||
crypto_completion_t complete)
|
||||
crypto_completion_t compl)
|
||||
{
|
||||
struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
|
||||
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
|
||||
struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
|
||||
|
||||
rctx->complete = req->base.complete;
|
||||
req->base.complete = complete;
|
||||
req->base.complete = compl;
|
||||
return cryptd_enqueue_request(queue, &req->base);
|
||||
}
|
||||
|
||||
|
@ -859,13 +859,10 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
* property.
|
||||
*
|
||||
*/
|
||||
static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
const u32 *K = (const u32 *)key;
|
||||
struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
|
||||
u32 *expkey = dctx->expkey;
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
|
||||
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
|
||||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
|
||||
@ -880,6 +877,17 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__des3_ede_setkey);
|
||||
|
||||
static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
|
||||
u32 *flags = &tfm->crt_flags;
|
||||
u32 *expkey = dctx->expkey;
|
||||
|
||||
return __des3_ede_setkey(expkey, flags, key, keylen);
|
||||
}
|
||||
|
||||
static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
{
|
||||
@ -945,6 +953,8 @@ static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||
|
||||
static struct crypto_alg des_algs[2] = { {
|
||||
.cra_name = "des",
|
||||
.cra_driver_name = "des-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = DES_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des_ctx),
|
||||
@ -958,6 +968,8 @@ static struct crypto_alg des_algs[2] = { {
|
||||
.cia_decrypt = des_decrypt } }
|
||||
}, {
|
||||
.cra_name = "des3_ede",
|
||||
.cra_driver_name = "des3_ede-generic",
|
||||
.cra_priority = 100,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct des3_ede_ctx),
|
||||
|
2044
crypto/drbg.c
Normal file
2044
crypto/drbg.c
Normal file
File diff suppressed because it is too large
Load Diff
@ -68,7 +68,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
|
||||
struct ablkcipher_request *subreq;
|
||||
crypto_completion_t complete;
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
struct scatterlist *osrc, *odst;
|
||||
struct scatterlist *dst;
|
||||
@ -86,7 +86,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
|
||||
|
||||
giv = req->giv;
|
||||
complete = req->creq.base.complete;
|
||||
compl = req->creq.base.complete;
|
||||
data = req->creq.base.data;
|
||||
|
||||
osrc = req->creq.src;
|
||||
@ -101,11 +101,11 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
if (vsrc != giv + ivsize && vdst != giv + ivsize) {
|
||||
giv = PTR_ALIGN((u8 *)reqctx->tail,
|
||||
crypto_ablkcipher_alignmask(geniv) + 1);
|
||||
complete = eseqiv_complete;
|
||||
compl = eseqiv_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
|
||||
data);
|
||||
|
||||
sg_init_table(reqctx->src, 2);
|
||||
|
30
crypto/gcm.c
30
crypto/gcm.c
@ -228,14 +228,14 @@ static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
|
||||
|
||||
static int gcm_hash_update(struct aead_request *req,
|
||||
struct crypto_gcm_req_priv_ctx *pctx,
|
||||
crypto_completion_t complete,
|
||||
crypto_completion_t compl,
|
||||
struct scatterlist *src,
|
||||
unsigned int len)
|
||||
{
|
||||
struct ahash_request *ahreq = &pctx->u.ahreq;
|
||||
|
||||
ahash_request_set_callback(ahreq, aead_request_flags(req),
|
||||
complete, req);
|
||||
compl, req);
|
||||
ahash_request_set_crypt(ahreq, src, NULL, len);
|
||||
|
||||
return crypto_ahash_update(ahreq);
|
||||
@ -244,12 +244,12 @@ static int gcm_hash_update(struct aead_request *req,
|
||||
static int gcm_hash_remain(struct aead_request *req,
|
||||
struct crypto_gcm_req_priv_ctx *pctx,
|
||||
unsigned int remain,
|
||||
crypto_completion_t complete)
|
||||
crypto_completion_t compl)
|
||||
{
|
||||
struct ahash_request *ahreq = &pctx->u.ahreq;
|
||||
|
||||
ahash_request_set_callback(ahreq, aead_request_flags(req),
|
||||
complete, req);
|
||||
compl, req);
|
||||
sg_init_one(pctx->src, gcm_zeroes, remain);
|
||||
ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
|
||||
|
||||
@ -375,14 +375,14 @@ static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
|
||||
{
|
||||
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
|
||||
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
|
||||
crypto_completion_t complete;
|
||||
crypto_completion_t compl;
|
||||
unsigned int remain = 0;
|
||||
|
||||
if (!err && gctx->cryptlen) {
|
||||
remain = gcm_remain(gctx->cryptlen);
|
||||
complete = remain ? gcm_hash_crypt_done :
|
||||
compl = remain ? gcm_hash_crypt_done :
|
||||
gcm_hash_crypt_remain_done;
|
||||
err = gcm_hash_update(req, pctx, complete,
|
||||
err = gcm_hash_update(req, pctx, compl,
|
||||
gctx->src, gctx->cryptlen);
|
||||
if (err == -EINPROGRESS || err == -EBUSY)
|
||||
return;
|
||||
@ -429,14 +429,14 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
|
||||
static void __gcm_hash_init_done(struct aead_request *req, int err)
|
||||
{
|
||||
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
|
||||
crypto_completion_t complete;
|
||||
crypto_completion_t compl;
|
||||
unsigned int remain = 0;
|
||||
|
||||
if (!err && req->assoclen) {
|
||||
remain = gcm_remain(req->assoclen);
|
||||
complete = remain ? gcm_hash_assoc_done :
|
||||
compl = remain ? gcm_hash_assoc_done :
|
||||
gcm_hash_assoc_remain_done;
|
||||
err = gcm_hash_update(req, pctx, complete,
|
||||
err = gcm_hash_update(req, pctx, compl,
|
||||
req->assoc, req->assoclen);
|
||||
if (err == -EINPROGRESS || err == -EBUSY)
|
||||
return;
|
||||
@ -462,7 +462,7 @@ static int gcm_hash(struct aead_request *req,
|
||||
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
|
||||
struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
|
||||
unsigned int remain;
|
||||
crypto_completion_t complete;
|
||||
crypto_completion_t compl;
|
||||
int err;
|
||||
|
||||
ahash_request_set_tfm(ahreq, ctx->ghash);
|
||||
@ -473,8 +473,8 @@ static int gcm_hash(struct aead_request *req,
|
||||
if (err)
|
||||
return err;
|
||||
remain = gcm_remain(req->assoclen);
|
||||
complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
|
||||
err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
|
||||
compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
|
||||
err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen);
|
||||
if (err)
|
||||
return err;
|
||||
if (remain) {
|
||||
@ -484,8 +484,8 @@ static int gcm_hash(struct aead_request *req,
|
||||
return err;
|
||||
}
|
||||
remain = gcm_remain(gctx->cryptlen);
|
||||
complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
|
||||
err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
|
||||
compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
|
||||
err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen);
|
||||
if (err)
|
||||
return err;
|
||||
if (remain) {
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/lzo.h>
|
||||
|
||||
struct lzo_ctx {
|
||||
@ -30,7 +31,10 @@ static int lzo_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
|
||||
ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
|
||||
GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
|
||||
if (!ctx->lzo_comp_mem)
|
||||
ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
|
||||
if (!ctx->lzo_comp_mem)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -41,7 +45,7 @@ static void lzo_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
vfree(ctx->lzo_comp_mem);
|
||||
kvfree(ctx->lzo_comp_mem);
|
||||
}
|
||||
|
||||
static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
|
||||
|
@ -100,7 +100,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
|
||||
struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
|
||||
struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
|
||||
crypto_completion_t complete;
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
u8 *info;
|
||||
unsigned int ivsize;
|
||||
@ -108,7 +108,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
|
||||
ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
|
||||
|
||||
complete = req->creq.base.complete;
|
||||
compl = req->creq.base.complete;
|
||||
data = req->creq.base.data;
|
||||
info = req->creq.info;
|
||||
|
||||
@ -122,11 +122,11 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
complete = seqiv_complete;
|
||||
compl = seqiv_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
|
||||
ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
|
||||
data);
|
||||
ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
|
||||
req->creq.nbytes, info);
|
||||
@ -146,7 +146,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
|
||||
struct aead_request *areq = &req->areq;
|
||||
struct aead_request *subreq = aead_givcrypt_reqctx(req);
|
||||
crypto_completion_t complete;
|
||||
crypto_completion_t compl;
|
||||
void *data;
|
||||
u8 *info;
|
||||
unsigned int ivsize;
|
||||
@ -154,7 +154,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
|
||||
aead_request_set_tfm(subreq, aead_geniv_base(geniv));
|
||||
|
||||
complete = areq->base.complete;
|
||||
compl = areq->base.complete;
|
||||
data = areq->base.data;
|
||||
info = areq->iv;
|
||||
|
||||
@ -168,11 +168,11 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
|
||||
complete = seqiv_aead_complete;
|
||||
compl = seqiv_aead_complete;
|
||||
data = req;
|
||||
}
|
||||
|
||||
aead_request_set_callback(subreq, areq->base.flags, complete, data);
|
||||
aead_request_set_callback(subreq, areq->base.flags, compl, data);
|
||||
aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
|
||||
info);
|
||||
aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
|
||||
|
114
crypto/tcrypt.c
114
crypto/tcrypt.c
@ -47,6 +47,11 @@
|
||||
#define ENCRYPT 1
|
||||
#define DECRYPT 0
|
||||
|
||||
/*
|
||||
* return a string with the driver name
|
||||
*/
|
||||
#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))
|
||||
|
||||
/*
|
||||
* Used by test_cipher_speed()
|
||||
*/
|
||||
@ -68,13 +73,13 @@ static char *check[] = {
|
||||
};
|
||||
|
||||
static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
|
||||
struct scatterlist *sg, int blen, int sec)
|
||||
struct scatterlist *sg, int blen, int secs)
|
||||
{
|
||||
unsigned long start, end;
|
||||
int bcount;
|
||||
int ret;
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
if (enc)
|
||||
ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
|
||||
@ -86,7 +91,7 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
|
||||
}
|
||||
|
||||
printk("%d operations in %d seconds (%ld bytes)\n",
|
||||
bcount, sec, (long)bcount * blen);
|
||||
bcount, secs, (long)bcount * blen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -138,13 +143,13 @@ out:
|
||||
}
|
||||
|
||||
static int test_aead_jiffies(struct aead_request *req, int enc,
|
||||
int blen, int sec)
|
||||
int blen, int secs)
|
||||
{
|
||||
unsigned long start, end;
|
||||
int bcount;
|
||||
int ret;
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
if (enc)
|
||||
ret = crypto_aead_encrypt(req);
|
||||
@ -156,7 +161,7 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
|
||||
}
|
||||
|
||||
printk("%d operations in %d seconds (%ld bytes)\n",
|
||||
bcount, sec, (long)bcount * blen);
|
||||
bcount, secs, (long)bcount * blen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -260,7 +265,7 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
|
||||
}
|
||||
}
|
||||
|
||||
static void test_aead_speed(const char *algo, int enc, unsigned int sec,
|
||||
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
|
||||
struct aead_speed_template *template,
|
||||
unsigned int tcount, u8 authsize,
|
||||
unsigned int aad_size, u8 *keysize)
|
||||
@ -305,9 +310,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
|
||||
asg = &sg[8];
|
||||
sgout = &asg[8];
|
||||
|
||||
|
||||
printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e);
|
||||
|
||||
tfm = crypto_alloc_aead(algo, 0, 0);
|
||||
|
||||
if (IS_ERR(tfm)) {
|
||||
@ -316,6 +318,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
|
||||
goto out_notfm;
|
||||
}
|
||||
|
||||
printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
|
||||
get_driver_name(crypto_aead, tfm), e);
|
||||
|
||||
req = aead_request_alloc(tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
pr_err("alg: aead: Failed to allocate request for %s\n",
|
||||
@ -374,8 +379,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
|
||||
aead_request_set_crypt(req, sg, sgout, *b_size, iv);
|
||||
aead_request_set_assoc(req, asg, aad_size);
|
||||
|
||||
if (sec)
|
||||
ret = test_aead_jiffies(req, enc, *b_size, sec);
|
||||
if (secs)
|
||||
ret = test_aead_jiffies(req, enc, *b_size,
|
||||
secs);
|
||||
else
|
||||
ret = test_aead_cycles(req, enc, *b_size);
|
||||
|
||||
@ -405,7 +411,7 @@ out_noxbuf:
|
||||
return;
|
||||
}
|
||||
|
||||
static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
|
||||
struct cipher_speed_template *template,
|
||||
unsigned int tcount, u8 *keysize)
|
||||
{
|
||||
@ -422,8 +428,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
else
|
||||
e = "decryption";
|
||||
|
||||
printk("\ntesting speed of %s %s\n", algo, e);
|
||||
|
||||
tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
|
||||
|
||||
if (IS_ERR(tfm)) {
|
||||
@ -434,6 +438,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
desc.tfm = tfm;
|
||||
desc.flags = 0;
|
||||
|
||||
printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
|
||||
get_driver_name(crypto_blkcipher, tfm), e);
|
||||
|
||||
i = 0;
|
||||
do {
|
||||
|
||||
@ -483,9 +490,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
crypto_blkcipher_set_iv(tfm, iv, iv_len);
|
||||
}
|
||||
|
||||
if (sec)
|
||||
if (secs)
|
||||
ret = test_cipher_jiffies(&desc, enc, sg,
|
||||
*b_size, sec);
|
||||
*b_size, secs);
|
||||
else
|
||||
ret = test_cipher_cycles(&desc, enc, sg,
|
||||
*b_size);
|
||||
@ -506,13 +513,13 @@ out:
|
||||
|
||||
static int test_hash_jiffies_digest(struct hash_desc *desc,
|
||||
struct scatterlist *sg, int blen,
|
||||
char *out, int sec)
|
||||
char *out, int secs)
|
||||
{
|
||||
unsigned long start, end;
|
||||
int bcount;
|
||||
int ret;
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
ret = crypto_hash_digest(desc, sg, blen, out);
|
||||
if (ret)
|
||||
@ -520,22 +527,22 @@ static int test_hash_jiffies_digest(struct hash_desc *desc,
|
||||
}
|
||||
|
||||
printk("%6u opers/sec, %9lu bytes/sec\n",
|
||||
bcount / sec, ((long)bcount * blen) / sec);
|
||||
bcount / secs, ((long)bcount * blen) / secs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
|
||||
int blen, int plen, char *out, int sec)
|
||||
int blen, int plen, char *out, int secs)
|
||||
{
|
||||
unsigned long start, end;
|
||||
int bcount, pcount;
|
||||
int ret;
|
||||
|
||||
if (plen == blen)
|
||||
return test_hash_jiffies_digest(desc, sg, blen, out, sec);
|
||||
return test_hash_jiffies_digest(desc, sg, blen, out, secs);
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
ret = crypto_hash_init(desc);
|
||||
if (ret)
|
||||
@ -552,7 +559,7 @@ static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
|
||||
}
|
||||
|
||||
printk("%6u opers/sec, %9lu bytes/sec\n",
|
||||
bcount / sec, ((long)bcount * blen) / sec);
|
||||
bcount / secs, ((long)bcount * blen) / secs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -673,7 +680,7 @@ static void test_hash_sg_init(struct scatterlist *sg)
|
||||
}
|
||||
}
|
||||
|
||||
static void test_hash_speed(const char *algo, unsigned int sec,
|
||||
static void test_hash_speed(const char *algo, unsigned int secs,
|
||||
struct hash_speed *speed)
|
||||
{
|
||||
struct scatterlist sg[TVMEMSIZE];
|
||||
@ -683,8 +690,6 @@ static void test_hash_speed(const char *algo, unsigned int sec,
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
printk(KERN_INFO "\ntesting speed of %s\n", algo);
|
||||
|
||||
tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
|
||||
|
||||
if (IS_ERR(tfm)) {
|
||||
@ -693,6 +698,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
|
||||
return;
|
||||
}
|
||||
|
||||
printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo,
|
||||
get_driver_name(crypto_hash, tfm));
|
||||
|
||||
desc.tfm = tfm;
|
||||
desc.flags = 0;
|
||||
|
||||
@ -718,9 +726,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
|
||||
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
|
||||
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
|
||||
|
||||
if (sec)
|
||||
if (secs)
|
||||
ret = test_hash_jiffies(&desc, sg, speed[i].blen,
|
||||
speed[i].plen, output, sec);
|
||||
speed[i].plen, output, secs);
|
||||
else
|
||||
ret = test_hash_cycles(&desc, sg, speed[i].blen,
|
||||
speed[i].plen, output);
|
||||
@ -765,13 +773,13 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
|
||||
}
|
||||
|
||||
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
|
||||
char *out, int sec)
|
||||
char *out, int secs)
|
||||
{
|
||||
unsigned long start, end;
|
||||
int bcount;
|
||||
int ret;
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
|
||||
if (ret)
|
||||
@ -779,22 +787,22 @@ static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
|
||||
}
|
||||
|
||||
printk("%6u opers/sec, %9lu bytes/sec\n",
|
||||
bcount / sec, ((long)bcount * blen) / sec);
|
||||
bcount / secs, ((long)bcount * blen) / secs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int test_ahash_jiffies(struct ahash_request *req, int blen,
|
||||
int plen, char *out, int sec)
|
||||
int plen, char *out, int secs)
|
||||
{
|
||||
unsigned long start, end;
|
||||
int bcount, pcount;
|
||||
int ret;
|
||||
|
||||
if (plen == blen)
|
||||
return test_ahash_jiffies_digest(req, blen, out, sec);
|
||||
return test_ahash_jiffies_digest(req, blen, out, secs);
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
ret = crypto_ahash_init(req);
|
||||
if (ret)
|
||||
@ -811,7 +819,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen,
|
||||
}
|
||||
|
||||
pr_cont("%6u opers/sec, %9lu bytes/sec\n",
|
||||
bcount / sec, ((long)bcount * blen) / sec);
|
||||
bcount / secs, ((long)bcount * blen) / secs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -911,7 +919,7 @@ out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void test_ahash_speed(const char *algo, unsigned int sec,
|
||||
static void test_ahash_speed(const char *algo, unsigned int secs,
|
||||
struct hash_speed *speed)
|
||||
{
|
||||
struct scatterlist sg[TVMEMSIZE];
|
||||
@ -921,8 +929,6 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
|
||||
static char output[1024];
|
||||
int i, ret;
|
||||
|
||||
printk(KERN_INFO "\ntesting speed of async %s\n", algo);
|
||||
|
||||
tfm = crypto_alloc_ahash(algo, 0, 0);
|
||||
if (IS_ERR(tfm)) {
|
||||
pr_err("failed to load transform for %s: %ld\n",
|
||||
@ -930,6 +936,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
|
||||
return;
|
||||
}
|
||||
|
||||
printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
|
||||
get_driver_name(crypto_ahash, tfm));
|
||||
|
||||
if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
|
||||
pr_err("digestsize(%u) > outputbuffer(%zu)\n",
|
||||
crypto_ahash_digestsize(tfm), sizeof(output));
|
||||
@ -960,9 +969,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
|
||||
|
||||
ahash_request_set_crypt(req, sg, output, speed[i].plen);
|
||||
|
||||
if (sec)
|
||||
if (secs)
|
||||
ret = test_ahash_jiffies(req, speed[i].blen,
|
||||
speed[i].plen, output, sec);
|
||||
speed[i].plen, output, secs);
|
||||
else
|
||||
ret = test_ahash_cycles(req, speed[i].blen,
|
||||
speed[i].plen, output);
|
||||
@ -994,13 +1003,13 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
|
||||
}
|
||||
|
||||
static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
|
||||
int blen, int sec)
|
||||
int blen, int secs)
|
||||
{
|
||||
unsigned long start, end;
|
||||
int bcount;
|
||||
int ret;
|
||||
|
||||
for (start = jiffies, end = start + sec * HZ, bcount = 0;
|
||||
for (start = jiffies, end = start + secs * HZ, bcount = 0;
|
||||
time_before(jiffies, end); bcount++) {
|
||||
if (enc)
|
||||
ret = do_one_acipher_op(req,
|
||||
@ -1014,7 +1023,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
|
||||
}
|
||||
|
||||
pr_cont("%d operations in %d seconds (%ld bytes)\n",
|
||||
bcount, sec, (long)bcount * blen);
|
||||
bcount, secs, (long)bcount * blen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1065,7 +1074,7 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
|
||||
struct cipher_speed_template *template,
|
||||
unsigned int tcount, u8 *keysize)
|
||||
{
|
||||
@ -1083,8 +1092,6 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
else
|
||||
e = "decryption";
|
||||
|
||||
pr_info("\ntesting speed of async %s %s\n", algo, e);
|
||||
|
||||
init_completion(&tresult.completion);
|
||||
|
||||
tfm = crypto_alloc_ablkcipher(algo, 0, 0);
|
||||
@ -1095,6 +1102,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
return;
|
||||
}
|
||||
|
||||
pr_info("\ntesting speed of async %s (%s) %s\n", algo,
|
||||
get_driver_name(crypto_ablkcipher, tfm), e);
|
||||
|
||||
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
|
||||
@ -1168,9 +1178,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
|
||||
|
||||
ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
|
||||
|
||||
if (sec)
|
||||
if (secs)
|
||||
ret = test_acipher_jiffies(req, enc,
|
||||
*b_size, sec);
|
||||
*b_size, secs);
|
||||
else
|
||||
ret = test_acipher_cycles(req, enc,
|
||||
*b_size);
|
||||
@ -1585,6 +1595,12 @@ static int do_test(int m)
|
||||
test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
|
||||
des3_speed_template, DES3_SPEED_VECTORS,
|
||||
speed_template_24);
|
||||
test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec,
|
||||
des3_speed_template, DES3_SPEED_VECTORS,
|
||||
speed_template_24);
|
||||
test_cipher_speed("ctr(des3_ede)", DECRYPT, sec,
|
||||
des3_speed_template, DES3_SPEED_VECTORS,
|
||||
speed_template_24);
|
||||
break;
|
||||
|
||||
case 202:
|
||||
|
304
crypto/testmgr.c
304
crypto/testmgr.c
@ -27,6 +27,7 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <crypto/rng.h>
|
||||
#include <crypto/drbg.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
@ -108,6 +109,11 @@ struct cprng_test_suite {
|
||||
unsigned int count;
|
||||
};
|
||||
|
||||
struct drbg_test_suite {
|
||||
struct drbg_testvec *vecs;
|
||||
unsigned int count;
|
||||
};
|
||||
|
||||
struct alg_test_desc {
|
||||
const char *alg;
|
||||
int (*test)(const struct alg_test_desc *desc, const char *driver,
|
||||
@ -121,6 +127,7 @@ struct alg_test_desc {
|
||||
struct pcomp_test_suite pcomp;
|
||||
struct hash_test_suite hash;
|
||||
struct cprng_test_suite cprng;
|
||||
struct drbg_test_suite drbg;
|
||||
} suite;
|
||||
};
|
||||
|
||||
@ -191,13 +198,20 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
|
||||
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
|
||||
unsigned int i, j, k, temp;
|
||||
struct scatterlist sg[8];
|
||||
char result[64];
|
||||
char *result;
|
||||
char *key;
|
||||
struct ahash_request *req;
|
||||
struct tcrypt_result tresult;
|
||||
void *hash_buff;
|
||||
char *xbuf[XBUFSIZE];
|
||||
int ret = -ENOMEM;
|
||||
|
||||
result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
|
||||
if (!result)
|
||||
return ret;
|
||||
key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
|
||||
if (!key)
|
||||
goto out_nobuf;
|
||||
if (testmgr_alloc_buf(xbuf))
|
||||
goto out_nobuf;
|
||||
|
||||
@ -222,7 +236,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
|
||||
goto out;
|
||||
|
||||
j++;
|
||||
memset(result, 0, 64);
|
||||
memset(result, 0, MAX_DIGEST_SIZE);
|
||||
|
||||
hash_buff = xbuf[0];
|
||||
hash_buff += align_offset;
|
||||
@ -232,8 +246,14 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
|
||||
|
||||
if (template[i].ksize) {
|
||||
crypto_ahash_clear_flags(tfm, ~0);
|
||||
ret = crypto_ahash_setkey(tfm, template[i].key,
|
||||
template[i].ksize);
|
||||
if (template[i].ksize > MAX_KEYLEN) {
|
||||
pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
|
||||
j, algo, template[i].ksize, MAX_KEYLEN);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
memcpy(key, template[i].key, template[i].ksize);
|
||||
ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "alg: hash: setkey failed on "
|
||||
"test %d for %s: ret=%d\n", j, algo,
|
||||
@ -293,7 +313,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
|
||||
|
||||
if (template[i].np) {
|
||||
j++;
|
||||
memset(result, 0, 64);
|
||||
memset(result, 0, MAX_DIGEST_SIZE);
|
||||
|
||||
temp = 0;
|
||||
sg_init_table(sg, template[i].np);
|
||||
@ -312,8 +332,16 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
|
||||
}
|
||||
|
||||
if (template[i].ksize) {
|
||||
if (template[i].ksize > MAX_KEYLEN) {
|
||||
pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
|
||||
j, algo, template[i].ksize,
|
||||
MAX_KEYLEN);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
crypto_ahash_clear_flags(tfm, ~0);
|
||||
ret = crypto_ahash_setkey(tfm, template[i].key,
|
||||
memcpy(key, template[i].key, template[i].ksize);
|
||||
ret = crypto_ahash_setkey(tfm, key,
|
||||
template[i].ksize);
|
||||
|
||||
if (ret) {
|
||||
@ -365,6 +393,8 @@ out:
|
||||
out_noreq:
|
||||
testmgr_free_buf(xbuf);
|
||||
out_nobuf:
|
||||
kfree(key);
|
||||
kfree(result);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -422,6 +452,9 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
|
||||
iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
|
||||
if (!iv)
|
||||
return ret;
|
||||
key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
|
||||
if (!key)
|
||||
goto out_noxbuf;
|
||||
if (testmgr_alloc_buf(xbuf))
|
||||
goto out_noxbuf;
|
||||
if (testmgr_alloc_buf(axbuf))
|
||||
@ -486,7 +519,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
|
||||
crypto_aead_set_flags(
|
||||
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
|
||||
key = template[i].key;
|
||||
if (template[i].klen > MAX_KEYLEN) {
|
||||
pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
|
||||
d, j, algo, template[i].klen,
|
||||
MAX_KEYLEN);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
memcpy(key, template[i].key, template[i].klen);
|
||||
|
||||
ret = crypto_aead_setkey(tfm, key,
|
||||
template[i].klen);
|
||||
@ -587,7 +627,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
|
||||
if (template[i].wk)
|
||||
crypto_aead_set_flags(
|
||||
tfm, CRYPTO_TFM_REQ_WEAK_KEY);
|
||||
key = template[i].key;
|
||||
if (template[i].klen > MAX_KEYLEN) {
|
||||
pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
|
||||
d, j, algo, template[i].klen,
|
||||
MAX_KEYLEN);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
memcpy(key, template[i].key, template[i].klen);
|
||||
|
||||
ret = crypto_aead_setkey(tfm, key, template[i].klen);
|
||||
if (!ret == template[i].fail) {
|
||||
@ -769,6 +816,7 @@ out_nooutbuf:
|
||||
out_noaxbuf:
|
||||
testmgr_free_buf(xbuf);
|
||||
out_noxbuf:
|
||||
kfree(key);
|
||||
kfree(iv);
|
||||
return ret;
|
||||
}
|
||||
@ -1715,6 +1763,100 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
static int drbg_cavs_test(struct drbg_testvec *test, int pr,
|
||||
const char *driver, u32 type, u32 mask)
|
||||
{
|
||||
int ret = -EAGAIN;
|
||||
struct crypto_rng *drng;
|
||||
struct drbg_test_data test_data;
|
||||
struct drbg_string addtl, pers, testentropy;
|
||||
unsigned char *buf = kzalloc(test->expectedlen, GFP_KERNEL);
|
||||
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
drng = crypto_alloc_rng(driver, type, mask);
|
||||
if (IS_ERR(drng)) {
|
||||
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
|
||||
"%s\n", driver);
|
||||
kzfree(buf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
test_data.testentropy = &testentropy;
|
||||
drbg_string_fill(&testentropy, test->entropy, test->entropylen);
|
||||
drbg_string_fill(&pers, test->pers, test->perslen);
|
||||
ret = crypto_drbg_reset_test(drng, &pers, &test_data);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "alg: drbg: Failed to reset rng\n");
|
||||
goto outbuf;
|
||||
}
|
||||
|
||||
drbg_string_fill(&addtl, test->addtla, test->addtllen);
|
||||
if (pr) {
|
||||
drbg_string_fill(&testentropy, test->entpra, test->entprlen);
|
||||
ret = crypto_drbg_get_bytes_addtl_test(drng,
|
||||
buf, test->expectedlen, &addtl, &test_data);
|
||||
} else {
|
||||
ret = crypto_drbg_get_bytes_addtl(drng,
|
||||
buf, test->expectedlen, &addtl);
|
||||
}
|
||||
if (ret <= 0) {
|
||||
printk(KERN_ERR "alg: drbg: could not obtain random data for "
|
||||
"driver %s\n", driver);
|
||||
goto outbuf;
|
||||
}
|
||||
|
||||
drbg_string_fill(&addtl, test->addtlb, test->addtllen);
|
||||
if (pr) {
|
||||
drbg_string_fill(&testentropy, test->entprb, test->entprlen);
|
||||
ret = crypto_drbg_get_bytes_addtl_test(drng,
|
||||
buf, test->expectedlen, &addtl, &test_data);
|
||||
} else {
|
||||
ret = crypto_drbg_get_bytes_addtl(drng,
|
||||
buf, test->expectedlen, &addtl);
|
||||
}
|
||||
if (ret <= 0) {
|
||||
printk(KERN_ERR "alg: drbg: could not obtain random data for "
|
||||
"driver %s\n", driver);
|
||||
goto outbuf;
|
||||
}
|
||||
|
||||
ret = memcmp(test->expected, buf, test->expectedlen);
|
||||
|
||||
outbuf:
|
||||
crypto_free_rng(drng);
|
||||
kzfree(buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
int err = 0;
|
||||
int pr = 0;
|
||||
int i = 0;
|
||||
struct drbg_testvec *template = desc->suite.drbg.vecs;
|
||||
unsigned int tcount = desc->suite.drbg.count;
|
||||
|
||||
if (0 == memcmp(driver, "drbg_pr_", 8))
|
||||
pr = 1;
|
||||
|
||||
for (i = 0; i < tcount; i++) {
|
||||
err = drbg_cavs_test(&template[i], pr, driver, type, mask);
|
||||
if (err) {
|
||||
printk(KERN_ERR "alg: drbg: Test %d failed for %s\n",
|
||||
i, driver);
|
||||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return err;
|
||||
|
||||
}
|
||||
|
||||
static int alg_test_null(const struct alg_test_desc *desc,
|
||||
const char *driver, u32 type, u32 mask)
|
||||
{
|
||||
@ -2457,6 +2599,152 @@ static const struct alg_test_desc alg_test_descs[] = {
|
||||
}, {
|
||||
.alg = "digest_null",
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_nopr_ctr_aes128",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_nopr_ctr_aes128_tv_template,
|
||||
.count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "drbg_nopr_ctr_aes192",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_nopr_ctr_aes192_tv_template,
|
||||
.count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
.alg = "drbg_nopr_ctr_aes256",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_nopr_ctr_aes256_tv_template,
|
||||
.count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
/*
|
||||
* There is no need to specifically test the DRBG with every
|
||||
* backend cipher -- covered by drbg_nopr_hmac_sha256 test
|
||||
*/
|
||||
.alg = "drbg_nopr_hmac_sha1",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_nopr_hmac_sha256",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_nopr_hmac_sha256_tv_template,
|
||||
.count =
|
||||
ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
/* covered by drbg_nopr_hmac_sha256 test */
|
||||
.alg = "drbg_nopr_hmac_sha384",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_nopr_hmac_sha512",
|
||||
.test = alg_test_null,
|
||||
.fips_allowed = 1,
|
||||
}, {
|
||||
.alg = "drbg_nopr_sha1",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_nopr_sha256",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_nopr_sha256_tv_template,
|
||||
.count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
/* covered by drbg_nopr_sha256 test */
|
||||
.alg = "drbg_nopr_sha384",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_nopr_sha512",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_pr_ctr_aes128",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_pr_ctr_aes128_tv_template,
|
||||
.count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
/* covered by drbg_pr_ctr_aes128 test */
|
||||
.alg = "drbg_pr_ctr_aes192",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_pr_ctr_aes256",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_pr_hmac_sha1",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_pr_hmac_sha256",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_pr_hmac_sha256_tv_template,
|
||||
.count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
/* covered by drbg_pr_hmac_sha256 test */
|
||||
.alg = "drbg_pr_hmac_sha384",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_pr_hmac_sha512",
|
||||
.test = alg_test_null,
|
||||
.fips_allowed = 1,
|
||||
}, {
|
||||
.alg = "drbg_pr_sha1",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_pr_sha256",
|
||||
.test = alg_test_drbg,
|
||||
.fips_allowed = 1,
|
||||
.suite = {
|
||||
.drbg = {
|
||||
.vecs = drbg_pr_sha256_tv_template,
|
||||
.count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
|
||||
}
|
||||
}
|
||||
}, {
|
||||
/* covered by drbg_pr_sha256 test */
|
||||
.alg = "drbg_pr_sha384",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "drbg_pr_sha512",
|
||||
.fips_allowed = 1,
|
||||
.test = alg_test_null,
|
||||
}, {
|
||||
.alg = "ecb(__aes-aesni)",
|
||||
.test = alg_test_null,
|
||||
|
1158
crypto/testmgr.h
1158
crypto/testmgr.h
File diff suppressed because it is too large
Load Diff
@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
|
||||
|
||||
config CRYPTO_DEV_CCP
|
||||
bool "Support for AMD Cryptographic Coprocessor"
|
||||
depends on X86 && PCI
|
||||
depends on (X86 && PCI) || ARM64
|
||||
default n
|
||||
help
|
||||
The AMD Cryptographic Coprocessor provides hardware support
|
||||
@ -418,4 +418,22 @@ config CRYPTO_DEV_MXS_DCP
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called mxs-dcp.
|
||||
|
||||
source "drivers/crypto/qat/Kconfig"
|
||||
|
||||
config CRYPTO_DEV_QCE
|
||||
tristate "Qualcomm crypto engine accelerator"
|
||||
depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
|
||||
select CRYPTO_AES
|
||||
select CRYPTO_DES
|
||||
select CRYPTO_ECB
|
||||
select CRYPTO_CBC
|
||||
select CRYPTO_XTS
|
||||
select CRYPTO_CTR
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_BLKCIPHER
|
||||
help
|
||||
This driver supports Qualcomm crypto engine accelerator
|
||||
hardware. To compile this driver as a module, choose M here. The
|
||||
module will be called qcrypto.
|
||||
|
||||
endif # CRYPTO_HW
|
||||
|
@ -23,3 +23,5 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
|
||||
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
|
||||
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
|
||||
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
|
||||
|
@ -1292,7 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
|
||||
.of_match_table = crypto4xx_match,
|
||||
},
|
||||
.probe = crypto4xx_probe,
|
||||
.remove = crypto4xx_remove,
|
||||
.remove = __exit_p(crypto4xx_remove),
|
||||
};
|
||||
|
||||
module_platform_driver(crypto4xx_driver);
|
||||
|
@ -1353,7 +1353,6 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
|
||||
GFP_KERNEL);
|
||||
if (!pdata->dma_slave) {
|
||||
dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
|
||||
devm_kfree(&pdev->dev, pdata);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -1375,7 +1374,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
|
||||
unsigned long sha_phys_size;
|
||||
int err;
|
||||
|
||||
sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
|
||||
sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
|
||||
GFP_KERNEL);
|
||||
if (sha_dd == NULL) {
|
||||
dev_err(dev, "unable to alloc data struct.\n");
|
||||
err = -ENOMEM;
|
||||
@ -1490,8 +1490,6 @@ clk_err:
|
||||
free_irq(sha_dd->irq, sha_dd);
|
||||
res_err:
|
||||
tasklet_kill(&sha_dd->done_task);
|
||||
kfree(sha_dd);
|
||||
sha_dd = NULL;
|
||||
sha_dd_err:
|
||||
dev_err(dev, "initialization failed.\n");
|
||||
|
||||
@ -1523,9 +1521,6 @@ static int atmel_sha_remove(struct platform_device *pdev)
|
||||
if (sha_dd->irq >= 0)
|
||||
free_irq(sha_dd->irq, sha_dd);
|
||||
|
||||
kfree(sha_dd);
|
||||
sha_dd = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1337,7 +1337,6 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
|
||||
GFP_KERNEL);
|
||||
if (!pdata->dma_slave) {
|
||||
dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
|
||||
devm_kfree(&pdev->dev, pdata);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -1359,7 +1358,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
|
||||
unsigned long tdes_phys_size;
|
||||
int err;
|
||||
|
||||
tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
|
||||
tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
|
||||
if (tdes_dd == NULL) {
|
||||
dev_err(dev, "unable to alloc data struct.\n");
|
||||
err = -ENOMEM;
|
||||
@ -1483,8 +1482,6 @@ tdes_irq_err:
|
||||
res_err:
|
||||
tasklet_kill(&tdes_dd->done_task);
|
||||
tasklet_kill(&tdes_dd->queue_task);
|
||||
kfree(tdes_dd);
|
||||
tdes_dd = NULL;
|
||||
tdes_dd_err:
|
||||
dev_err(dev, "initialization failed.\n");
|
||||
|
||||
@ -1519,9 +1516,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
|
||||
if (tdes_dd->irq >= 0)
|
||||
free_irq(tdes_dd->irq, tdes_dd);
|
||||
|
||||
kfree(tdes_dd);
|
||||
tdes_dd = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type)
|
||||
{
|
||||
u32 *jump_cmd, *uncond_jump_cmd;
|
||||
|
||||
/* DK bit is valid only for AES */
|
||||
if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
|
||||
append_operation(desc, type | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_DECRYPT);
|
||||
return;
|
||||
}
|
||||
|
||||
jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
|
||||
append_operation(desc, type | OP_ALG_AS_INITFINAL |
|
||||
OP_ALG_DECRYPT);
|
||||
@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
|
||||
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
|
||||
desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
DMA_FROM_DEVICE, dst_chained);
|
||||
}
|
||||
|
||||
/* Check if data are contiguous */
|
||||
iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, iv_dma)) {
|
||||
dev_err(jrdev, "unable to map IV\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/* Check if data are contiguous */
|
||||
if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
|
||||
iv_dma || src_nents || iv_dma + ivsize !=
|
||||
sg_dma_address(req->src)) {
|
||||
@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
|
||||
desc_bytes;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
*all_contig_ptr = all_contig;
|
||||
|
||||
sec4_sg_index = 0;
|
||||
@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
|
||||
sg_to_sec4_sg_last(req->dst, dst_nents,
|
||||
edesc->sec4_sg + sec4_sg_index, 0);
|
||||
}
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return edesc;
|
||||
}
|
||||
@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
|
||||
DMA_FROM_DEVICE, dst_chained);
|
||||
}
|
||||
|
||||
/* Check if data are contiguous */
|
||||
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, iv_dma)) {
|
||||
dev_err(jrdev, "unable to map IV\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/* Check if data are contiguous */
|
||||
if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
|
||||
iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
|
||||
contig &= ~GIV_SRC_CONTIG;
|
||||
@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
|
||||
desc_bytes;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
*contig_ptr = contig;
|
||||
|
||||
sec4_sg_index = 0;
|
||||
@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
|
||||
sg_to_sec4_sg_last(req->dst, dst_nents,
|
||||
edesc->sec4_sg + sec4_sg_index, 0);
|
||||
}
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
return edesc;
|
||||
}
|
||||
@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
|
||||
DMA_FROM_DEVICE, dst_chained);
|
||||
}
|
||||
|
||||
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, iv_dma)) {
|
||||
dev_err(jrdev, "unable to map IV\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if iv can be contiguous with source and destination.
|
||||
* If so, include it. If not, create scatterlist.
|
||||
*/
|
||||
iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
|
||||
if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
|
||||
iv_contig = true;
|
||||
else
|
||||
@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
edesc->iv_dma = iv_dma;
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
|
||||
|
||||
static int __init caam_algapi_init(void)
|
||||
{
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct device *ctrldev;
|
||||
void *priv;
|
||||
int i = 0, err = 0;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
||||
if (!dev_node)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(dev_node);
|
||||
if (!pdev) {
|
||||
of_node_put(dev_node);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ctrldev = &pdev->dev;
|
||||
priv = dev_get_drvdata(ctrldev);
|
||||
of_node_put(dev_node);
|
||||
|
||||
/*
|
||||
* If priv is NULL, it's probably because the caam driver wasn't
|
||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
||||
*/
|
||||
if (!priv)
|
||||
return -ENODEV;
|
||||
|
||||
|
||||
INIT_LIST_HEAD(&alg_list);
|
||||
|
||||
/* register crypto algorithms the device supports */
|
||||
|
@ -137,13 +137,20 @@ struct caam_hash_state {
|
||||
/* Common job descriptor seq in/out ptr routines */
|
||||
|
||||
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
|
||||
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
|
||||
struct caam_hash_state *state,
|
||||
int ctx_len)
|
||||
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
|
||||
struct caam_hash_state *state,
|
||||
int ctx_len)
|
||||
{
|
||||
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
|
||||
ctx_len, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(jrdev, state->ctx_dma)) {
|
||||
dev_err(jrdev, "unable to map ctx\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Map req->result, and append seq_out_ptr command that points to it */
|
||||
@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
|
||||
}
|
||||
|
||||
/* Map state->caam_ctx, and add it to link table */
|
||||
static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
|
||||
struct caam_hash_state *state,
|
||||
int ctx_len,
|
||||
struct sec4_sg_entry *sec4_sg,
|
||||
u32 flag)
|
||||
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
|
||||
struct caam_hash_state *state, int ctx_len,
|
||||
struct sec4_sg_entry *sec4_sg, u32 flag)
|
||||
{
|
||||
state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
|
||||
if (dma_mapping_error(jrdev, state->ctx_dma)) {
|
||||
dev_err(jrdev, "unable to map ctx\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Common shared descriptor commands */
|
||||
@ -487,11 +499,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
|
||||
digestsize, 1);
|
||||
#endif
|
||||
}
|
||||
*keylen = digestsize;
|
||||
|
||||
dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
|
||||
dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
|
||||
|
||||
*keylen = digestsize;
|
||||
|
||||
kfree(desc);
|
||||
|
||||
return ret;
|
||||
@ -706,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
|
||||
if (err)
|
||||
caam_jr_strstatus(jrdev, err);
|
||||
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
|
||||
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
|
||||
kfree(edesc);
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -741,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
|
||||
if (err)
|
||||
caam_jr_strstatus(jrdev, err);
|
||||
|
||||
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
|
||||
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
|
||||
kfree(edesc);
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -808,12 +820,11 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
|
||||
DESC_JOB_IO_LEN;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
|
||||
edesc->sec4_sg, DMA_BIDIRECTIONAL);
|
||||
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
|
||||
edesc->sec4_sg, DMA_BIDIRECTIONAL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
|
||||
edesc->sec4_sg + 1,
|
||||
@ -839,6 +850,14 @@ static int ahash_update_ctx(struct ahash_request *req)
|
||||
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
|
||||
HDR_REVERSE);
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
|
||||
to_hash, LDST_SGF);
|
||||
|
||||
@ -911,23 +930,34 @@ static int ahash_final_ctx(struct ahash_request *req)
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
|
||||
DESC_JOB_IO_LEN;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
edesc->src_nents = 0;
|
||||
|
||||
ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
|
||||
DMA_TO_DEVICE);
|
||||
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
|
||||
edesc->sec4_sg, DMA_TO_DEVICE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
|
||||
buf, state->buf_dma, buflen,
|
||||
last_buflen);
|
||||
(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
|
||||
LDST_SGF);
|
||||
|
||||
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
||||
digestsize);
|
||||
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
||||
dev_err(jrdev, "unable to map dst\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
@ -989,11 +1019,11 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
|
||||
DESC_JOB_IO_LEN;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
|
||||
ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
|
||||
DMA_TO_DEVICE);
|
||||
ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
|
||||
edesc->sec4_sg, DMA_TO_DEVICE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
|
||||
buf, state->buf_dma, buflen,
|
||||
@ -1002,11 +1032,22 @@ static int ahash_finup_ctx(struct ahash_request *req)
|
||||
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
|
||||
sec4_sg_src_index, chained);
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
|
||||
buflen + req->nbytes, LDST_SGF);
|
||||
|
||||
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
||||
digestsize);
|
||||
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
||||
dev_err(jrdev, "unable to map dst\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
@ -1056,8 +1097,7 @@ static int ahash_digest(struct ahash_request *req)
|
||||
}
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
|
||||
DESC_JOB_IO_LEN;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->src_nents = src_nents;
|
||||
edesc->chained = chained;
|
||||
|
||||
@ -1067,6 +1107,12 @@ static int ahash_digest(struct ahash_request *req)
|
||||
|
||||
if (src_nents) {
|
||||
sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
src_dma = edesc->sec4_sg_dma;
|
||||
options = LDST_SGF;
|
||||
} else {
|
||||
@ -1077,6 +1123,10 @@ static int ahash_digest(struct ahash_request *req)
|
||||
|
||||
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
||||
digestsize);
|
||||
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
||||
dev_err(jrdev, "unable to map dst\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
@ -1125,11 +1175,19 @@ static int ahash_final_no_ctx(struct ahash_request *req)
|
||||
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
|
||||
|
||||
state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, state->buf_dma)) {
|
||||
dev_err(jrdev, "unable to map src\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
|
||||
|
||||
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
||||
digestsize);
|
||||
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
||||
dev_err(jrdev, "unable to map dst\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
edesc->src_nents = 0;
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -1197,9 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
|
||||
DESC_JOB_IO_LEN;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
edesc->dst_dma = 0;
|
||||
|
||||
state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
|
||||
buf, *buflen);
|
||||
@ -1216,9 +1272,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
|
||||
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
|
||||
HDR_REVERSE);
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
|
||||
|
||||
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
|
||||
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
@ -1297,8 +1363,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
|
||||
DESC_JOB_IO_LEN;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
|
||||
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
|
||||
state->buf_dma, buflen,
|
||||
@ -1307,11 +1371,22 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
|
||||
src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
|
||||
chained);
|
||||
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
|
||||
req->nbytes, LDST_SGF);
|
||||
|
||||
edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
|
||||
digestsize);
|
||||
if (dma_mapping_error(jrdev, edesc->dst_dma)) {
|
||||
dev_err(jrdev, "unable to map dst\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
@ -1380,13 +1455,19 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
edesc->sec4_sg_bytes = sec4_sg_bytes;
|
||||
edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
|
||||
DESC_JOB_IO_LEN;
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
|
||||
sec4_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
edesc->dst_dma = 0;
|
||||
|
||||
if (src_nents) {
|
||||
sg_to_sec4_sg_last(req->src, src_nents,
|
||||
edesc->sec4_sg, 0);
|
||||
edesc->sec4_sg_dma = dma_map_single(jrdev,
|
||||
edesc->sec4_sg,
|
||||
sec4_sg_bytes,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
|
||||
dev_err(jrdev, "unable to map S/G table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
src_dma = edesc->sec4_sg_dma;
|
||||
options = LDST_SGF;
|
||||
} else {
|
||||
@ -1404,7 +1485,9 @@ static int ahash_update_first(struct ahash_request *req)
|
||||
|
||||
append_seq_in_ptr(desc, src_dma, to_hash, options);
|
||||
|
||||
map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
|
||||
ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
|
||||
@ -1453,6 +1536,7 @@ static int ahash_init(struct ahash_request *req)
|
||||
state->final = ahash_final_no_ctx;
|
||||
|
||||
state->current_buf = 0;
|
||||
state->buf_dma = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1787,8 +1871,36 @@ caam_hash_alloc(struct caam_hash_template *template,
|
||||
|
||||
static int __init caam_algapi_hash_init(void)
|
||||
{
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct device *ctrldev;
|
||||
void *priv;
|
||||
int i = 0, err = 0;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
||||
if (!dev_node)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(dev_node);
|
||||
if (!pdev) {
|
||||
of_node_put(dev_node);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ctrldev = &pdev->dev;
|
||||
priv = dev_get_drvdata(ctrldev);
|
||||
of_node_put(dev_node);
|
||||
|
||||
/*
|
||||
* If priv is NULL, it's probably because the caam driver wasn't
|
||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
||||
*/
|
||||
if (!priv)
|
||||
return -ENODEV;
|
||||
|
||||
INIT_LIST_HEAD(&hash_list);
|
||||
|
||||
/* register crypto algorithms the device supports */
|
||||
|
@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
|
||||
max - copied_idx, false);
|
||||
}
|
||||
|
||||
static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
|
||||
static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
|
||||
{
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
u32 *desc = ctx->sh_desc;
|
||||
@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
|
||||
|
||||
ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
|
||||
static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
|
||||
{
|
||||
struct device *jrdev = ctx->jrdev;
|
||||
struct buf_data *bd = &ctx->bufs[buf_id];
|
||||
@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
|
||||
HDR_REVERSE);
|
||||
|
||||
bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(jrdev, bd->addr)) {
|
||||
dev_err(jrdev, "unable to map dst\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
|
||||
#ifdef DEBUG
|
||||
print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
|
||||
desc, desc_bytes(desc), 1);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void caam_cleanup(struct hwrng *rng)
|
||||
@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng)
|
||||
rng_unmap_ctx(rng_ctx);
|
||||
}
|
||||
|
||||
static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
|
||||
static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
|
||||
{
|
||||
struct buf_data *bd = &ctx->bufs[buf_id];
|
||||
int err;
|
||||
|
||||
err = rng_create_job_desc(ctx, buf_id);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rng_create_job_desc(ctx, buf_id);
|
||||
atomic_set(&bd->empty, BUF_EMPTY);
|
||||
submit_job(ctx, buf_id == ctx->current_buf);
|
||||
wait_for_completion(&bd->filled);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
|
||||
static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
|
||||
{
|
||||
int err;
|
||||
|
||||
ctx->jrdev = jrdev;
|
||||
rng_create_sh_desc(ctx);
|
||||
|
||||
err = rng_create_sh_desc(ctx);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ctx->current_buf = 0;
|
||||
ctx->cur_buf_idx = 0;
|
||||
caam_init_buf(ctx, 0);
|
||||
caam_init_buf(ctx, 1);
|
||||
|
||||
err = caam_init_buf(ctx, 0);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = caam_init_buf(ctx, 1);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct hwrng caam_rng = {
|
||||
@ -278,6 +308,35 @@ static void __exit caam_rng_exit(void)
|
||||
static int __init caam_rng_init(void)
|
||||
{
|
||||
struct device *dev;
|
||||
struct device_node *dev_node;
|
||||
struct platform_device *pdev;
|
||||
struct device *ctrldev;
|
||||
void *priv;
|
||||
int err;
|
||||
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
|
||||
if (!dev_node) {
|
||||
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
|
||||
if (!dev_node)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pdev = of_find_device_by_node(dev_node);
|
||||
if (!pdev) {
|
||||
of_node_put(dev_node);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ctrldev = &pdev->dev;
|
||||
priv = dev_get_drvdata(ctrldev);
|
||||
of_node_put(dev_node);
|
||||
|
||||
/*
|
||||
* If priv is NULL, it's probably because the caam driver wasn't
|
||||
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
|
||||
*/
|
||||
if (!priv)
|
||||
return -ENODEV;
|
||||
|
||||
dev = caam_jr_alloc();
|
||||
if (IS_ERR(dev)) {
|
||||
@ -287,7 +346,9 @@ static int __init caam_rng_init(void)
|
||||
rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
|
||||
if (!rng_ctx)
|
||||
return -ENOMEM;
|
||||
caam_init_rng(rng_ctx, dev);
|
||||
err = caam_init_rng(rng_ctx, dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
dev_info(dev, "registering rng-caam\n");
|
||||
return hwrng_register(&caam_rng);
|
||||
|
@ -5,6 +5,7 @@
|
||||
* Copyright 2008-2012 Freescale Semiconductor, Inc.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
@ -87,6 +88,17 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
|
||||
|
||||
/* Set the bit to request direct access to DECO0 */
|
||||
topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
|
||||
|
||||
if (ctrlpriv->virt_en == 1) {
|
||||
setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
|
||||
|
||||
while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
|
||||
--timeout)
|
||||
cpu_relax();
|
||||
|
||||
timeout = 100000;
|
||||
}
|
||||
|
||||
setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
|
||||
|
||||
while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
|
||||
@ -129,6 +141,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
|
||||
*status = rd_reg32(&topregs->deco.op_status_hi) &
|
||||
DECO_OP_STATUS_HI_ERR_MASK;
|
||||
|
||||
if (ctrlpriv->virt_en == 1)
|
||||
clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
|
||||
|
||||
/* Mark the DECO as free */
|
||||
clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
|
||||
|
||||
@ -295,9 +310,6 @@ static int caam_remove(struct platform_device *pdev)
|
||||
/* Unmap controller region */
|
||||
iounmap(&topregs->ctrl);
|
||||
|
||||
kfree(ctrlpriv->jrpdev);
|
||||
kfree(ctrlpriv);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -380,9 +392,11 @@ static int caam_probe(struct platform_device *pdev)
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct caam_perfmon *perfmon;
|
||||
#endif
|
||||
u64 cha_vid;
|
||||
u32 scfgr, comp_params;
|
||||
u32 cha_vid_ls;
|
||||
|
||||
ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
|
||||
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
|
||||
GFP_KERNEL);
|
||||
if (!ctrlpriv)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -413,13 +427,40 @@ static int caam_probe(struct platform_device *pdev)
|
||||
setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
|
||||
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
|
||||
|
||||
/*
|
||||
* Read the Compile Time paramters and SCFGR to determine
|
||||
* if Virtualization is enabled for this platform
|
||||
*/
|
||||
comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
|
||||
scfgr = rd_reg32(&topregs->ctrl.scfgr);
|
||||
|
||||
ctrlpriv->virt_en = 0;
|
||||
if (comp_params & CTPR_MS_VIRT_EN_INCL) {
|
||||
/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
|
||||
* VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
|
||||
*/
|
||||
if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
|
||||
(!(comp_params & CTPR_MS_VIRT_EN_POR) &&
|
||||
(scfgr & SCFGR_VIRT_EN)))
|
||||
ctrlpriv->virt_en = 1;
|
||||
} else {
|
||||
/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
|
||||
if (comp_params & CTPR_MS_VIRT_EN_POR)
|
||||
ctrlpriv->virt_en = 1;
|
||||
}
|
||||
|
||||
if (ctrlpriv->virt_en == 1)
|
||||
setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
|
||||
JRSTART_JR1_START | JRSTART_JR2_START |
|
||||
JRSTART_JR3_START);
|
||||
|
||||
if (sizeof(dma_addr_t) == sizeof(u64))
|
||||
if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
|
||||
dma_set_mask(dev, DMA_BIT_MASK(40));
|
||||
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
|
||||
else
|
||||
dma_set_mask(dev, DMA_BIT_MASK(36));
|
||||
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
|
||||
else
|
||||
dma_set_mask(dev, DMA_BIT_MASK(32));
|
||||
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
|
||||
|
||||
/*
|
||||
* Detect and enable JobRs
|
||||
@ -432,8 +473,9 @@ static int caam_probe(struct platform_device *pdev)
|
||||
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
|
||||
rspec++;
|
||||
|
||||
ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
|
||||
GFP_KERNEL);
|
||||
ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
|
||||
sizeof(struct platform_device *) * rspec,
|
||||
GFP_KERNEL);
|
||||
if (ctrlpriv->jrpdev == NULL) {
|
||||
iounmap(&topregs->ctrl);
|
||||
return -ENOMEM;
|
||||
@ -456,8 +498,9 @@ static int caam_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
/* Check to see if QI present. If so, enable */
|
||||
ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
|
||||
CTPR_QI_MASK);
|
||||
ctrlpriv->qi_present =
|
||||
!!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
|
||||
CTPR_MS_QI_MASK);
|
||||
if (ctrlpriv->qi_present) {
|
||||
ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
|
||||
/* This is all that's required to physically enable QI */
|
||||
@ -471,13 +514,13 @@ static int caam_probe(struct platform_device *pdev)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
|
||||
cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
|
||||
|
||||
/*
|
||||
* If SEC has RNG version >= 4 and RNG state handle has not been
|
||||
* already instantiated, do RNG instantiation
|
||||
*/
|
||||
if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
|
||||
if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
|
||||
ctrlpriv->rng4_sh_init =
|
||||
rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
|
||||
/*
|
||||
@ -531,7 +574,8 @@ static int caam_probe(struct platform_device *pdev)
|
||||
|
||||
/* NOTE: RTIC detection ought to go here, around Si time */
|
||||
|
||||
caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
|
||||
caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
|
||||
(u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
|
||||
|
||||
/* Report "alive" for developer to see */
|
||||
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
|
||||
@ -547,7 +591,7 @@ static int caam_probe(struct platform_device *pdev)
|
||||
*/
|
||||
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
|
||||
|
||||
ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
|
||||
ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
|
||||
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
|
||||
|
||||
/* Controller-level - performance monitor counters */
|
||||
|
@ -321,7 +321,6 @@ struct sec4_sg_entry {
|
||||
/* Continue - Not the last FIFO store to come */
|
||||
#define FIFOST_CONT_SHIFT 23
|
||||
#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
|
||||
#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
|
||||
|
||||
/*
|
||||
* Extended Length - use 32-bit extended length that
|
||||
|
@ -82,6 +82,7 @@ struct caam_drv_private {
|
||||
u8 total_jobrs; /* Total Job Rings in device */
|
||||
u8 qi_present; /* Nonzero if QI present in device */
|
||||
int secvio_irq; /* Security violation interrupt number */
|
||||
int virt_en; /* Virtualization enabled in CAAM */
|
||||
|
||||
#define RNG4_MAX_HANDLES 2
|
||||
/* RNG4 block */
|
||||
|
@ -476,11 +476,11 @@ static int caam_jr_probe(struct platform_device *pdev)
|
||||
|
||||
if (sizeof(dma_addr_t) == sizeof(u64))
|
||||
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
|
||||
dma_set_mask(jrdev, DMA_BIT_MASK(40));
|
||||
dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
|
||||
else
|
||||
dma_set_mask(jrdev, DMA_BIT_MASK(36));
|
||||
dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
|
||||
else
|
||||
dma_set_mask(jrdev, DMA_BIT_MASK(32));
|
||||
dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
|
||||
|
||||
/* Identify the interrupt */
|
||||
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
|
||||
|
@ -84,6 +84,7 @@
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
#ifdef __BIG_ENDIAN
|
||||
static inline void wr_reg64(u64 __iomem *reg, u64 data)
|
||||
{
|
||||
wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
|
||||
@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg)
|
||||
return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
|
||||
((u64)rd_reg32((u32 __iomem *)reg + 1));
|
||||
}
|
||||
#else
|
||||
#ifdef __LITTLE_ENDIAN
|
||||
static inline void wr_reg64(u64 __iomem *reg, u64 data)
|
||||
{
|
||||
wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
|
||||
wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
|
||||
}
|
||||
|
||||
static inline u64 rd_reg64(u64 __iomem *reg)
|
||||
{
|
||||
return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
|
||||
((u64)rd_reg32((u32 __iomem *)reg));
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -114,45 +130,45 @@ struct jr_outentry {
|
||||
*/
|
||||
|
||||
/* Number of DECOs */
|
||||
#define CHA_NUM_DECONUM_SHIFT 56
|
||||
#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
|
||||
#define CHA_NUM_MS_DECONUM_SHIFT 24
|
||||
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
|
||||
|
||||
/* CHA Version IDs */
|
||||
#define CHA_ID_AES_SHIFT 0
|
||||
#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT)
|
||||
#define CHA_ID_LS_AES_SHIFT 0
|
||||
#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
|
||||
|
||||
#define CHA_ID_DES_SHIFT 4
|
||||
#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT)
|
||||
#define CHA_ID_LS_DES_SHIFT 4
|
||||
#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
|
||||
|
||||
#define CHA_ID_ARC4_SHIFT 8
|
||||
#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT)
|
||||
#define CHA_ID_LS_ARC4_SHIFT 8
|
||||
#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
|
||||
|
||||
#define CHA_ID_MD_SHIFT 12
|
||||
#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT)
|
||||
#define CHA_ID_LS_MD_SHIFT 12
|
||||
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
|
||||
|
||||
#define CHA_ID_RNG_SHIFT 16
|
||||
#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT)
|
||||
#define CHA_ID_LS_RNG_SHIFT 16
|
||||
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
|
||||
|
||||
#define CHA_ID_SNW8_SHIFT 20
|
||||
#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT)
|
||||
#define CHA_ID_LS_SNW8_SHIFT 20
|
||||
#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT)
|
||||
|
||||
#define CHA_ID_KAS_SHIFT 24
|
||||
#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT)
|
||||
#define CHA_ID_LS_KAS_SHIFT 24
|
||||
#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT)
|
||||
|
||||
#define CHA_ID_PK_SHIFT 28
|
||||
#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT)
|
||||
#define CHA_ID_LS_PK_SHIFT 28
|
||||
#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT)
|
||||
|
||||
#define CHA_ID_CRC_SHIFT 32
|
||||
#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT)
|
||||
#define CHA_ID_MS_CRC_SHIFT 0
|
||||
#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT)
|
||||
|
||||
#define CHA_ID_SNW9_SHIFT 36
|
||||
#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT)
|
||||
#define CHA_ID_MS_SNW9_SHIFT 4
|
||||
#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT)
|
||||
|
||||
#define CHA_ID_DECO_SHIFT 56
|
||||
#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT)
|
||||
#define CHA_ID_MS_DECO_SHIFT 24
|
||||
#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT)
|
||||
|
||||
#define CHA_ID_JR_SHIFT 60
|
||||
#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT)
|
||||
#define CHA_ID_MS_JR_SHIFT 28
|
||||
#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
|
||||
|
||||
struct sec_vid {
|
||||
u16 ip_id;
|
||||
@ -172,10 +188,14 @@ struct caam_perfmon {
|
||||
u64 rsvd[13];
|
||||
|
||||
/* CAAM Hardware Instantiation Parameters fa0-fbf */
|
||||
u64 cha_rev; /* CRNR - CHA Revision Number */
|
||||
#define CTPR_QI_SHIFT 57
|
||||
#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
|
||||
u64 comp_parms; /* CTPR - Compile Parameters Register */
|
||||
u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/
|
||||
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
|
||||
#define CTPR_MS_QI_SHIFT 25
|
||||
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
|
||||
#define CTPR_MS_VIRT_EN_INCL 0x00000001
|
||||
#define CTPR_MS_VIRT_EN_POR 0x00000002
|
||||
u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
|
||||
u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
|
||||
u64 rsvd1[2];
|
||||
|
||||
/* CAAM Global Status fc0-fdf */
|
||||
@ -189,9 +209,12 @@ struct caam_perfmon {
|
||||
/* Component Instantiation Parameters fe0-fff */
|
||||
u32 rtic_id; /* RVID - RTIC Version ID */
|
||||
u32 ccb_id; /* CCBVID - CCB Version ID */
|
||||
u64 cha_id; /* CHAVID - CHA Version ID */
|
||||
u64 cha_num; /* CHANUM - CHA Number */
|
||||
u64 caam_id; /* CAAMVID - CAAM Version ID */
|
||||
u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/
|
||||
u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/
|
||||
u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
|
||||
u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/
|
||||
u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
|
||||
u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
|
||||
};
|
||||
|
||||
/* LIODN programming for DMA configuration */
|
||||
@ -304,9 +327,12 @@ struct caam_ctrl {
|
||||
/* Bus Access Configuration Section 010-11f */
|
||||
/* Read/Writable */
|
||||
struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
|
||||
u32 rsvd3[12];
|
||||
u32 rsvd3[11];
|
||||
u32 jrstart; /* JRSTART - Job Ring Start Register */
|
||||
struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
|
||||
u32 rsvd4[7];
|
||||
u32 rsvd4[5];
|
||||
u32 deco_rsr; /* DECORSR - Deco Request Source */
|
||||
u32 rsvd11;
|
||||
u32 deco_rq; /* DECORR - DECO Request */
|
||||
struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
|
||||
u32 rsvd5[22];
|
||||
@ -347,7 +373,10 @@ struct caam_ctrl {
|
||||
#define MCFGR_DMA_RESET 0x10000000
|
||||
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
|
||||
#define SCFGR_RDBENABLE 0x00000400
|
||||
#define SCFGR_VIRT_EN 0x00008000
|
||||
#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
|
||||
#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */
|
||||
#define DECORSR_VALID 0x80000000
|
||||
#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/
|
||||
|
||||
/* AXI read cache control */
|
||||
@ -365,6 +394,12 @@ struct caam_ctrl {
|
||||
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
|
||||
#define MCFGR_BURST_64 0x00000001 /* Max burst size */
|
||||
|
||||
/* JRSTART register offsets */
|
||||
#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
|
||||
#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */
|
||||
#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */
|
||||
#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */
|
||||
|
||||
/*
|
||||
* caam_job_ring - direct job ring setup
|
||||
* 1-4 possible per instantiation, base + 1000/2000/3000/4000
|
||||
|
@ -1,6 +1,11 @@
|
||||
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
|
||||
ccp-objs := ccp-dev.o ccp-ops.o
|
||||
ifdef CONFIG_X86
|
||||
ccp-objs += ccp-pci.o
|
||||
endif
|
||||
ifdef CONFIG_ARM64
|
||||
ccp-objs += ccp-platform.o
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
|
||||
ccp-crypto-objs := ccp-crypto-main.o \
|
||||
|
@ -20,7 +20,9 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/hw_random.h>
|
||||
#include <linux/cpu.h>
|
||||
#ifdef CONFIG_X86
|
||||
#include <asm/cpu_device_id.h>
|
||||
#endif
|
||||
#include <linux/ccp.h>
|
||||
|
||||
#include "ccp-dev.h"
|
||||
@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp)
|
||||
/* Build queue interrupt mask (two interrupts per queue) */
|
||||
qim |= cmd_q->int_ok | cmd_q->int_err;
|
||||
|
||||
#ifdef CONFIG_ARM64
|
||||
/* For arm64 set the recommended queue cache settings */
|
||||
iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
|
||||
(CMD_Q_CACHE_INC * i));
|
||||
#endif
|
||||
|
||||
dev_dbg(dev, "queue #%u available\n", i);
|
||||
}
|
||||
if (ccp->cmd_q_count == 0) {
|
||||
@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
static const struct x86_cpu_id ccp_support[] = {
|
||||
{ X86_VENDOR_AMD, 22, },
|
||||
};
|
||||
#endif
|
||||
|
||||
static int __init ccp_mod_init(void)
|
||||
{
|
||||
#ifdef CONFIG_X86
|
||||
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
|
||||
int ret;
|
||||
|
||||
@ -589,12 +600,30 @@ static int __init ccp_mod_init(void)
|
||||
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM64
|
||||
int ret;
|
||||
|
||||
ret = ccp_platform_init();
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Don't leave the driver loaded if init failed */
|
||||
if (!ccp_get_device()) {
|
||||
ccp_platform_exit();
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static void __exit ccp_mod_exit(void)
|
||||
{
|
||||
#ifdef CONFIG_X86
|
||||
struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
|
||||
|
||||
switch (cpuinfo->x86) {
|
||||
@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void)
|
||||
ccp_pci_exit();
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM64
|
||||
ccp_platform_exit();
|
||||
#endif
|
||||
}
|
||||
|
||||
module_init(ccp_mod_init);
|
||||
|
@ -23,8 +23,6 @@
|
||||
#include <linux/hw_random.h>
|
||||
|
||||
|
||||
#define IO_OFFSET 0x20000
|
||||
|
||||
#define MAX_DMAPOOL_NAME_LEN 32
|
||||
|
||||
#define MAX_HW_QUEUES 5
|
||||
@ -32,6 +30,9 @@
|
||||
|
||||
#define TRNG_RETRIES 10
|
||||
|
||||
#define CACHE_NONE 0x00
|
||||
#define CACHE_WB_NO_ALLOC 0xb7
|
||||
|
||||
|
||||
/****** Register Mappings ******/
|
||||
#define Q_MASK_REG 0x000
|
||||
@ -50,7 +51,7 @@
|
||||
#define CMD_Q_INT_STATUS_BASE 0x214
|
||||
#define CMD_Q_STATUS_INCR 0x20
|
||||
|
||||
#define CMD_Q_CACHE 0x228
|
||||
#define CMD_Q_CACHE_BASE 0x228
|
||||
#define CMD_Q_CACHE_INC 0x20
|
||||
|
||||
#define CMD_Q_ERROR(__qs) ((__qs) & 0x0000003f);
|
||||
@ -194,6 +195,7 @@ struct ccp_device {
|
||||
void *dev_specific;
|
||||
int (*get_irq)(struct ccp_device *ccp);
|
||||
void (*free_irq)(struct ccp_device *ccp);
|
||||
unsigned int irq;
|
||||
|
||||
/*
|
||||
* I/O area used for device communication. The register mapping
|
||||
@ -254,12 +256,18 @@ struct ccp_device {
|
||||
/* Suspend support */
|
||||
unsigned int suspending;
|
||||
wait_queue_head_t suspend_queue;
|
||||
|
||||
/* DMA caching attribute support */
|
||||
unsigned int axcache;
|
||||
};
|
||||
|
||||
|
||||
int ccp_pci_init(void);
|
||||
void ccp_pci_exit(void);
|
||||
|
||||
int ccp_platform_init(void);
|
||||
void ccp_platform_exit(void);
|
||||
|
||||
struct ccp_device *ccp_alloc_struct(struct device *dev);
|
||||
int ccp_init(struct ccp_device *ccp);
|
||||
void ccp_destroy(struct ccp_device *ccp);
|
||||
|
@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
goto e_ksb;
|
||||
|
||||
ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
|
||||
true);
|
||||
false);
|
||||
ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
|
||||
CCP_PASSTHRU_BYTESWAP_NOOP);
|
||||
if (ret) {
|
||||
@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
goto e_exp;
|
||||
|
||||
ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
|
||||
true);
|
||||
false);
|
||||
src.address += o_len; /* Adjust the address for the copy operation */
|
||||
ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
|
||||
true);
|
||||
false);
|
||||
src.address -= o_len; /* Reset the address to original value */
|
||||
|
||||
/* Prepare the output area for the operation */
|
||||
@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
|
||||
/* Copy the ECC modulus */
|
||||
ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
|
||||
/* Copy the first operand */
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
|
||||
ecc->u.mm.operand_1_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
|
||||
if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
|
||||
/* Copy the second operand */
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
|
||||
ecc->u.mm.operand_2_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
}
|
||||
|
||||
@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
|
||||
/* Copy the ECC modulus */
|
||||
ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
|
||||
/* Copy the first point X and Y coordinate */
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
|
||||
ecc->u.pm.point_1.x_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
|
||||
ecc->u.pm.point_1.y_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
|
||||
/* Set the first point Z coordianate to 1 */
|
||||
@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
/* Copy the second point X and Y coordinate */
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
|
||||
ecc->u.pm.point_2.x_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
|
||||
ecc->u.pm.point_2.y_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
|
||||
/* Set the second point Z coordianate to 1 */
|
||||
@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
|
||||
/* Copy the Domain "a" parameter */
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
|
||||
ecc->u.pm.domain_a_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
|
||||
if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
|
||||
/* Copy the scalar value */
|
||||
ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
|
||||
ecc->u.pm.scalar_len,
|
||||
CCP_ECC_OPERAND_SIZE, true);
|
||||
CCP_ECC_OPERAND_SIZE, false);
|
||||
src.address += CCP_ECC_OPERAND_SIZE;
|
||||
}
|
||||
}
|
||||
|
@ -12,8 +12,10 @@
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/interrupt.h>
|
||||
@ -24,6 +26,8 @@
|
||||
#include "ccp-dev.h"
|
||||
|
||||
#define IO_BAR 2
|
||||
#define IO_OFFSET 0x20000
|
||||
|
||||
#define MSIX_VECTORS 2
|
||||
|
||||
struct ccp_msix {
|
||||
@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
|
||||
ccp->irq = pdev->irq;
|
||||
ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
|
||||
if (ret) {
|
||||
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
|
||||
goto e_msi;
|
||||
@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
|
||||
dev);
|
||||
pci_disable_msix(pdev);
|
||||
} else {
|
||||
free_irq(pdev->irq, dev);
|
||||
free_irq(ccp->irq, dev);
|
||||
pci_disable_msi(pdev);
|
||||
}
|
||||
}
|
||||
@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
|
||||
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
|
||||
resource_size_t io_len;
|
||||
unsigned long io_flags;
|
||||
int bar;
|
||||
|
||||
io_flags = pci_resource_flags(pdev, IO_BAR);
|
||||
io_len = pci_resource_len(pdev, IO_BAR);
|
||||
if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
|
||||
return IO_BAR;
|
||||
|
||||
for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
|
||||
io_flags = pci_resource_flags(pdev, bar);
|
||||
io_len = pci_resource_len(pdev, bar);
|
||||
if ((io_flags & IORESOURCE_MEM) &&
|
||||
(io_len >= (IO_OFFSET + 0x800)))
|
||||
return bar;
|
||||
}
|
||||
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
}
|
||||
ccp->io_regs = ccp->io_map + IO_OFFSET;
|
||||
|
||||
ret = dma_set_mask(dev, DMA_BIT_MASK(48));
|
||||
if (ret == 0) {
|
||||
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
|
||||
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
|
||||
if (ret) {
|
||||
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
"pci_set_consistent_dma_mask failed (%d)\n",
|
||||
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
|
||||
ret);
|
||||
goto e_bar0;
|
||||
}
|
||||
} else {
|
||||
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
|
||||
if (ret) {
|
||||
dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
|
||||
goto e_bar0;
|
||||
goto e_iomap;
|
||||
}
|
||||
}
|
||||
|
||||
@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
|
||||
ret = ccp_init(ccp);
|
||||
if (ret)
|
||||
goto e_bar0;
|
||||
goto e_iomap;
|
||||
|
||||
dev_notice(dev, "enabled\n");
|
||||
|
||||
return 0;
|
||||
|
||||
e_bar0:
|
||||
e_iomap:
|
||||
pci_iounmap(pdev, ccp->io_map);
|
||||
|
||||
e_device:
|
||||
|
230
drivers/crypto/ccp/ccp-platform.c
Normal file
230
drivers/crypto/ccp/ccp-platform.c
Normal file
@ -0,0 +1,230 @@
|
||||
/*
|
||||
* AMD Cryptographic Coprocessor (CCP) driver
|
||||
*
|
||||
* Copyright (C) 2014 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Author: Tom Lendacky <thomas.lendacky@amd.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/ccp.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include "ccp-dev.h"
|
||||
|
||||
|
||||
static int ccp_get_irq(struct ccp_device *ccp)
|
||||
{
|
||||
struct device *dev = ccp->dev;
|
||||
struct platform_device *pdev = container_of(dev,
|
||||
struct platform_device, dev);
|
||||
int ret;
|
||||
|
||||
ret = platform_get_irq(pdev, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ccp->irq = ret;
|
||||
ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
|
||||
if (ret) {
|
||||
dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ccp_get_irqs(struct ccp_device *ccp)
|
||||
{
|
||||
struct device *dev = ccp->dev;
|
||||
int ret;
|
||||
|
||||
ret = ccp_get_irq(ccp);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
/* Couldn't get an interrupt */
|
||||
dev_notice(dev, "could not enable interrupts (%d)\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ccp_free_irqs(struct ccp_device *ccp)
|
||||
{
|
||||
struct device *dev = ccp->dev;
|
||||
|
||||
free_irq(ccp->irq, dev);
|
||||
}
|
||||
|
||||
static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
|
||||
{
|
||||
struct device *dev = ccp->dev;
|
||||
struct platform_device *pdev = container_of(dev,
|
||||
struct platform_device, dev);
|
||||
struct resource *ior;
|
||||
|
||||
ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (ior && (resource_size(ior) >= 0x800))
|
||||
return ior;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int ccp_platform_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct ccp_device *ccp;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct resource *ior;
|
||||
int ret;
|
||||
|
||||
ret = -ENOMEM;
|
||||
ccp = ccp_alloc_struct(dev);
|
||||
if (!ccp)
|
||||
goto e_err;
|
||||
|
||||
ccp->dev_specific = NULL;
|
||||
ccp->get_irq = ccp_get_irqs;
|
||||
ccp->free_irq = ccp_free_irqs;
|
||||
|
||||
ior = ccp_find_mmio_area(ccp);
|
||||
ccp->io_map = devm_ioremap_resource(dev, ior);
|
||||
if (IS_ERR(ccp->io_map)) {
|
||||
ret = PTR_ERR(ccp->io_map);
|
||||
goto e_free;
|
||||
}
|
||||
ccp->io_regs = ccp->io_map;
|
||||
|
||||
if (!dev->dma_mask)
|
||||
dev->dma_mask = &dev->coherent_dma_mask;
|
||||
*(dev->dma_mask) = DMA_BIT_MASK(48);
|
||||
dev->coherent_dma_mask = DMA_BIT_MASK(48);
|
||||
|
||||
if (of_property_read_bool(dev->of_node, "dma-coherent"))
|
||||
ccp->axcache = CACHE_WB_NO_ALLOC;
|
||||
else
|
||||
ccp->axcache = CACHE_NONE;
|
||||
|
||||
dev_set_drvdata(dev, ccp);
|
||||
|
||||
ret = ccp_init(ccp);
|
||||
if (ret)
|
||||
goto e_free;
|
||||
|
||||
dev_notice(dev, "enabled\n");
|
||||
|
||||
return 0;
|
||||
|
||||
e_free:
|
||||
kfree(ccp);
|
||||
|
||||
e_err:
|
||||
dev_notice(dev, "initialization failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ccp_platform_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct ccp_device *ccp = dev_get_drvdata(dev);
|
||||
|
||||
ccp_destroy(ccp);
|
||||
|
||||
kfree(ccp);
|
||||
|
||||
dev_notice(dev, "disabled\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static int ccp_platform_suspend(struct platform_device *pdev,
|
||||
pm_message_t state)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct ccp_device *ccp = dev_get_drvdata(dev);
|
||||
unsigned long flags;
|
||||
unsigned int i;
|
||||
|
||||
spin_lock_irqsave(&ccp->cmd_lock, flags);
|
||||
|
||||
ccp->suspending = 1;
|
||||
|
||||
/* Wake all the queue kthreads to prepare for suspend */
|
||||
for (i = 0; i < ccp->cmd_q_count; i++)
|
||||
wake_up_process(ccp->cmd_q[i].kthread);
|
||||
|
||||
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
|
||||
|
||||
/* Wait for all queue kthreads to say they're done */
|
||||
while (!ccp_queues_suspended(ccp))
|
||||
wait_event_interruptible(ccp->suspend_queue,
|
||||
ccp_queues_suspended(ccp));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ccp_platform_resume(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct ccp_device *ccp = dev_get_drvdata(dev);
|
||||
unsigned long flags;
|
||||
unsigned int i;
|
||||
|
||||
spin_lock_irqsave(&ccp->cmd_lock, flags);
|
||||
|
||||
ccp->suspending = 0;
|
||||
|
||||
/* Wake up all the kthreads */
|
||||
for (i = 0; i < ccp->cmd_q_count; i++) {
|
||||
ccp->cmd_q[i].suspended = 0;
|
||||
wake_up_process(ccp->cmd_q[i].kthread);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct of_device_id ccp_platform_ids[] = {
|
||||
{ .compatible = "amd,ccp-seattle-v1a" },
|
||||
{ },
|
||||
};
|
||||
|
||||
static struct platform_driver ccp_platform_driver = {
|
||||
.driver = {
|
||||
.name = "AMD Cryptographic Coprocessor",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = ccp_platform_ids,
|
||||
},
|
||||
.probe = ccp_platform_probe,
|
||||
.remove = ccp_platform_remove,
|
||||
#ifdef CONFIG_PM
|
||||
.suspend = ccp_platform_suspend,
|
||||
.resume = ccp_platform_resume,
|
||||
#endif
|
||||
};
|
||||
|
||||
int ccp_platform_init(void)
|
||||
{
|
||||
return platform_driver_register(&ccp_platform_driver);
|
||||
}
|
||||
|
||||
void ccp_platform_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&ccp_platform_driver);
|
||||
}
|
@ -1247,7 +1247,7 @@ static struct vio_device_id nx842_driver_ids[] = {
|
||||
static struct vio_driver nx842_driver = {
|
||||
.name = MODULE_NAME,
|
||||
.probe = nx842_probe,
|
||||
.remove = nx842_remove,
|
||||
.remove = __exit_p(nx842_remove),
|
||||
.get_desired_dma = nx842_get_desired_dma,
|
||||
.id_table = nx842_driver_ids,
|
||||
};
|
||||
|
23
drivers/crypto/qat/Kconfig
Normal file
23
drivers/crypto/qat/Kconfig
Normal file
@ -0,0 +1,23 @@
|
||||
config CRYPTO_DEV_QAT
|
||||
tristate
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_AUTHENC
|
||||
select CRYPTO_ALGAPI
|
||||
select CRYPTO_AES
|
||||
select CRYPTO_CBC
|
||||
select CRYPTO_SHA1
|
||||
select CRYPTO_SHA256
|
||||
select CRYPTO_SHA512
|
||||
select FW_LOADER
|
||||
|
||||
config CRYPTO_DEV_QAT_DH895xCC
|
||||
tristate "Support for Intel(R) DH895xCC"
|
||||
depends on X86 && PCI
|
||||
default n
|
||||
select CRYPTO_DEV_QAT
|
||||
help
|
||||
Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
|
||||
for accelerating crypto and compression workloads.
|
||||
|
||||
To compile this as a module, choose M here: the module
|
||||
will be called qat_dh895xcc.
|
2
drivers/crypto/qat/Makefile
Normal file
2
drivers/crypto/qat/Makefile
Normal file
@ -0,0 +1,2 @@
|
||||
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
|
||||
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
|
14
drivers/crypto/qat/qat_common/Makefile
Normal file
14
drivers/crypto/qat/qat_common/Makefile
Normal file
@ -0,0 +1,14 @@
|
||||
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
|
||||
intel_qat-objs := adf_cfg.o \
|
||||
adf_ctl_drv.o \
|
||||
adf_dev_mgr.o \
|
||||
adf_init.o \
|
||||
adf_accel_engine.o \
|
||||
adf_aer.o \
|
||||
adf_transport.o \
|
||||
qat_crypto.o \
|
||||
qat_algs.o \
|
||||
qat_uclo.o \
|
||||
qat_hal.o
|
||||
|
||||
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
|
205
drivers/crypto/qat/qat_common/adf_accel_devices.h
Normal file
205
drivers/crypto/qat/qat_common/adf_accel_devices.h
Normal file
@ -0,0 +1,205 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_ACCEL_DEVICES_H_
|
||||
#define ADF_ACCEL_DEVICES_H_
|
||||
#include <linux/module.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/io.h>
|
||||
#include "adf_cfg_common.h"
|
||||
|
||||
#define PCI_VENDOR_ID_INTEL 0x8086
|
||||
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
|
||||
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
|
||||
#define ADF_DH895XCC_PMISC_BAR 1
|
||||
#define ADF_DH895XCC_ETR_BAR 2
|
||||
#define ADF_PCI_MAX_BARS 3
|
||||
#define ADF_DEVICE_NAME_LENGTH 32
|
||||
#define ADF_ETR_MAX_RINGS_PER_BANK 16
|
||||
#define ADF_MAX_MSIX_VECTOR_NAME 16
|
||||
#define ADF_DEVICE_NAME_PREFIX "qat_"
|
||||
|
||||
enum adf_accel_capabilities {
|
||||
ADF_ACCEL_CAPABILITIES_NULL = 0,
|
||||
ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
|
||||
ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
|
||||
ADF_ACCEL_CAPABILITIES_CIPHER = 4,
|
||||
ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
|
||||
ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
|
||||
ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
|
||||
ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
|
||||
};
|
||||
|
||||
struct adf_bar {
|
||||
resource_size_t base_addr;
|
||||
void __iomem *virt_addr;
|
||||
resource_size_t size;
|
||||
} __packed;
|
||||
|
||||
struct adf_accel_msix {
|
||||
struct msix_entry *entries;
|
||||
char **names;
|
||||
} __packed;
|
||||
|
||||
struct adf_accel_pci {
|
||||
struct pci_dev *pci_dev;
|
||||
struct adf_accel_msix msix_entries;
|
||||
struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
|
||||
uint8_t revid;
|
||||
uint8_t sku;
|
||||
} __packed;
|
||||
|
||||
enum dev_state {
|
||||
DEV_DOWN = 0,
|
||||
DEV_UP
|
||||
};
|
||||
|
||||
enum dev_sku_info {
|
||||
DEV_SKU_1 = 0,
|
||||
DEV_SKU_2,
|
||||
DEV_SKU_3,
|
||||
DEV_SKU_4,
|
||||
DEV_SKU_UNKNOWN,
|
||||
};
|
||||
|
||||
static inline const char *get_sku_info(enum dev_sku_info info)
|
||||
{
|
||||
switch (info) {
|
||||
case DEV_SKU_1:
|
||||
return "SKU1";
|
||||
case DEV_SKU_2:
|
||||
return "SKU2";
|
||||
case DEV_SKU_3:
|
||||
return "SKU3";
|
||||
case DEV_SKU_4:
|
||||
return "SKU4";
|
||||
case DEV_SKU_UNKNOWN:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return "Unknown SKU";
|
||||
}
|
||||
|
||||
struct adf_hw_device_class {
|
||||
const char *name;
|
||||
const enum adf_device_type type;
|
||||
uint32_t instances;
|
||||
} __packed;
|
||||
|
||||
struct adf_cfg_device_data;
|
||||
struct adf_accel_dev;
|
||||
struct adf_etr_data;
|
||||
struct adf_etr_ring_data;
|
||||
|
||||
struct adf_hw_device_data {
|
||||
struct adf_hw_device_class *dev_class;
|
||||
uint32_t (*get_accel_mask)(uint32_t fuse);
|
||||
uint32_t (*get_ae_mask)(uint32_t fuse);
|
||||
uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
|
||||
uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
|
||||
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
|
||||
void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
|
||||
void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
|
||||
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
|
||||
void (*free_irq)(struct adf_accel_dev *accel_dev);
|
||||
void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
|
||||
const char *fw_name;
|
||||
uint32_t pci_dev_id;
|
||||
uint32_t fuses;
|
||||
uint32_t accel_capabilities_mask;
|
||||
uint16_t accel_mask;
|
||||
uint16_t ae_mask;
|
||||
uint16_t tx_rings_mask;
|
||||
uint8_t tx_rx_gap;
|
||||
uint8_t instance_id;
|
||||
uint8_t num_banks;
|
||||
uint8_t num_accel;
|
||||
uint8_t num_logical_accel;
|
||||
uint8_t num_engines;
|
||||
} __packed;
|
||||
|
||||
/* CSR write macro - raw (non-barriered) MMIO write to a control register */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)

/* CSR read macro - raw (non-barriered) MMIO read of a control register */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)

/* Convenience accessors from an accel_dev to its PCI-level resources */
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
|
||||
|
||||
struct adf_admin_comms;
|
||||
struct icp_qat_fw_loader_handle;
|
||||
/*
 * State for loading QAT firmware: the UCLO loader handle and the
 * firmware image obtained via request_firmware() (released on unload).
 */
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;	/* UOF image; owned by this struct */
};
|
||||
|
||||
/*
 * Top-level per-device state for one QAT acceleration device.
 * Aggregates transport rings, hardware ops, config table, firmware
 * loader state and the PCI-level description of the device.
 */
struct adf_accel_dev {
	struct adf_etr_data *transport;		/* ring/transport layer */
	struct adf_hw_device_data *hw_device;	/* generation-specific ops */
	struct adf_cfg_device_data *cfg;	/* key/value config table */
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct list_head crypto_list;		/* registered crypto instances */
	unsigned long status;			/* ADF_STATUS_* bit flags */
	atomic_t ref_count;
	struct dentry *debugfs_dir;		/* per-device debugfs root */
	struct list_head list;			/* link in global device list */
	struct module *owner;
	uint8_t accel_id;			/* index used in log messages */
	uint8_t numa_node;
	struct adf_accel_pci accel_pci_dev;
} __packed;
|
||||
#endif
|
168
drivers/crypto/qat/qat_common/adf_accel_engine.c
Normal file
168
drivers/crypto/qat/qat_common/adf_accel_engine.c
Normal file
@ -0,0 +1,168 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/pci.h>
|
||||
#include "adf_cfg.h"
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_common_drv.h"
|
||||
#include "icp_qat_uclo.h"
|
||||
|
||||
int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
|
||||
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
|
||||
void *uof_addr;
|
||||
uint32_t uof_size;
|
||||
|
||||
if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
|
||||
&accel_dev->accel_pci_dev.pci_dev->dev)) {
|
||||
pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
uof_size = loader_data->uof_fw->size;
|
||||
uof_addr = (void *)loader_data->uof_fw->data;
|
||||
if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
|
||||
pr_err("QAT: Failed to map UOF\n");
|
||||
goto out_err;
|
||||
}
|
||||
if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
|
||||
pr_err("QAT: Failed to map UOF\n");
|
||||
goto out_err;
|
||||
}
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
release_firmware(loader_data->uof_fw);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/*
 * adf_ae_fw_release() - Undo adf_ae_fw_load()/adf_ae_init() firmware state.
 * @accel_dev: device whose firmware resources should be released.
 *
 * Releases the firmware image, tears down the UOF object and the HAL
 * handle, and clears the loader pointer. Always returns 0.
 */
int adf_ae_fw_release(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;

	release_firmware(loader_data->uof_fw);
	qat_uclo_del_uof_obj(loader_data->fw_loader);
	qat_hal_deinit(loader_data->fw_loader);
	loader_data->fw_loader = NULL;
	return 0;
}
|
||||
|
||||
int adf_ae_start(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
|
||||
|
||||
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
|
||||
if (hw_data->ae_mask & (1 << ae)) {
|
||||
qat_hal_start(loader_data->fw_loader, ae, 0xFF);
|
||||
ae_ctr++;
|
||||
}
|
||||
}
|
||||
pr_info("QAT: qat_dev%d started %d acceleration engines\n",
|
||||
accel_dev->accel_id, ae_ctr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int adf_ae_stop(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
|
||||
|
||||
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
|
||||
if (hw_data->ae_mask & (1 << ae)) {
|
||||
qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
|
||||
ae_ctr++;
|
||||
}
|
||||
}
|
||||
pr_info("QAT: qat_dev%d stopped %d acceleration engines\n",
|
||||
accel_dev->accel_id, ae_ctr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * adf_ae_reset() - Reset the acceleration engines via the HAL.
 * @accel_dev: device to reset.
 * @ae: engine id; currently unused - the HAL calls below reset all
 *      engines. NOTE(review): parameter kept for interface symmetry;
 *      confirm whether a per-engine reset was intended.
 *
 * Return: 0 on success, -EFAULT if the reset could not be cleared.
 */
static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;

	qat_hal_reset(loader_data->fw_loader);
	if (qat_hal_clr_reset(loader_data->fw_loader))
		return -EFAULT;

	return 0;
}
|
||||
|
||||
int adf_ae_init(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_fw_loader_data *loader_data;
|
||||
|
||||
loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
|
||||
if (!loader_data)
|
||||
return -ENOMEM;
|
||||
|
||||
accel_dev->fw_loader = loader_data;
|
||||
if (qat_hal_init(accel_dev)) {
|
||||
pr_err("QAT: Failed to init the AEs\n");
|
||||
kfree(loader_data);
|
||||
return -EFAULT;
|
||||
}
|
||||
if (adf_ae_reset(accel_dev, 0)) {
|
||||
pr_err("QAT: Failed to reset the AEs\n");
|
||||
qat_hal_deinit(loader_data->fw_loader);
|
||||
kfree(loader_data);
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * adf_ae_shutdown() - Free the firmware-loader bookkeeping.
 * @accel_dev: device being shut down.
 *
 * Counterpart of adf_ae_init(). Always returns 0.
 */
int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
{
	kfree(accel_dev->fw_loader);
	accel_dev->fw_loader = NULL;
	return 0;
}
|
259
drivers/crypto/qat/qat_common/adf_aer.c
Normal file
259
drivers/crypto/qat/qat_common/adf_aer.c
Normal file
@ -0,0 +1,259 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/aer.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/delay.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_common_drv.h"
|
||||
|
||||
static struct workqueue_struct *device_reset_wq;
|
||||
|
||||
/*
 * PCI AER callback: a hardware error was detected on the device.
 * Returns DISCONNECT when the device cannot be recovered (unknown
 * device or permanent channel failure), otherwise asks the PCI core
 * to perform a slot reset (NEED_RESET -> adf_slot_reset()).
 */
static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	pr_info("QAT: Acceleration driver hardware error detected.\n");
	if (!accel_dev) {
		pr_err("QAT: Can't find acceleration device\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_perm_failure) {
		pr_err("QAT: Can't recover from device error\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
|
||||
|
||||
/*
 * Work item describing one scheduled device reset.
 * mode is ADF_DEV_RESET_SYNC/ASYNC; in sync mode the scheduler waits
 * on compl and owns the struct, in async mode the worker frees it.
 */
struct adf_reset_dev_data {
	int mode;			/* enum adf_dev_reset_mode */
	struct adf_accel_dev *accel_dev;
	struct completion compl;	/* signalled when reset finishes */
	struct work_struct reset_work;
};
|
||||
|
||||
#define PPDSTAT_OFFSET 0x7E
|
||||
static void adf_dev_restore(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
|
||||
struct pci_dev *parent = pdev->bus->self;
|
||||
uint16_t ppdstat = 0, bridge_ctl = 0;
|
||||
int pending = 0;
|
||||
|
||||
pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
|
||||
pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
|
||||
pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
|
||||
if (pending) {
|
||||
int ctr = 0;
|
||||
|
||||
do {
|
||||
msleep(100);
|
||||
pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
|
||||
pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
|
||||
} while (pending && ctr++ < 10);
|
||||
}
|
||||
|
||||
if (pending)
|
||||
pr_info("QAT: Transaction still in progress. Proceeding\n");
|
||||
|
||||
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
|
||||
bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
|
||||
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
|
||||
msleep(100);
|
||||
bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
|
||||
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
|
||||
msleep(100);
|
||||
pci_restore_state(pdev);
|
||||
pci_save_state(pdev);
|
||||
}
|
||||
|
||||
/*
 * Workqueue handler performing the actual device reset: notify, stop,
 * hard-reset, restart, notify again.
 *
 * Ownership of reset_data: freed here in async mode (or on restart
 * failure); in sync mode it is only completed - the scheduler frees it.
 * NOTE(review): if the sync waiter timed out and already freed
 * reset_data, the complete() below would touch freed memory - confirm
 * against adf_dev_aer_schedule_reset().
 */
static void adf_device_reset_worker(struct work_struct *work)
{
	struct adf_reset_dev_data *reset_data =
		container_of(work, struct adf_reset_dev_data, reset_work);
	struct adf_accel_dev *accel_dev = reset_data->accel_dev;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_stop(accel_dev);
	adf_dev_restore(accel_dev);
	if (adf_dev_start(accel_dev)) {
		/* The device hanged and we can't restart it so stop here */
		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
		kfree(reset_data);
		WARN(1, "QAT: device restart failed. Device is unusable\n");
		return;
	}
	adf_dev_restarted_notify(accel_dev);
	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);

	/* The dev is back alive. Notify the caller if in sync mode */
	if (reset_data->mode == ADF_DEV_RESET_SYNC)
		complete(&reset_data->compl);
	else
		kfree(reset_data);
}
|
||||
|
||||
/*
 * Schedule a device reset on the dedicated workqueue.
 * @accel_dev: device to reset.
 * @mode: ADF_DEV_RESET_SYNC waits (up to 10s) for the worker to finish;
 *        ADF_DEV_RESET_ASYNC returns immediately.
 *
 * Returns 0 without doing anything if the device is running and no
 * restart is already in flight.
 * NOTE(review): on sync timeout, reset_data is kfree'd here while the
 * worker may still be running and later complete() into freed memory -
 * verify lifetime handling with adf_device_reset_worker().
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EFAULT on
 * sync-mode timeout.
 */
static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
				      enum adf_dev_reset_mode mode)
{
	struct adf_reset_dev_data *reset_data;

	if (adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		return 0;

	set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
	/* GFP_ATOMIC: may be called from the AER error-recovery path */
	reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
	if (!reset_data)
		return -ENOMEM;
	reset_data->accel_dev = accel_dev;
	init_completion(&reset_data->compl);
	reset_data->mode = mode;
	INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
	queue_work(device_reset_wq, &reset_data->reset_work);

	/* If in sync mode wait for the result */
	if (mode == ADF_DEV_RESET_SYNC) {
		int ret = 0;
		/* Maximum device reset time is 10 seconds */
		unsigned long wait_jiffies = msecs_to_jiffies(10000);
		unsigned long timeout = wait_for_completion_timeout(
				   &reset_data->compl, wait_jiffies);
		if (!timeout) {
			pr_err("QAT: Reset device timeout expired\n");
			ret = -EFAULT;
		}
		kfree(reset_data);
		return ret;
	}
	return 0;
}
|
||||
|
||||
/*
 * PCI AER callback: the slot has been reset by the PCI core.
 * Clears the uncorrectable-error status and performs a synchronous
 * driver-level device reset.
 */
static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Can't find acceleration device\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_cleanup_aer_uncorrect_error_status(pdev);
	if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
|
||||
|
||||
/*
 * PCI AER callback: recovery finished, normal operation may resume.
 * Fixes the "runnig" typo in the original log message.
 */
static void adf_resume(struct pci_dev *pdev)
{
	pr_info("QAT: Acceleration driver reset completed\n");
	pr_info("QAT: Device is up and running\n");
}
|
||||
|
||||
/* AER callback table installed into the PCI driver by adf_enable_aer() */
static struct pci_error_handlers adf_err_handler = {
	.error_detected = adf_error_detected,
	.slot_reset = adf_slot_reset,
	.resume = adf_resume,
};
|
||||
|
||||
/**
 * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 * @adf:        PCI device driver owning the given acceleration device.
 *
 * Function enables PCI Advanced Error Reporting for the
 * QAT acceleration device accel_dev and installs the AER callbacks.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	adf->err_handler = &adf_err_handler;
	pci_enable_pcie_error_reporting(pdev);
	return 0;
}
EXPORT_SYMBOL_GPL(adf_enable_aer);
|
||||
|
||||
/**
 * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function disables PCI Advanced Error Reporting for the
 * QAT acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_disable_aer(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	pci_disable_pcie_error_reporting(pdev);
}
EXPORT_SYMBOL_GPL(adf_disable_aer);
|
||||
|
||||
int adf_init_aer(void)
|
||||
{
|
||||
device_reset_wq = create_workqueue("qat_device_reset_wq");
|
||||
return (device_reset_wq == NULL) ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
void adf_exit_aer(void)
|
||||
{
|
||||
if (device_reset_wq)
|
||||
destroy_workqueue(device_reset_wq);
|
||||
device_reset_wq = NULL;
|
||||
}
|
361
drivers/crypto/qat/qat_common/adf_cfg.c
Normal file
361
drivers/crypto/qat/qat_common/adf_cfg.c
Normal file
@ -0,0 +1,361 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_cfg.h"
|
||||
|
||||
static DEFINE_MUTEX(qat_cfg_read_lock);
|
||||
|
||||
/*
 * seq_file start op for the dev_cfg debugfs file: take the global read
 * lock (released in qat_dev_cfg_stop) and position in the section list.
 */
static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
{
	struct adf_cfg_device_data *dev_cfg = sfile->private;

	mutex_lock(&qat_cfg_read_lock);
	return seq_list_start(&dev_cfg->sec_list, *pos);
}
|
||||
|
||||
static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
|
||||
{
|
||||
struct list_head *list;
|
||||
struct adf_cfg_section *sec =
|
||||
list_entry(v, struct adf_cfg_section, list);
|
||||
|
||||
seq_printf(sfile, "[%s]\n", sec->name);
|
||||
list_for_each(list, &sec->param_head) {
|
||||
struct adf_cfg_key_val *ptr =
|
||||
list_entry(list, struct adf_cfg_key_val, list);
|
||||
seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* seq_file next op: advance to the next config section. */
static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
{
	struct adf_cfg_device_data *dev_cfg = sfile->private;

	return seq_list_next(v, &dev_cfg->sec_list, pos);
}
|
||||
|
||||
/* seq_file stop op: drop the lock taken in qat_dev_cfg_start(). */
static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
{
	mutex_unlock(&qat_cfg_read_lock);
}
|
||||
|
||||
/* seq_file iterator over the per-device config section list */
static const struct seq_operations qat_dev_cfg_sops = {
	.start = qat_dev_cfg_start,
	.next = qat_dev_cfg_next,
	.stop = qat_dev_cfg_stop,
	.show = qat_dev_cfg_show
};
|
||||
|
||||
/*
 * debugfs open: start the seq_file iterator and stash the per-device
 * config data (passed via inode->i_private by debugfs_create_file).
 */
static int qat_dev_cfg_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &qat_dev_cfg_sops);

	if (!ret) {
		struct seq_file *seq_f = file->private_data;

		seq_f->private = inode->i_private;
	}
	return ret;
}
|
||||
|
||||
/* File operations for the read-only dev_cfg debugfs entry */
static const struct file_operations qat_dev_cfg_fops = {
	.open = qat_dev_cfg_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release
};
|
||||
|
||||
/**
 * adf_cfg_dev_add() - Create an acceleration device configuration table.
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function creates a configuration table for the given acceleration device.
 * The table stores device specific config values.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
{
	struct adf_cfg_device_data *dev_cfg_data;

	dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
	if (!dev_cfg_data)
		return -ENOMEM;
	INIT_LIST_HEAD(&dev_cfg_data->sec_list);
	init_rwsem(&dev_cfg_data->lock);
	accel_dev->cfg = dev_cfg_data;

	/* accel_dev->debugfs_dir should always be non-NULL here */
	dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
						  accel_dev->debugfs_dir,
						  dev_cfg_data,
						  &qat_dev_cfg_fops);
	if (!dev_cfg_data->debug) {
		pr_err("QAT: Failed to create qat cfg debugfs entry.\n");
		kfree(dev_cfg_data);
		accel_dev->cfg = NULL;
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
|
||||
|
||||
static void adf_cfg_section_del_all(struct list_head *head);
|
||||
|
||||
/*
 * adf_cfg_del_all() - Drop every section (and its keys) from the
 * device's config table, keeping the table itself alive.
 */
void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
{
	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;

	down_write(&dev_cfg_data->lock);
	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
	up_write(&dev_cfg_data->lock);
}
|
||||
|
||||
/**
 * adf_cfg_dev_remove() - Clears acceleration device configuration table.
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function removes configuration table from the given acceleration device
 * and frees all allocated memory, including the debugfs entry.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
{
	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;

	down_write(&dev_cfg_data->lock);
	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
	up_write(&dev_cfg_data->lock);
	debugfs_remove(dev_cfg_data->debug);
	kfree(dev_cfg_data);
	accel_dev->cfg = NULL;
}
EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
|
||||
|
||||
/* Append a key/value entry to the end of a section's parameter list. */
static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
			       struct adf_cfg_section *sec)
{
	list_add_tail(&new->list, &sec->param_head);
}
|
||||
|
||||
static void adf_cfg_keyval_del_all(struct list_head *head)
|
||||
{
|
||||
struct list_head *list_ptr, *tmp;
|
||||
|
||||
list_for_each_prev_safe(list_ptr, tmp, head) {
|
||||
struct adf_cfg_key_val *ptr =
|
||||
list_entry(list_ptr, struct adf_cfg_key_val, list);
|
||||
list_del(list_ptr);
|
||||
kfree(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
static void adf_cfg_section_del_all(struct list_head *head)
|
||||
{
|
||||
struct adf_cfg_section *ptr;
|
||||
struct list_head *list, *tmp;
|
||||
|
||||
list_for_each_prev_safe(list, tmp, head) {
|
||||
ptr = list_entry(list, struct adf_cfg_section, list);
|
||||
adf_cfg_keyval_del_all(&ptr->param_head);
|
||||
list_del(list);
|
||||
kfree(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
|
||||
const char *key)
|
||||
{
|
||||
struct list_head *list;
|
||||
|
||||
list_for_each(list, &s->param_head) {
|
||||
struct adf_cfg_key_val *ptr =
|
||||
list_entry(list, struct adf_cfg_key_val, list);
|
||||
if (!strcmp(ptr->key, key))
|
||||
return ptr;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
|
||||
const char *sec_name)
|
||||
{
|
||||
struct adf_cfg_device_data *cfg = accel_dev->cfg;
|
||||
struct list_head *list;
|
||||
|
||||
list_for_each(list, &cfg->sec_list) {
|
||||
struct adf_cfg_section *ptr =
|
||||
list_entry(list, struct adf_cfg_section, list);
|
||||
if (!strcmp(ptr->name, sec_name))
|
||||
return ptr;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Copy the value stored under section/key into @val
 * (ADF_CFG_MAX_VAL_LEN_IN_BYTES bytes; caller supplies the buffer).
 * Caller must hold cfg->lock (see adf_cfg_get_param_value()).
 *
 * Return: 0 on success, -1 if the section or key does not exist.
 */
static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
			       const char *sec_name,
			       const char *key_name,
			       char *val)
{
	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
	struct adf_cfg_key_val *keyval = NULL;

	if (sec)
		keyval = adf_cfg_key_value_find(sec, key_name);
	if (keyval) {
		memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
		return 0;
	}
	return -1;
}
|
||||
|
||||
/**
 * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
 * @accel_dev:  Pointer to acceleration device.
 * @section_name: Name of the section where the param will be added
 * @key: The key string
 * @val: Value to store for the given @key (pointer to a long for ADF_DEC,
 *       a string for ADF_STR; for ADF_HEX the pointer value itself is
 *       formatted - NOTE(review): confirm callers pass the value cast to
 *       a pointer for ADF_HEX rather than a pointer to the value)
 * @type: Type - string, int or address
 *
 * Function adds configuration key - value entry in the appropriate section
 * in the given acceleration device.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise (-1 for an unknown @type).
 */
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
				const char *section_name,
				const char *key, const void *val,
				enum adf_cfg_val_type type)
{
	struct adf_cfg_device_data *cfg = accel_dev->cfg;
	struct adf_cfg_key_val *key_val;
	struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
							   section_name);
	if (!section)
		return -EFAULT;

	key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
	if (!key_val)
		return -ENOMEM;

	INIT_LIST_HEAD(&key_val->list);
	strlcpy(key_val->key, key, sizeof(key_val->key));

	if (type == ADF_DEC) {
		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
			 "%ld", (*((long *)val)));
	} else if (type == ADF_STR) {
		strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
	} else if (type == ADF_HEX) {
		snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
			 "0x%lx", (unsigned long)val);
	} else {
		pr_err("QAT: Unknown type given.\n");
		kfree(key_val);
		return -1;
	}
	key_val->type = type;
	down_write(&cfg->lock);
	adf_cfg_keyval_add(key_val, section);
	up_write(&cfg->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
|
||||
|
||||
/**
 * adf_cfg_section_add() - Add config section entry to config table.
 * @accel_dev:  Pointer to acceleration device.
 * @name: Name of the section
 *
 * Function adds configuration section where key - value entries
 * will be stored. Adding an already-existing section is a no-op success.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
{
	struct adf_cfg_device_data *cfg = accel_dev->cfg;
	struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);

	if (sec)
		return 0;

	sec = kzalloc(sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	strlcpy(sec->name, name, sizeof(sec->name));
	INIT_LIST_HEAD(&sec->param_head);
	down_write(&cfg->lock);
	list_add_tail(&sec->list, &cfg->sec_list);
	up_write(&cfg->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(adf_cfg_section_add);
|
||||
|
||||
/*
 * adf_cfg_get_param_value() - Locked lookup of a config value.
 * @accel_dev: device whose config table is queried.
 * @section: section name.
 * @name: key name.
 * @value: out buffer (ADF_CFG_MAX_VAL_LEN_IN_BYTES bytes).
 *
 * Return: 0 on success, -1 if the section/key is not found.
 */
int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
			    const char *section, const char *name,
			    char *value)
{
	struct adf_cfg_device_data *cfg = accel_dev->cfg;
	int ret;

	down_read(&cfg->lock);
	ret = adf_cfg_key_val_get(accel_dev, section, name, value);
	up_read(&cfg->lock);
	return ret;
}
|
87
drivers/crypto/qat/qat_common/adf_cfg.h
Normal file
87
drivers/crypto/qat/qat_common/adf_cfg.h
Normal file
@ -0,0 +1,87 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_CFG_H_
|
||||
#define ADF_CFG_H_
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_cfg_common.h"
|
||||
#include "adf_cfg_strings.h"
|
||||
|
||||
struct adf_cfg_key_val {
|
||||
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
|
||||
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
|
||||
enum adf_cfg_val_type type;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct adf_cfg_section {
|
||||
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
|
||||
struct list_head list;
|
||||
struct list_head param_head;
|
||||
};
|
||||
|
||||
struct adf_cfg_device_data {
|
||||
struct list_head sec_list;
|
||||
struct dentry *debug;
|
||||
struct rw_semaphore lock;
|
||||
};
|
||||
|
||||
int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
|
||||
void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
|
||||
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
|
||||
void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
|
||||
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
|
||||
const char *section_name,
|
||||
const char *key, const void *val,
|
||||
enum adf_cfg_val_type type);
|
||||
int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
|
||||
const char *section, const char *name, char *value);
|
||||
|
||||
#endif
|
100
drivers/crypto/qat/qat_common/adf_cfg_common.h
Normal file
100
drivers/crypto/qat/qat_common/adf_cfg_common.h
Normal file
@ -0,0 +1,100 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_CFG_COMMON_H_
|
||||
#define ADF_CFG_COMMON_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/ioctl.h>
|
||||
|
||||
#define ADF_CFG_MAX_STR_LEN 64
|
||||
#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
|
||||
#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
|
||||
#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
|
||||
#define ADF_CFG_BASE_DEC 10
|
||||
#define ADF_CFG_BASE_HEX 16
|
||||
#define ADF_CFG_ALL_DEVICES 0xFE
|
||||
#define ADF_CFG_NO_DEVICE 0xFF
|
||||
#define ADF_CFG_AFFINITY_WHATEVER 0xFF
|
||||
#define MAX_DEVICE_NAME_SIZE 32
|
||||
#define ADF_MAX_DEVICES 32
|
||||
|
||||
enum adf_cfg_val_type {
|
||||
ADF_DEC,
|
||||
ADF_HEX,
|
||||
ADF_STR
|
||||
};
|
||||
|
||||
enum adf_device_type {
|
||||
DEV_UNKNOWN = 0,
|
||||
DEV_DH895XCC,
|
||||
};
|
||||
|
||||
struct adf_dev_status_info {
|
||||
enum adf_device_type type;
|
||||
uint8_t accel_id;
|
||||
uint8_t instance_id;
|
||||
uint8_t num_ae;
|
||||
uint8_t num_accel;
|
||||
uint8_t num_logical_accel;
|
||||
uint8_t banks_per_accel;
|
||||
uint8_t state;
|
||||
uint8_t bus;
|
||||
uint8_t dev;
|
||||
uint8_t fun;
|
||||
char name[MAX_DEVICE_NAME_SIZE];
|
||||
};
|
||||
|
||||
#define ADF_CTL_IOC_MAGIC 'a'
|
||||
#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
|
||||
struct adf_user_cfg_ctl_data)
|
||||
#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
|
||||
struct adf_user_cfg_ctl_data)
|
||||
#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
|
||||
struct adf_user_cfg_ctl_data)
|
||||
#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
|
||||
#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
|
||||
#endif
|
83
drivers/crypto/qat/qat_common/adf_cfg_strings.h
Normal file
83
drivers/crypto/qat/qat_common/adf_cfg_strings.h
Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_CFG_STRINGS_H_
|
||||
#define ADF_CFG_STRINGS_H_
|
||||
|
||||
#define ADF_GENERAL_SEC "GENERAL"
|
||||
#define ADF_KERNEL_SEC "KERNEL"
|
||||
#define ADF_ACCEL_SEC "Accelerator"
|
||||
#define ADF_NUM_CY "NumberCyInstances"
|
||||
#define ADF_NUM_DC "NumberDcInstances"
|
||||
#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
|
||||
#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
|
||||
#define ADF_RING_DC_SIZE "NumConcurrentRequests"
|
||||
#define ADF_RING_ASYM_TX "RingAsymTx"
|
||||
#define ADF_RING_SYM_TX "RingSymTx"
|
||||
#define ADF_RING_RND_TX "RingNrbgTx"
|
||||
#define ADF_RING_ASYM_RX "RingAsymRx"
|
||||
#define ADF_RING_SYM_RX "RinSymRx"
|
||||
#define ADF_RING_RND_RX "RingNrbgRx"
|
||||
#define ADF_RING_DC_TX "RingTx"
|
||||
#define ADF_RING_DC_RX "RingRx"
|
||||
#define ADF_ETRMGR_BANK "Bank"
|
||||
#define ADF_RING_BANK_NUM "BankNumber"
|
||||
#define ADF_CY "Cy"
|
||||
#define ADF_DC "Dc"
|
||||
#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
|
||||
#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
|
||||
ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED
|
||||
#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
|
||||
#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
|
||||
ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER
|
||||
#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
|
||||
#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
|
||||
ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED
|
||||
#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
|
||||
#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
|
||||
ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY
|
||||
#define ADF_ACCEL_STR "Accelerator%d"
|
||||
#endif
|
94
drivers/crypto/qat/qat_common/adf_cfg_user.h
Normal file
94
drivers/crypto/qat/qat_common/adf_cfg_user.h
Normal file
@ -0,0 +1,94 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_CFG_USER_H_
|
||||
#define ADF_CFG_USER_H_
|
||||
|
||||
#include "adf_cfg_common.h"
|
||||
#include "adf_cfg_strings.h"
|
||||
|
||||
struct adf_user_cfg_key_val {
|
||||
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
|
||||
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
|
||||
union {
|
||||
char *user_val_ptr;
|
||||
uint64_t padding1;
|
||||
};
|
||||
union {
|
||||
struct adf_user_cfg_key_val *prev;
|
||||
uint64_t padding2;
|
||||
};
|
||||
union {
|
||||
struct adf_user_cfg_key_val *next;
|
||||
uint64_t padding3;
|
||||
};
|
||||
enum adf_cfg_val_type type;
|
||||
};
|
||||
|
||||
struct adf_user_cfg_section {
|
||||
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
|
||||
union {
|
||||
struct adf_user_cfg_key_val *params;
|
||||
uint64_t padding1;
|
||||
};
|
||||
union {
|
||||
struct adf_user_cfg_section *prev;
|
||||
uint64_t padding2;
|
||||
};
|
||||
union {
|
||||
struct adf_user_cfg_section *next;
|
||||
uint64_t padding3;
|
||||
};
|
||||
};
|
||||
|
||||
struct adf_user_cfg_ctl_data {
|
||||
union {
|
||||
struct adf_user_cfg_section *config_section;
|
||||
uint64_t padding;
|
||||
};
|
||||
uint8_t device_id;
|
||||
};
|
||||
#endif
|
192
drivers/crypto/qat/qat_common/adf_common_drv.h
Normal file
192
drivers/crypto/qat/qat_common/adf_common_drv.h
Normal file
@ -0,0 +1,192 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_DRV_H
|
||||
#define ADF_DRV_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/pci.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "icp_qat_fw_loader_handle.h"
|
||||
#include "icp_qat_hal.h"
|
||||
|
||||
#define ADF_STATUS_RESTARTING 0
|
||||
#define ADF_STATUS_STARTING 1
|
||||
#define ADF_STATUS_CONFIGURED 2
|
||||
#define ADF_STATUS_STARTED 3
|
||||
#define ADF_STATUS_AE_INITIALISED 4
|
||||
#define ADF_STATUS_AE_UCODE_LOADED 5
|
||||
#define ADF_STATUS_AE_STARTED 6
|
||||
#define ADF_STATUS_ORPHAN_TH_RUNNING 7
|
||||
#define ADF_STATUS_IRQ_ALLOCATED 8
|
||||
|
||||
enum adf_dev_reset_mode {
|
||||
ADF_DEV_RESET_ASYNC = 0,
|
||||
ADF_DEV_RESET_SYNC
|
||||
};
|
||||
|
||||
enum adf_event {
|
||||
ADF_EVENT_INIT = 0,
|
||||
ADF_EVENT_START,
|
||||
ADF_EVENT_STOP,
|
||||
ADF_EVENT_SHUTDOWN,
|
||||
ADF_EVENT_RESTARTING,
|
||||
ADF_EVENT_RESTARTED,
|
||||
};
|
||||
|
||||
struct service_hndl {
|
||||
int (*event_hld)(struct adf_accel_dev *accel_dev,
|
||||
enum adf_event event);
|
||||
unsigned long init_status;
|
||||
unsigned long start_status;
|
||||
char *name;
|
||||
struct list_head list;
|
||||
int admin;
|
||||
};
|
||||
|
||||
int adf_service_register(struct service_hndl *service);
|
||||
int adf_service_unregister(struct service_hndl *service);
|
||||
|
||||
int adf_dev_init(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_start(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_stop(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
|
||||
|
||||
int adf_ctl_dev_register(void);
|
||||
void adf_ctl_dev_unregister(void);
|
||||
int adf_processes_dev_register(void);
|
||||
void adf_processes_dev_unregister(void);
|
||||
|
||||
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
|
||||
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
|
||||
struct list_head *adf_devmgr_get_head(void);
|
||||
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
|
||||
struct adf_accel_dev *adf_devmgr_get_first(void);
|
||||
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
|
||||
int adf_devmgr_verify_id(uint32_t id);
|
||||
void adf_devmgr_get_num_dev(uint32_t *num);
|
||||
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_started(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
|
||||
int adf_ae_init(struct adf_accel_dev *accel_dev);
|
||||
int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
|
||||
int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
|
||||
int adf_ae_fw_release(struct adf_accel_dev *accel_dev);
|
||||
int adf_ae_start(struct adf_accel_dev *accel_dev);
|
||||
int adf_ae_stop(struct adf_accel_dev *accel_dev);
|
||||
|
||||
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
|
||||
void adf_disable_aer(struct adf_accel_dev *accel_dev);
|
||||
int adf_init_aer(void);
|
||||
void adf_exit_aer(void);
|
||||
|
||||
int adf_dev_get(struct adf_accel_dev *accel_dev);
|
||||
void adf_dev_put(struct adf_accel_dev *accel_dev);
|
||||
int adf_dev_in_use(struct adf_accel_dev *accel_dev);
|
||||
int adf_init_etr_data(struct adf_accel_dev *accel_dev);
|
||||
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
|
||||
int qat_crypto_register(void);
|
||||
int qat_crypto_unregister(void);
|
||||
struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
|
||||
void qat_crypto_put_instance(struct qat_crypto_instance *inst);
|
||||
void qat_alg_callback(void *resp);
|
||||
int qat_algs_init(void);
|
||||
void qat_algs_exit(void);
|
||||
int qat_algs_register(void);
|
||||
int qat_algs_unregister(void);
|
||||
|
||||
int qat_hal_init(struct adf_accel_dev *accel_dev);
|
||||
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
|
||||
void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
|
||||
unsigned int ctx_mask);
|
||||
void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
|
||||
unsigned int ctx_mask);
|
||||
void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
|
||||
int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
|
||||
void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned int ctx_mask);
|
||||
int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, enum icp_qat_uof_regtype lm_type,
|
||||
unsigned char mode);
|
||||
int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned char mode);
|
||||
int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned char mode);
|
||||
void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned int ctx_mask, unsigned int upc);
|
||||
void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned int uaddr,
|
||||
unsigned int words_num, uint64_t *uword);
|
||||
void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
|
||||
unsigned int uword_addr, unsigned int words_num,
|
||||
unsigned int *data);
|
||||
int qat_hal_get_ins_num(void);
|
||||
int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae,
|
||||
struct icp_qat_uof_batch_init *lm_init_header);
|
||||
int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned char ctx_mask,
|
||||
enum icp_qat_uof_regtype reg_type,
|
||||
unsigned short reg_num, unsigned int regdata);
|
||||
int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned char ctx_mask,
|
||||
enum icp_qat_uof_regtype reg_type,
|
||||
unsigned short reg_num, unsigned int regdata);
|
||||
int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned char ctx_mask,
|
||||
enum icp_qat_uof_regtype reg_type,
|
||||
unsigned short reg_num, unsigned int regdata);
|
||||
int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned char ctx_mask,
|
||||
unsigned short reg_num, unsigned int regdata);
|
||||
int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
|
||||
unsigned char ae, unsigned short lm_addr, unsigned int value);
|
||||
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
|
||||
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
|
||||
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
|
||||
void *addr_ptr, int mem_size);
|
||||
#endif
|
490
drivers/crypto/qat/qat_common/adf_ctl_drv.c
Normal file
490
drivers/crypto/qat/qat_common/adf_ctl_drv.c
Normal file
@ -0,0 +1,490 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_common_drv.h"
|
||||
#include "adf_cfg.h"
|
||||
#include "adf_cfg_common.h"
|
||||
#include "adf_cfg_user.h"
|
||||
|
||||
#define DEVICE_NAME "qat_adf_ctl"
|
||||
|
||||
static DEFINE_MUTEX(adf_ctl_lock);
|
||||
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
|
||||
|
||||
static const struct file_operations adf_ctl_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.unlocked_ioctl = adf_ctl_ioctl,
|
||||
.compat_ioctl = adf_ctl_ioctl,
|
||||
};
|
||||
|
||||
struct adf_ctl_drv_info {
|
||||
unsigned int major;
|
||||
struct cdev drv_cdev;
|
||||
struct class *drv_class;
|
||||
};
|
||||
|
||||
static struct adf_ctl_drv_info adt_ctl_drv;
|
||||
|
||||
static void adf_chr_drv_destroy(void)
|
||||
{
|
||||
device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0));
|
||||
cdev_del(&adt_ctl_drv.drv_cdev);
|
||||
class_destroy(adt_ctl_drv.drv_class);
|
||||
unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1);
|
||||
}
|
||||
|
||||
static int adf_chr_drv_create(void)
|
||||
{
|
||||
dev_t dev_id;
|
||||
struct device *drv_device;
|
||||
|
||||
if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
|
||||
pr_err("QAT: unable to allocate chrdev region\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
|
||||
if (IS_ERR(adt_ctl_drv.drv_class)) {
|
||||
pr_err("QAT: class_create failed for adf_ctl\n");
|
||||
goto err_chrdev_unreg;
|
||||
}
|
||||
adt_ctl_drv.major = MAJOR(dev_id);
|
||||
cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops);
|
||||
if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) {
|
||||
pr_err("QAT: cdev add failed\n");
|
||||
goto err_class_destr;
|
||||
}
|
||||
|
||||
drv_device = device_create(adt_ctl_drv.drv_class, NULL,
|
||||
MKDEV(adt_ctl_drv.major, 0),
|
||||
NULL, DEVICE_NAME);
|
||||
if (!drv_device) {
|
||||
pr_err("QAT: failed to create device\n");
|
||||
goto err_cdev_del;
|
||||
}
|
||||
return 0;
|
||||
err_cdev_del:
|
||||
cdev_del(&adt_ctl_drv.drv_cdev);
|
||||
err_class_destr:
|
||||
class_destroy(adt_ctl_drv.drv_class);
|
||||
err_chrdev_unreg:
|
||||
unregister_chrdev_region(dev_id, 1);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct adf_user_cfg_ctl_data *cfg_data;
|
||||
|
||||
cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
|
||||
if (!cfg_data)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Initialize device id to NO DEVICE as 0 is a valid device id */
|
||||
cfg_data->device_id = ADF_CFG_NO_DEVICE;
|
||||
|
||||
if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
|
||||
pr_err("QAT: failed to copy from user cfg_data.\n");
|
||||
kfree(cfg_data);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
*ctl_data = cfg_data;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
|
||||
const char *section,
|
||||
const struct adf_user_cfg_key_val *key_val)
|
||||
{
|
||||
if (key_val->type == ADF_HEX) {
|
||||
long *ptr = (long *)key_val->val;
|
||||
long val = *ptr;
|
||||
|
||||
if (adf_cfg_add_key_value_param(accel_dev, section,
|
||||
key_val->key, (void *)val,
|
||||
key_val->type)) {
|
||||
pr_err("QAT: failed to add keyvalue.\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
} else {
|
||||
if (adf_cfg_add_key_value_param(accel_dev, section,
|
||||
key_val->key, key_val->val,
|
||||
key_val->type)) {
|
||||
pr_err("QAT: failed to add keyvalue.\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
|
||||
struct adf_user_cfg_ctl_data *ctl_data)
|
||||
{
|
||||
struct adf_user_cfg_key_val key_val;
|
||||
struct adf_user_cfg_key_val *params_head;
|
||||
struct adf_user_cfg_section section, *section_head;
|
||||
|
||||
section_head = ctl_data->config_section;
|
||||
|
||||
while (section_head) {
|
||||
if (copy_from_user(§ion, (void __user *)section_head,
|
||||
sizeof(*section_head))) {
|
||||
pr_err("QAT: failed to copy section info\n");
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
if (adf_cfg_section_add(accel_dev, section.name)) {
|
||||
pr_err("QAT: failed to add section.\n");
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
params_head = section_head->params;
|
||||
|
||||
while (params_head) {
|
||||
if (copy_from_user(&key_val, (void __user *)params_head,
|
||||
sizeof(key_val))) {
|
||||
pr_err("QAT: Failed to copy keyvalue.\n");
|
||||
goto out_err;
|
||||
}
|
||||
if (adf_add_key_value_data(accel_dev, section.name,
|
||||
&key_val)) {
|
||||
goto out_err;
|
||||
}
|
||||
params_head = key_val.next;
|
||||
}
|
||||
section_head = section.next;
|
||||
}
|
||||
return 0;
|
||||
out_err:
|
||||
adf_cfg_del_all(accel_dev);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/*
 * adf_ctl_ioctl_dev_config() - Handle IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS.
 *
 * Copies the user configuration into the device identified by the ioctl
 * payload and marks it configured. Configuration is rejected while the
 * device is running.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	/* Allocates ctl_data and copies the ioctl header from user space. */
	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		ret = -EFAULT;
		goto out;
	}

	/* A running device must be stopped before it can be reconfigured. */
	if (adf_dev_started(accel_dev)) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
		ret = -EFAULT;
		goto out;
	}
	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
out:
	kfree(ctl_data);
	return ret;
}
|
||||
|
||||
static int adf_ctl_is_device_in_use(int id)
|
||||
{
|
||||
struct list_head *itr, *head = adf_devmgr_get_head();
|
||||
|
||||
list_for_each(itr, head) {
|
||||
struct adf_accel_dev *dev =
|
||||
list_entry(itr, struct adf_accel_dev, list);
|
||||
|
||||
if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
|
||||
if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
|
||||
pr_info("QAT: device qat_dev%d is busy\n",
|
||||
dev->accel_id);
|
||||
return -EBUSY;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * adf_ctl_stop_devices() - Stop one device or all devices.
 * @id: accel id to stop, or ADF_CFG_ALL_DEVICES.
 *
 * Devices that are not started are skipped. A stop failure on one device is
 * recorded but does not abort stopping of the remaining devices.
 *
 * Return: 0 on success, -EFAULT if any stop failed.
 */
static int adf_ctl_stop_devices(uint32_t id)
{
	struct list_head *itr, *head = adf_devmgr_get_head();
	int ret = 0;

	list_for_each(itr, head) {
		struct adf_accel_dev *accel_dev =
				list_entry(itr, struct adf_accel_dev, list);
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			if (adf_dev_stop(accel_dev)) {
				pr_err("QAT: Failed to stop qat_dev%d\n", id);
				ret = -EFAULT;
			}
		}
	}
	return ret;
}
|
||||
|
||||
/*
 * adf_ctl_ioctl_dev_stop() - Handle IOCTL_STOP_ACCEL_DEV.
 *
 * Validates the device id, refuses to stop devices that are in use or in
 * reset, then stops the requested device (or all devices).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
				  unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	if (adf_devmgr_verify_id(ctl_data->device_id)) {
		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
		ret = -ENODEV;
		goto out;
	}

	/* Do not pull a device out from under active users. */
	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
	if (ret)
		goto out;

	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
		pr_info("QAT: Stopping all acceleration devices.\n");
	else
		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
			ctl_data->device_id);

	ret = adf_ctl_stop_devices(ctl_data->device_id);
	if (ret)
		pr_err("QAT: failed to stop device.\n");
out:
	kfree(ctl_data);
	return ret;
}
|
||||
|
||||
/*
 * adf_ctl_ioctl_dev_start() - Handle IOCTL_START_ACCEL_DEV.
 *
 * Starts the device identified by the ioctl payload. Starting an already
 * running device is not an error. If the start fails, the device is stopped
 * to tear down any partial bring-up.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
				   unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
		ret = -ENODEV;
		goto out;
	}

	if (!adf_dev_started(accel_dev)) {
		pr_info("QAT: Starting acceleration device qat_dev%d.\n",
			ctl_data->device_id);
		ret = adf_dev_start(accel_dev);
	} else {
		/* Already running: ret stays 0 from alloc_resources. */
		pr_info("QAT: Acceleration device qat_dev%d already started.\n",
			ctl_data->device_id);
	}
	if (ret) {
		pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
		/* Roll back a partial start. */
		adf_dev_stop(accel_dev);
	}
out:
	kfree(ctl_data);
	return ret;
}
|
||||
|
||||
static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
uint32_t num_devices = 0;
|
||||
|
||||
adf_devmgr_get_num_dev(&num_devices);
|
||||
if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * adf_ctl_ioctl_get_status() - Handle IOCTL_STATUS_ACCEL_DEV.
 *
 * Reads the requested accel_id from user space, fills in the device status
 * (state, engine/accelerator counts, name, PCI location) and copies the
 * structure back to the caller.
 *
 * Return: 0 on success, -EFAULT on copy failure, -ENODEV if id is unknown.
 */
static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info dev_info;
	struct adf_accel_dev *accel_dev;

	if (copy_from_user(&dev_info, (void __user *)arg,
			   sizeof(struct adf_dev_status_info))) {
		pr_err("QAT: failed to copy from user.\n");
		return -EFAULT;
	}

	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
	if (!accel_dev) {
		pr_err("QAT: Device %d not found\n", dev_info.accel_id);
		return -ENODEV;
	}
	hw_data = accel_dev->hw_device;
	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info.num_ae = hw_data->get_num_aes(hw_data);
	dev_info.num_accel = hw_data->get_num_accels(hw_data);
	dev_info.num_logical_accel = hw_data->num_logical_accel;
	dev_info.banks_per_accel = hw_data->num_banks
					/ hw_data->num_logical_accel;
	strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
	dev_info.instance_id = hw_data->instance_id;
	dev_info.type = hw_data->dev_class->type;
	/* PCI bus/device/function so user space can locate the device. */
	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);

	if (copy_to_user((void __user *)arg, &dev_info,
			 sizeof(struct adf_dev_status_info))) {
		pr_err("QAT: failed to copy status.\n");
		return -EFAULT;
	}
	return 0;
}
|
||||
|
||||
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (mutex_lock_interruptible(&adf_ctl_lock))
|
||||
return -EFAULT;
|
||||
|
||||
switch (cmd) {
|
||||
case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
|
||||
ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
|
||||
break;
|
||||
|
||||
case IOCTL_STOP_ACCEL_DEV:
|
||||
ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
|
||||
break;
|
||||
|
||||
case IOCTL_START_ACCEL_DEV:
|
||||
ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
|
||||
break;
|
||||
|
||||
case IOCTL_GET_NUM_DEVICES:
|
||||
ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
|
||||
break;
|
||||
|
||||
case IOCTL_STATUS_ACCEL_DEV:
|
||||
ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
|
||||
break;
|
||||
default:
|
||||
pr_err("QAT: Invalid ioclt\n");
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&adf_ctl_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * adf_register_ctl_device_driver() - Module init: bring up the QAT framework.
 *
 * Initialises the crypto algorithm layer, creates the control character
 * device, sets up AER error reporting and registers the crypto instances.
 * Unwinds in reverse order on failure.
 *
 * Return: 0 on success, -EFAULT on any failure.
 */
static int __init adf_register_ctl_device_driver(void)
{
	mutex_init(&adf_ctl_lock);

	if (qat_algs_init())
		goto err_algs_init;

	if (adf_chr_drv_create())
		goto err_chr_dev;

	if (adf_init_aer())
		goto err_aer;

	if (qat_crypto_register())
		goto err_crypto_register;

	return 0;

	/* Error unwinding: each label undoes the step before its failure. */
err_crypto_register:
	adf_exit_aer();
err_aer:
	adf_chr_drv_destroy();
err_chr_dev:
	qat_algs_exit();
err_algs_init:
	mutex_destroy(&adf_ctl_lock);
	return -EFAULT;
}
|
||||
|
||||
/*
 * adf_unregister_ctl_device_driver() - Module exit: tear down the framework.
 *
 * Reverses adf_register_ctl_device_driver().
 */
static void __exit adf_unregister_ctl_device_driver(void)
{
	adf_chr_drv_destroy();
	adf_exit_aer();
	qat_crypto_unregister();
	qat_algs_exit();
	mutex_destroy(&adf_ctl_lock);
}
|
||||
|
||||
/* Module entry points and metadata for the QAT common framework. */
module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS("intel_qat");
|
215
drivers/crypto/qat/qat_common/adf_dev_mgr.c
Normal file
215
drivers/crypto/qat/qat_common/adf_dev_mgr.c
Normal file
@ -0,0 +1,215 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/list.h>
|
||||
#include "adf_cfg.h"
|
||||
#include "adf_common_drv.h"
|
||||
|
||||
static LIST_HEAD(accel_table);
|
||||
static DEFINE_MUTEX(table_lock);
|
||||
static uint32_t num_devices;
|
||||
|
||||
/**
|
||||
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
|
||||
* @accel_dev: Pointer to acceleration device.
|
||||
*
|
||||
* Function adds acceleration device to the acceleration framework.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: 0 on success, error code othewise.
|
||||
*/
|
||||
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct list_head *itr;
|
||||
|
||||
if (num_devices == ADF_MAX_DEVICES) {
|
||||
pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
mutex_lock(&table_lock);
|
||||
list_for_each(itr, &accel_table) {
|
||||
struct adf_accel_dev *ptr =
|
||||
list_entry(itr, struct adf_accel_dev, list);
|
||||
|
||||
if (ptr == accel_dev) {
|
||||
mutex_unlock(&table_lock);
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
atomic_set(&accel_dev->ref_count, 0);
|
||||
list_add_tail(&accel_dev->list, &accel_table);
|
||||
accel_dev->accel_id = num_devices++;
|
||||
mutex_unlock(&table_lock);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
|
||||
|
||||
/* Return the head of the global accel device table for external iterators. */
struct list_head *adf_devmgr_get_head(void)
{
	return &accel_table;
}
||||
|
||||
/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
{
	mutex_lock(&table_lock);
	list_del(&accel_dev->list);
	/* NOTE(review): freed accel_ids are not reused; num_devices only
	 * tracks the high-water slot count — confirm that is intended. */
	num_devices--;
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
|
||||
|
||||
struct adf_accel_dev *adf_devmgr_get_first(void)
|
||||
{
|
||||
struct adf_accel_dev *dev = NULL;
|
||||
|
||||
if (!list_empty(&accel_table))
|
||||
dev = list_first_entry(&accel_table, struct adf_accel_dev,
|
||||
list);
|
||||
return dev;
|
||||
}
|
||||
|
||||
/**
|
||||
* adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
|
||||
* @accel_dev: Pointer to pci device.
|
||||
*
|
||||
* Function returns acceleration device associated with the given pci device.
|
||||
* To be used by QAT device specific drivers.
|
||||
*
|
||||
* Return: pinter to accel_dev or NULL if not found.
|
||||
*/
|
||||
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
|
||||
{
|
||||
struct list_head *itr;
|
||||
|
||||
list_for_each(itr, &accel_table) {
|
||||
struct adf_accel_dev *ptr =
|
||||
list_entry(itr, struct adf_accel_dev, list);
|
||||
|
||||
if (ptr->accel_pci_dev.pci_dev == pci_dev) {
|
||||
mutex_unlock(&table_lock);
|
||||
return ptr;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
|
||||
|
||||
/* Look up a registered accel device by its accel_id, or NULL if not found. */
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
	struct list_head *itr;

	/*
	 * Bug fix: the original unlocked table_lock without ever taking it
	 * (lock imbalance) and walked the table unprotected. Hold the lock
	 * for the whole lookup instead.
	 */
	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}
|
||||
|
||||
/*
 * Validate a device id coming from user space.
 * ADF_CFG_ALL_DEVICES is always accepted; any other id must match a
 * registered device.
 *
 * Return: 0 if valid, -ENODEV otherwise.
 */
int adf_devmgr_verify_id(uint32_t id)
{
	if (id != ADF_CFG_ALL_DEVICES && !adf_devmgr_get_dev_by_id(id))
		return -ENODEV;

	return 0;
}
|
||||
|
||||
/* Count the registered accel devices and store the result in *num. */
void adf_devmgr_get_num_dev(uint32_t *num)
{
	struct list_head *itr;
	uint32_t count = 0;

	list_for_each(itr, &accel_table)
		count++;

	*num = count;
}
|
||||
|
||||
/* Return nonzero if the device has any outstanding references. */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}
|
||||
|
||||
int adf_dev_get(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
|
||||
if (!try_module_get(accel_dev->owner))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Drop a device reference; the last put releases the owning module. */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		module_put(accel_dev->owner);
}
|
||||
/* Return nonzero if the device is currently restarting. */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
||||
|
||||
int adf_dev_started(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
|
||||
}
|
388
drivers/crypto/qat/qat_common/adf_init.c
Normal file
388
drivers/crypto/qat/qat_common/adf_init.c
Normal file
@ -0,0 +1,388 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/delay.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_cfg.h"
|
||||
#include "adf_common_drv.h"
|
||||
|
||||
static LIST_HEAD(service_table);
|
||||
static DEFINE_MUTEX(service_lock);
|
||||
|
||||
/* Insert a service into the global service table under service_lock. */
static void adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}
||||
|
||||
/**
 * adf_service_register() - Register acceleration service in the accel framework
 * @service:    Pointer to the service
 *
 * Function adds the acceleration service to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code othewise.
 */
int adf_service_register(struct service_hndl *service)
{
	/* Per-device bitmasks tracking which devices this service serves. */
	service->init_status = 0;
	service->start_status = 0;
	adf_service_add(service);
	return 0;
}
EXPORT_SYMBOL_GPL(adf_service_register);
|
||||
|
||||
/* Remove a service from the global service table under service_lock. */
static void adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}
|
||||
|
||||
/**
 * adf_service_unregister() - Unregister acceleration service from the framework
 * @service:    Pointer to the service
 *
 * Function remove the acceleration service from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code othewise.
 */
int adf_service_unregister(struct service_hndl *service)
{
	/* Refuse removal while the service is still bound to any device. */
	if (service->init_status || service->start_status) {
		pr_err("QAT: Could not remove active service\n");
		return -EFAULT;
	}
	adf_service_remove(service);
	return 0;
}
EXPORT_SYMBOL_GPL(adf_service_unregister);
|
||||
|
||||
/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Bring-up order: AE init -> firmware load -> IRQs -> service INIT events
 * (admin services first) -> error correction -> AE start -> service START
 * events -> crypto algorithm registration. Status bits are set after each
 * stage so that adf_dev_stop() can unwind whatever was completed.
 *
 * Return: 0 on success, error code othewise.
 */
int adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
		pr_info("QAT: Device not configured\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_STARTING, &accel_dev->status);

	if (adf_ae_init(accel_dev)) {
		pr_err("QAT: Failed to initialise Acceleration Engine\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		pr_err("QAT: Failed to load acceleration FW\n");
		adf_ae_fw_release(accel_dev);
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	/* NOTE(review): later error returns rely on the caller invoking
	 * adf_dev_stop() to unwind the stages completed above. */
	if (hw_data->alloc_irq(accel_dev)) {
		pr_err("QAT: Failed to allocate interrupts\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			pr_err("QAT: Failed to initialise service %s\n",
			       service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, &service->init_status);
	}
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			pr_err("QAT: Failed to initialise service %s\n",
			       service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, &service->init_status);
	}

	hw_data->enable_error_correction(accel_dev);

	if (adf_ae_start(accel_dev)) {
		pr_err("QAT: AE Start Failed\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

	/* START events: again admin services first, then the rest. */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			pr_err("QAT: Failed to start service %s\n",
			       service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, &service->start_status);
	}
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			pr_err("QAT: Failed to start service %s\n",
			       service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, &service->start_status);
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (qat_algs_register()) {
		pr_err("QAT: Failed to register crypto algs\n");
		/* Revert to STARTING so adf_dev_stop() still unwinds. */
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_start);
|
||||
|
||||
/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev:    Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shuting down.
 * To be used by QAT device specific drivers.
 *
 * Reverses adf_dev_start(): crypto algs are unregistered first, then
 * services get STOP events (non-admin first), the AE is stopped, firmware
 * released, SHUTDOWN events delivered, and IRQs freed. Each stage is gated
 * on the status bit set during start so partial starts unwind safely.
 *
 * Return: 0 on success, error code othewise.
 */
int adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;
	int ret, wait = 0;

	/* Nothing to do if the device never got past CONFIGURED. */
	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
		return 0;
	}
	clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (qat_algs_unregister())
		pr_err("QAT: Failed to unregister crypto algs\n");

	/* STOP events for non-admin services; -EAGAIN means the service
	 * needs a grace period before the AE is stopped (see msleep below). */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->admin)
			continue;
		if (!test_bit(accel_dev->accel_id, &service->start_status))
			continue;
		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
		if (!ret) {
			clear_bit(accel_dev->accel_id, &service->start_status);
		} else if (ret == -EAGAIN) {
			wait = 1;
			clear_bit(accel_dev->accel_id, &service->start_status);
		}
	}
	/* STOP events for admin services. */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!service->admin)
			continue;
		if (!test_bit(accel_dev->accel_id, &service->start_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_STOP))
			pr_err("QAT: Failed to shutdown service %s\n",
			       service->name);
		else
			clear_bit(accel_dev->accel_id, &service->start_status);
	}

	if (wait)
		msleep(100);

	if (adf_dev_started(accel_dev)) {
		if (adf_ae_stop(accel_dev))
			pr_err("QAT: failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		if (adf_ae_fw_release(accel_dev))
			pr_err("QAT: Failed to release the ucode\n");
		else
			clear_bit(ADF_STATUS_AE_UCODE_LOADED,
				  &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			pr_err("QAT: Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	/* SHUTDOWN events: non-admin services first, then admin. */
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->admin)
			continue;
		if (!test_bit(accel_dev->accel_id, &service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			pr_err("QAT: Failed to shutdown service %s\n",
			       service->name);
		else
			clear_bit(accel_dev->accel_id, &service->init_status);
	}
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!service->admin)
			continue;
		if (!test_bit(accel_dev->accel_id, &service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			pr_err("QAT: Failed to shutdown service %s\n",
			       service->name);
		else
			clear_bit(accel_dev->accel_id, &service->init_status);
	}

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all(accel_dev);

	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_stop);
|
||||
|
||||
/*
 * Notify all registered services that the device is about to restart.
 * Non-admin services are notified first, then admin services; failures are
 * logged but do not abort notification.
 */
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			pr_err("QAT: Failed to restart service %s.\n",
			       service->name);
	}
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			pr_err("QAT: Failed to restart service %s.\n",
			       service->name);
	}
	return 0;
}
|
||||
|
||||
/*
 * Notify all registered services that the device has finished restarting.
 * Non-admin services are notified first, then admin services; failures are
 * logged but do not abort notification.
 */
int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			pr_err("QAT: Failed to restart service %s.\n",
			       service->name);
	}
	list_for_each(list_itr, &service_table) {
		service = list_entry(list_itr, struct service_hndl, list);
		if (!service->admin)
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			pr_err("QAT: Failed to restart service %s.\n",
			       service->name);
	}
	return 0;
}
|
567
drivers/crypto/qat/qat_common/adf_transport.c
Normal file
567
drivers/crypto/qat/qat_common/adf_transport.c
Normal file
@ -0,0 +1,567 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/delay.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_transport_internal.h"
|
||||
#include "adf_transport_access_macros.h"
|
||||
#include "adf_cfg.h"
|
||||
#include "adf_common_drv.h"
|
||||
|
||||
/* Reduce @data modulo 2^@shift (ring offsets always wrap on a power of 2). */
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t quotient = data >> shift;

	return data - (quotient << shift);
}
|
||||
|
||||
/*
 * The device requires a ring's DMA base address to be naturally aligned
 * to its (power-of-two) size.  Return 0 when aligned, -EFAULT otherwise.
 */
static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
	if (addr & (size - 1))
		return -EFAULT;
	return 0;
}
|
||||
|
||||
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
|
||||
{
|
||||
int i = ADF_MIN_RING_SIZE;
|
||||
|
||||
for (; i <= ADF_MAX_RING_SIZE; i++)
|
||||
if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
|
||||
return i;
|
||||
|
||||
return ADF_DEFAULT_RING_SIZE;
|
||||
}
|
||||
|
||||
/*
 * Atomically claim ring @ring in the bank's ring bitmask.
 * Returns 0 on success, -EFAULT if the ring is already reserved.
 */
static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
	spin_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		/* Ring already in use by another caller. */
		spin_unlock(&bank->lock);
		return -EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	spin_unlock(&bank->lock);
	return 0;
}
|
||||
|
||||
/* Release ring @ring back into the bank's pool of free rings. */
static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
	spin_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	spin_unlock(&bank->lock);
}
|
||||
|
||||
/*
 * Enable response interrupts for ring @ring: add it to the bank's IRQ
 * mask under the bh-safe lock, then program the interrupt-coalescing
 * enable and timer CSRs with the updated mask.
 */
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
	spin_lock_bh(&bank->lock);
	bank->irq_mask |= (1 << ring);
	spin_unlock_bh(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
	WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
			      bank->irq_coalesc_timer);
}
|
||||
|
||||
/*
 * Disable response interrupts for ring @ring: remove it from the
 * bank's IRQ mask and write the updated mask to the hardware.
 */
static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
	spin_lock_bh(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	spin_unlock_bh(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}
|
||||
|
||||
/*
 * Post one request message on a transmit ring.
 * Returns 0 on success, or -EAGAIN when the ring is full and the
 * caller should retry later.
 */
int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
	/*
	 * Reserve an inflight slot first; back off if the ring already
	 * holds the maximum number of outstanding messages.
	 */
	if (atomic_add_return(1, ring->inflights) >
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
		atomic_dec(ring->inflights);
		return -EAGAIN;
	}
	spin_lock_bh(&ring->lock);
	/* Copy the message into the slot at the current tail. */
	memcpy(ring->base_addr + ring->tail, msg,
	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

	/* Advance tail with wrap-around, then notify the device. */
	ring->tail = adf_modulo(ring->tail +
				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
				ADF_RING_SIZE_MODULO(ring->ring_size));
	WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
			    ring->ring_number, ring->tail);
	spin_unlock_bh(&ring->lock);
	return 0;
}
|
||||
|
||||
/*
 * Drain all completed responses from one receive ring: invoke the user
 * callback for each message, overwrite the slot with the empty
 * signature, and advance the head with wrap-around.  When anything was
 * consumed, publish the new head to the device and release the
 * corresponding inflight slots.  Always returns 0.
 */
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
	uint32_t msg_counter = 0;
	uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);

	/* A slot not holding the empty signature contains a response. */
	while (*msg != ADF_RING_EMPTY_SIG) {
		ring->callback((uint32_t *)msg);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head +
					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (uint32_t *)(ring->base_addr + ring->head);
	}
	if (msg_counter > 0) {
		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
				    ring->bank->bank_number,
				    ring->ring_number, ring->head);
		atomic_sub(msg_counter, ring->inflights);
	}
	return 0;
}
|
||||
|
||||
/* Program the config CSR of a transmit ring (no near-watermarks). */
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
			      ring->ring_number, ring_config);
}
|
||||
|
||||
/*
 * Program the config CSR of a receive ring, with a near-full watermark
 * of 512 entries and a near-empty watermark of 0.
 */
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	uint32_t ring_config =
			BUILD_RESP_RING_CONFIG(ring->ring_size,
					       ADF_RING_NEAR_WATERMARK_512,
					       ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
			      ring->ring_number, ring_config);
}
|
||||
|
||||
/*
 * Allocate and initialize one ring's DMA buffer and program it into
 * the device.  Returns 0 on success, -ENOMEM when the coherent buffer
 * cannot be allocated, or -EFAULT when the allocation is not naturally
 * aligned to the ring size (a hardware requirement).
 */
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	uint64_t ring_base;
	uint32_t ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
					     ring_size_bytes, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->base_addr)
		return -ENOMEM;

	/* 0x7F bytes form ADF_RING_EMPTY_SIG in every slot. */
	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The base_addr has to be aligned to the size of the buffer */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		pr_err("QAT: Ring address not aligned\n");
		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
				  ring->base_addr, ring->dma_addr);
		return -EFAULT;
	}

	/* Tx and rx rings use different config-CSR layouts. */
	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);

	else
		adf_configure_rx_ring(ring);

	/* Finally hand the DMA base address to the device. */
	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
	WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
			    ring->ring_number, ring_base);
	spin_lock_init(&ring->lock);
	return 0;
}
|
||||
|
||||
/*
 * Free one ring's DMA buffer.  The buffer is refilled with the empty
 * signature first so the device never sees stale message data.
 * Safe to call on a ring whose buffer was never allocated.
 */
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	uint32_t ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		memset(ring->base_addr, 0x7F, ring_size_bytes);
		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
				  ring_size_bytes, ring->base_addr,
				  ring->dma_addr);
	}
}
|
||||
|
||||
/*
 * Create a transport ring.
 *
 * Validates the requested geometry, looks the ring number up in the
 * device configuration under @section/@ring_name, reserves the ring in
 * bank @bank_num, allocates and programs its DMA buffer, enables HW
 * arbitration and (unless @poll_mode) response interrupts, and returns
 * the ring through @ring_ptr.
 *
 * Returns 0 on success or a negative errno; on failure the ring is
 * fully torn down again.
 */
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    uint32_t bank_num, uint32_t num_msgs,
		    uint32_t msg_size, const char *ring_name,
		    adf_callback_fn callback, int poll_mode,
		    struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	uint32_t ring_num;
	int ret;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		pr_err("QAT: Invalid bank number\n");
		return -EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		pr_err("QAT: Invalid msg size\n");
		return -EFAULT;
	}
	/* The ring must be able to hold at least two inflight messages. */
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		pr_err("QAT: Invalid ring size for given msg size\n");
		return -EFAULT;
	}
	/* The ring number comes from the device configuration table. */
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		pr_err("QAT: Section %s, no such entry : %s\n",
		       section, ring_name);
		return -EFAULT;
	}
	if (kstrtouint(val, 10, &ring_num)) {
		pr_err("QAT: Can't get ring number\n");
		return -EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		pr_err("QAT: Ring %d, %s already exists.\n",
		       ring_num, ring_name);
		return -EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->head = 0;
	ring->tail = 0;
	atomic_set(ring->inflights, 0);
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	accel_dev->hw_device->hw_arb_ring_enable(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		pr_err("QAT: Couldn't add ring debugfs entry\n");
		ret = -EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && (!poll_mode))
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	/*
	 * NOTE(review): arbitration is disabled here even when
	 * adf_init_ring() failed before hw_arb_ring_enable() ran —
	 * presumably the disable is harmless when not enabled; confirm
	 * against the hw_arb implementation.
	 */
	accel_dev->hw_device->hw_arb_ring_disable(ring);
	return ret;
}
|
||||
|
||||
/*
 * Tear down a ring created by adf_create_ring(): disable its IRQ,
 * clear its hardware config and base registers, remove its debugfs
 * entry, release the ring number, disable HW arbitration and free the
 * DMA buffer.
 */
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear PCI config space */
	WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
			      ring->ring_number, 0);
	WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
			    ring->ring_number, 0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	accel_dev->hw_device->hw_arb_ring_disable(ring);
	adf_cleanup_ring(ring);
}
|
||||
|
||||
/*
 * Service every non-empty, IRQ-enabled ring in the bank.  The empty-
 * status CSR has a bit set for each empty ring, so inverting it and
 * masking with irq_mask yields the rings that need draining.
 */
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	uint32_t empty_rings, i;

	empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
		if (empty_rings & (1 << i))
			adf_handle_response(&bank->rings[i]);
	}
}
|
||||
|
||||
/**
 * adf_response_handler() - Bottom half handler response handler
 * @bank_addr: Address of a ring bank for with the BH was scheduled.
 *
 * Function is the bottom half handler for the response from acceleration
 * device. There is one handler for every ring bank. Function checks all
 * communication rings in the bank.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_response_handler(unsigned long bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;

	/* Handle all the responses and reenable IRQs */
	adf_ring_response_handler(bank);
	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
				   bank->irq_mask);
}
EXPORT_SYMBOL_GPL(adf_response_handler);
|
||||
|
||||
/*
 * Read an unsigned integer from the device configuration.  The key is
 * built from printf-style @format and @key (e.g. a bank number).
 * Returns 0 and stores the parsed value in *@value, or -EFAULT when
 * the entry is missing or not a decimal integer.
 */
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
				  const char *section, const char *format,
				  uint32_t key, uint32_t *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return -EFAULT;

	if (kstrtouint(val_buf, 10, value))
		return -EFAULT;
	return 0;
}
|
||||
|
||||
/*
 * Load the IRQ-coalescing timer for a bank from the configuration,
 * falling back to ADF_COALESCING_DEF_TIME when the entry is missing
 * or outside the [MIN, MAX] range supported by the hardware.
 */
static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
			       const char *section, uint32_t bank_num_in_accel)
{
	if (adf_get_cfg_int(bank->accel_dev, section,
			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			    bank_num_in_accel, &bank->irq_coalesc_timer))
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}
|
||||
|
||||
static int adf_init_bank(struct adf_accel_dev *accel_dev,
|
||||
struct adf_etr_bank_data *bank,
|
||||
uint32_t bank_num, void __iomem *csr_addr)
|
||||
{
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct adf_etr_ring_data *ring;
|
||||
struct adf_etr_ring_data *tx_ring;
|
||||
uint32_t i, coalesc_enabled;
|
||||
|
||||
memset(bank, 0, sizeof(*bank));
|
||||
bank->bank_number = bank_num;
|
||||
bank->csr_addr = csr_addr;
|
||||
bank->accel_dev = accel_dev;
|
||||
spin_lock_init(&bank->lock);
|
||||
|
||||
/* Enable IRQ coalescing always. This will allow to use
|
||||
* the optimised flag and coalesc register.
|
||||
* If it is disabled in the config file just use min time value */
|
||||
if (adf_get_cfg_int(accel_dev, "Accelerator0",
|
||||
ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
|
||||
bank_num, &coalesc_enabled) && coalesc_enabled)
|
||||
adf_enable_coalesc(bank, "Accelerator0", bank_num);
|
||||
else
|
||||
bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
|
||||
|
||||
for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
|
||||
WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
|
||||
WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
|
||||
ring = &bank->rings[i];
|
||||
if (hw_data->tx_rings_mask & (1 << i)) {
|
||||
ring->inflights = kzalloc_node(sizeof(atomic_t),
|
||||
GFP_KERNEL,
|
||||
accel_dev->numa_node);
|
||||
if (!ring->inflights)
|
||||
goto err;
|
||||
} else {
|
||||
if (i < hw_data->tx_rx_gap) {
|
||||
pr_err("QAT: Invalid tx rings mask config\n");
|
||||
goto err;
|
||||
}
|
||||
tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
|
||||
ring->inflights = tx_ring->inflights;
|
||||
}
|
||||
}
|
||||
if (adf_bank_debugfs_add(bank)) {
|
||||
pr_err("QAT: Failed to add bank debugfs entry\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
|
||||
return 0;
|
||||
err:
|
||||
for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
|
||||
ring = &bank->rings[i];
|
||||
if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
|
||||
kfree(ring->inflights);
|
||||
}
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr_addr;
	uint32_t size;
	uint32_t num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
				accel_dev->numa_node);
	if (!etr_data)
		return -ENOMEM;

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
	if (!etr_data->banks) {
		ret = -ENOMEM;
		goto err_bank;
	}

	accel_dev->transport = etr_data;
	/* CSRs live in the device's ETR BAR. */
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	/* accel_dev->debugfs_dir should always be non-NULL here */
	etr_data->debug = debugfs_create_dir("transport",
					     accel_dev->debugfs_dir);
	if (!etr_data->debug) {
		pr_err("QAT: Unable to create transport debugfs entry\n");
		ret = -ENOENT;
		goto err_bank_debug;
	}

	for (i = 0; i < num_banks; i++) {
		/*
		 * NOTE(review): if a later bank fails, banks already
		 * initialized are not torn down individually here —
		 * presumably their resources are reclaimed elsewhere;
		 * verify against adf_cleanup_etr_data() usage.
		 */
		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
				    csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	debugfs_remove(etr_data->debug);
err_bank_debug:
	kfree(etr_data->banks);
err_bank:
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);
|
||||
|
||||
/*
 * Release every resource held by one bank: DMA buffers of all rings
 * still marked in ring_mask, the tx rings' inflight counters, and the
 * bank's debugfs entry.  The bank struct is zeroed afterwards.
 */
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
	uint32_t i;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
		struct adf_accel_dev *accel_dev = bank->accel_dev;
		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		/* inflight counters are owned by the tx rings only. */
		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	adf_bank_debugfs_rm(bank);
	memset(bank, 0, sizeof(*bank));
}
|
||||
|
||||
/* Clean up every bank of the device's transport data. */
static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}
|
||||
|
||||
/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	/* Safe to call when transport was never initialized. */
	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		debugfs_remove(etr_data->debug);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
|
63
drivers/crypto/qat/qat_common/adf_transport.h
Normal file
63
drivers/crypto/qat/qat_common/adf_transport.h
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_TRANSPORT_H
#define ADF_TRANSPORT_H

#include "adf_accel_devices.h"

struct adf_etr_ring_data;

/* Completion callback invoked for every response message on a ring. */
typedef void (*adf_callback_fn)(void *resp_msg);

/*
 * Create a transport ring in bank @bank_num able to hold @num_msgs
 * messages of @msg_size bytes; the ring number is read from the device
 * configuration under @section/@ring_name.  Returns 0 and the ring via
 * @ring_ptr, or a negative errno.
 * (Parameter renamed from the "num_mgs" typo to match the definition
 * in adf_transport.c.)
 */
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
		    const char *ring_name, adf_callback_fn callback,
		    int poll_mode, struct adf_etr_ring_data **ring_ptr);

/* Post one message on @ring; returns -EAGAIN when the ring is full. */
int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
/* Tear down a ring created by adf_create_ring(). */
void adf_remove_ring(struct adf_etr_ring_data *ring);
#endif
|
160
drivers/crypto/qat/qat_common/adf_transport_access_macros.h
Normal file
160
drivers/crypto/qat/qat_common/adf_transport_access_macros.h
Normal file
@ -0,0 +1,160 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
#define ADF_TRANSPORT_ACCESS_MACROS_H

#include "adf_accel_devices.h"
/* Interrupt source-select masks written by WRITE_CSR_INT_SRCSEL. */
#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
/* Per-bank CSR offsets; per-ring registers are arrays indexed by ring. */
#define ADF_RING_CSR_RING_CONFIG 0x000
#define ADF_RING_CSR_RING_LBASE 0x040
#define ADF_RING_CSR_RING_UBASE 0x080
#define ADF_RING_CSR_RING_HEAD 0x0C0
#define ADF_RING_CSR_RING_TAIL 0x100
#define ADF_RING_CSR_E_STAT 0x14C
#define ADF_RING_CSR_INT_SRCSEL 0x174
#define ADF_RING_CSR_INT_SRCSEL_2 0x178
#define ADF_RING_CSR_INT_COL_EN 0x17C
#define ADF_RING_CSR_INT_COL_CTL 0x180
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
/* Size of one bank's CSR window. */
#define ADF_RING_BUNDLE_SIZE 0x1000
/* Bit positions of the watermark fields in the ring config register. */
#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
/* IRQ coalescing timer bounds and default (device ticks). */
#define ADF_COALESCING_MIN_TIME 0x1FF
#define ADF_COALESCING_MAX_TIME 0xFFFFF
#define ADF_COALESCING_DEF_TIME 0x27FF
#define ADF_RING_NEAR_WATERMARK_512 0x08
#define ADF_RING_NEAR_WATERMARK_0 0x00
/* A slot full of 0x7F bytes marks an empty ring entry. */
#define ADF_RING_EMPTY_SIG 0x7F7F7F7F

/* Valid internal ring size values */
#define ADF_RING_SIZE_128 0x01
#define ADF_RING_SIZE_256 0x02
#define ADF_RING_SIZE_512 0x03
#define ADF_RING_SIZE_4K 0x06
#define ADF_RING_SIZE_16K 0x08
#define ADF_RING_SIZE_4M 0x10
#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K

/* Valid internal msg size values internal */
#define ADF_MSG_SIZE_32 0x01
#define ADF_MSG_SIZE_64 0x02
#define ADF_MSG_SIZE_128 0x04
#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128

/* Size to bytes conversion macros for ring and msg values */
#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)

/* Minimum ring buffer size for memory allocation */
#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
				ADF_RING_SIZE_4K : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
	((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
#define BUILD_RING_CONFIG(size)	\
	((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
	| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
	| size)
#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
	((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM)	\
	| (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
	| size)
/* The device stores the 64-bit DMA base right-shifted by 6 bits. */
#define BUILD_RING_BASE_ADDR(addr, size) \
	((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_RING_HEAD + (ring << 2))
#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_RING_TAIL + (ring << 2))
#define READ_CSR_E_STAT(csr_base_addr, bank) \
	ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_E_STAT)
#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
/* 64-bit base address is split across the L/UBASE register pair. */
#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
do { \
	uint32_t l_base = 0, u_base = 0; \
	l_base = (uint32_t)(value & 0xFFFFFFFF); \
	u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_RING_LBASE + (ring << 2), l_base);	\
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_RING_UBASE + (ring << 2), u_base);	\
} while (0)
#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_RING_HEAD + (ring << 2), value)
#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_RING_TAIL + (ring << 2), value)
#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
do { \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
	ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);	\
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
	ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
} while (0)
#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_INT_COL_EN, value)
#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_INT_COL_CTL, \
			ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
	ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
			ADF_RING_CSR_INT_FLAG_AND_COL, value)
#endif
|
304
drivers/crypto/qat/qat_common/adf_transport_debug.c
Normal file
304
drivers/crypto/qat/qat_common/adf_transport_debug.c
Normal file
@ -0,0 +1,304 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_transport_internal.h"
|
||||
#include "adf_transport_access_macros.h"
|
||||
|
||||
static DEFINE_MUTEX(ring_read_lock);
|
||||
static DEFINE_MUTEX(bank_read_lock);
|
||||
|
||||
static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
|
||||
{
|
||||
struct adf_etr_ring_data *ring = sfile->private;
|
||||
|
||||
mutex_lock(&ring_read_lock);
|
||||
if (*pos == 0)
|
||||
return SEQ_START_TOKEN;
|
||||
|
||||
if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
|
||||
ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
|
||||
return NULL;
|
||||
|
||||
return ring->base_addr +
|
||||
(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
|
||||
}
|
||||
|
||||
static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
|
||||
{
|
||||
struct adf_etr_ring_data *ring = sfile->private;
|
||||
|
||||
if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
|
||||
ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
|
||||
return NULL;
|
||||
|
||||
return ring->base_addr +
|
||||
(ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
|
||||
}
|
||||
|
||||
static int adf_ring_show(struct seq_file *sfile, void *v)
|
||||
{
|
||||
struct adf_etr_ring_data *ring = sfile->private;
|
||||
struct adf_etr_bank_data *bank = ring->bank;
|
||||
uint32_t *msg = v;
|
||||
void __iomem *csr = ring->bank->csr_addr;
|
||||
int i, x;
|
||||
|
||||
if (v == SEQ_START_TOKEN) {
|
||||
int head, tail, empty;
|
||||
|
||||
head = READ_CSR_RING_HEAD(csr, bank->bank_number,
|
||||
ring->ring_number);
|
||||
tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
|
||||
ring->ring_number);
|
||||
empty = READ_CSR_E_STAT(csr, bank->bank_number);
|
||||
|
||||
seq_puts(sfile, "------- Ring configuration -------\n");
|
||||
seq_printf(sfile, "ring num %d, bank num %d\n",
|
||||
ring->ring_number, ring->bank->bank_number);
|
||||
seq_printf(sfile, "head %x, tail %x, empty: %d\n",
|
||||
head, tail, (empty & 1 << ring->ring_number)
|
||||
>> ring->ring_number);
|
||||
seq_printf(sfile, "ring size %d, msg size %d\n",
|
||||
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
|
||||
ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
|
||||
seq_puts(sfile, "----------- Ring data ------------\n");
|
||||
return 0;
|
||||
}
|
||||
seq_printf(sfile, "%p:", msg);
|
||||
x = 0;
|
||||
i = 0;
|
||||
for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
|
||||
seq_printf(sfile, " %08X", *(msg + i));
|
||||
if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
|
||||
(++x == 8)) {
|
||||
seq_printf(sfile, "\n%p:", msg + i + 1);
|
||||
x = 0;
|
||||
}
|
||||
}
|
||||
seq_puts(sfile, "\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void adf_ring_stop(struct seq_file *sfile, void *v)
|
||||
{
|
||||
mutex_unlock(&ring_read_lock);
|
||||
}
|
||||
|
||||
static const struct seq_operations adf_ring_sops = {
|
||||
.start = adf_ring_start,
|
||||
.next = adf_ring_next,
|
||||
.stop = adf_ring_stop,
|
||||
.show = adf_ring_show
|
||||
};
|
||||
|
||||
static int adf_ring_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
int ret = seq_open(file, &adf_ring_sops);
|
||||
|
||||
if (!ret) {
|
||||
struct seq_file *seq_f = file->private_data;
|
||||
|
||||
seq_f->private = inode->i_private;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct file_operations adf_ring_debug_fops = {
|
||||
.open = adf_ring_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release
|
||||
};
|
||||
|
||||
int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
|
||||
{
|
||||
struct adf_etr_ring_debug_entry *ring_debug;
|
||||
char entry_name[8];
|
||||
|
||||
ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
|
||||
if (!ring_debug)
|
||||
return -ENOMEM;
|
||||
|
||||
strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
|
||||
snprintf(entry_name, sizeof(entry_name), "ring_%02d",
|
||||
ring->ring_number);
|
||||
|
||||
ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
|
||||
ring->bank->bank_debug_dir,
|
||||
ring, &adf_ring_debug_fops);
|
||||
if (!ring_debug->debug) {
|
||||
pr_err("QAT: Failed to create ring debug entry.\n");
|
||||
kfree(ring_debug);
|
||||
return -EFAULT;
|
||||
}
|
||||
ring->ring_debug = ring_debug;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
|
||||
{
|
||||
if (ring->ring_debug) {
|
||||
debugfs_remove(ring->ring_debug->debug);
|
||||
kfree(ring->ring_debug);
|
||||
ring->ring_debug = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
|
||||
{
|
||||
mutex_lock(&bank_read_lock);
|
||||
if (*pos == 0)
|
||||
return SEQ_START_TOKEN;
|
||||
|
||||
if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
|
||||
return NULL;
|
||||
|
||||
return pos;
|
||||
}
|
||||
|
||||
static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
|
||||
{
|
||||
if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
|
||||
return NULL;
|
||||
|
||||
return pos;
|
||||
}
|
||||
|
||||
static int adf_bank_show(struct seq_file *sfile, void *v)
|
||||
{
|
||||
struct adf_etr_bank_data *bank = sfile->private;
|
||||
|
||||
if (v == SEQ_START_TOKEN) {
|
||||
seq_printf(sfile, "------- Bank %d configuration -------\n",
|
||||
bank->bank_number);
|
||||
} else {
|
||||
int ring_id = *((int *)v) - 1;
|
||||
struct adf_etr_ring_data *ring = &bank->rings[ring_id];
|
||||
void __iomem *csr = bank->csr_addr;
|
||||
int head, tail, empty;
|
||||
|
||||
if (!(bank->ring_mask & 1 << ring_id))
|
||||
return 0;
|
||||
|
||||
head = READ_CSR_RING_HEAD(csr, bank->bank_number,
|
||||
ring->ring_number);
|
||||
tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
|
||||
ring->ring_number);
|
||||
empty = READ_CSR_E_STAT(csr, bank->bank_number);
|
||||
|
||||
seq_printf(sfile,
|
||||
"ring num %02d, head %04x, tail %04x, empty: %d\n",
|
||||
ring->ring_number, head, tail,
|
||||
(empty & 1 << ring->ring_number) >>
|
||||
ring->ring_number);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void adf_bank_stop(struct seq_file *sfile, void *v)
|
||||
{
|
||||
mutex_unlock(&bank_read_lock);
|
||||
}
|
||||
|
||||
static const struct seq_operations adf_bank_sops = {
|
||||
.start = adf_bank_start,
|
||||
.next = adf_bank_next,
|
||||
.stop = adf_bank_stop,
|
||||
.show = adf_bank_show
|
||||
};
|
||||
|
||||
static int adf_bank_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
int ret = seq_open(file, &adf_bank_sops);
|
||||
|
||||
if (!ret) {
|
||||
struct seq_file *seq_f = file->private_data;
|
||||
|
||||
seq_f->private = inode->i_private;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct file_operations adf_bank_debug_fops = {
|
||||
.open = adf_bank_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release
|
||||
};
|
||||
|
||||
int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
|
||||
{
|
||||
struct adf_accel_dev *accel_dev = bank->accel_dev;
|
||||
struct dentry *parent = accel_dev->transport->debug;
|
||||
char name[8];
|
||||
|
||||
snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
|
||||
bank->bank_debug_dir = debugfs_create_dir(name, parent);
|
||||
if (!bank->bank_debug_dir) {
|
||||
pr_err("QAT: Failed to create bank debug dir.\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
|
||||
bank->bank_debug_dir, bank,
|
||||
&adf_bank_debug_fops);
|
||||
if (!bank->bank_debug_cfg) {
|
||||
pr_err("QAT: Failed to create bank debug entry.\n");
|
||||
debugfs_remove(bank->bank_debug_dir);
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
|
||||
{
|
||||
debugfs_remove(bank->bank_debug_cfg);
|
||||
debugfs_remove(bank->bank_debug_dir);
|
||||
}
|
118
drivers/crypto/qat/qat_common/adf_transport_internal.h
Normal file
118
drivers/crypto/qat/qat_common/adf_transport_internal.h
Normal file
@ -0,0 +1,118 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_TRANSPORT_INTRN_H
|
||||
#define ADF_TRANSPORT_INTRN_H
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/spinlock_types.h>
|
||||
#include "adf_transport.h"
|
||||
|
||||
struct adf_etr_ring_debug_entry {
|
||||
char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
|
||||
struct dentry *debug;
|
||||
};
|
||||
|
||||
struct adf_etr_ring_data {
|
||||
void *base_addr;
|
||||
atomic_t *inflights;
|
||||
spinlock_t lock; /* protects ring data struct */
|
||||
adf_callback_fn callback;
|
||||
struct adf_etr_bank_data *bank;
|
||||
dma_addr_t dma_addr;
|
||||
uint16_t head;
|
||||
uint16_t tail;
|
||||
uint8_t ring_number;
|
||||
uint8_t ring_size;
|
||||
uint8_t msg_size;
|
||||
uint8_t reserved;
|
||||
struct adf_etr_ring_debug_entry *ring_debug;
|
||||
} __packed;
|
||||
|
||||
struct adf_etr_bank_data {
|
||||
struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
|
||||
struct tasklet_struct resp_hanlder;
|
||||
void __iomem *csr_addr;
|
||||
struct adf_accel_dev *accel_dev;
|
||||
uint32_t irq_coalesc_timer;
|
||||
uint16_t ring_mask;
|
||||
uint16_t irq_mask;
|
||||
spinlock_t lock; /* protects bank data struct */
|
||||
struct dentry *bank_debug_dir;
|
||||
struct dentry *bank_debug_cfg;
|
||||
uint32_t bank_number;
|
||||
} __packed;
|
||||
|
||||
struct adf_etr_data {
|
||||
struct adf_etr_bank_data *banks;
|
||||
struct dentry *debug;
|
||||
};
|
||||
|
||||
void adf_response_handler(unsigned long bank_addr);
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
#include <linux/debugfs.h>
|
||||
int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
|
||||
void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
|
||||
int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
|
||||
void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
|
||||
#else
|
||||
static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define adf_bank_debugfs_rm(bank) do {} while (0)
|
||||
|
||||
static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
|
||||
const char *name)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define adf_ring_debugfs_rm(ring) do {} while (0)
|
||||
#endif
|
||||
#endif
|
316
drivers/crypto/qat/qat_common/icp_qat_fw.h
Normal file
316
drivers/crypto/qat/qat_common/icp_qat_fw.h
Normal file
@ -0,0 +1,316 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _ICP_QAT_FW_H_
|
||||
#define _ICP_QAT_FW_H_
|
||||
#include <linux/types.h>
|
||||
#include "icp_qat_hw.h"
|
||||
|
||||
#define QAT_FIELD_SET(flags, val, bitpos, mask) \
|
||||
{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
|
||||
(((val) & (mask)) << (bitpos))) ; }
|
||||
|
||||
#define QAT_FIELD_GET(flags, bitpos, mask) \
|
||||
(((flags) >> (bitpos)) & (mask))
|
||||
|
||||
#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
|
||||
#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
|
||||
#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
|
||||
#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_1 1
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_2 2
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_3 3
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_4 4
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_5 5
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_6 6
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_7 7
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_10 10
|
||||
#define ICP_QAT_FW_NUM_LONGWORDS_13 13
|
||||
#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
|
||||
|
||||
enum icp_qat_fw_comn_resp_serv_id {
|
||||
ICP_QAT_FW_COMN_RESP_SERV_NULL,
|
||||
ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
|
||||
ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
|
||||
};
|
||||
|
||||
enum icp_qat_fw_comn_request_id {
|
||||
ICP_QAT_FW_COMN_REQ_NULL = 0,
|
||||
ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
|
||||
ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
|
||||
ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
|
||||
ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
|
||||
ICP_QAT_FW_COMN_REQ_DELIMITER
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_req_hdr_cd_pars {
|
||||
union {
|
||||
struct {
|
||||
uint64_t content_desc_addr;
|
||||
uint16_t content_desc_resrvd1;
|
||||
uint8_t content_desc_params_sz;
|
||||
uint8_t content_desc_hdr_resrvd2;
|
||||
uint32_t content_desc_resrvd3;
|
||||
} s;
|
||||
struct {
|
||||
uint32_t serv_specif_fields[4];
|
||||
} s1;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_req_mid {
|
||||
uint64_t opaque_data;
|
||||
uint64_t src_data_addr;
|
||||
uint64_t dest_data_addr;
|
||||
uint32_t src_length;
|
||||
uint32_t dst_length;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_req_cd_ctrl {
|
||||
uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_req_hdr {
|
||||
uint8_t resrvd1;
|
||||
uint8_t service_cmd_id;
|
||||
uint8_t service_type;
|
||||
uint8_t hdr_flags;
|
||||
uint16_t serv_specif_flags;
|
||||
uint16_t comn_req_flags;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_req_rqpars {
|
||||
uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_req {
|
||||
struct icp_qat_fw_comn_req_hdr comn_hdr;
|
||||
struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
|
||||
struct icp_qat_fw_comn_req_mid comn_mid;
|
||||
struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
|
||||
struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_error {
|
||||
uint8_t xlat_err_code;
|
||||
uint8_t cmp_err_code;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_resp_hdr {
|
||||
uint8_t resrvd1;
|
||||
uint8_t service_id;
|
||||
uint8_t response_type;
|
||||
uint8_t hdr_flags;
|
||||
struct icp_qat_fw_comn_error comn_error;
|
||||
uint8_t comn_status;
|
||||
uint8_t cmd_id;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_comn_resp {
|
||||
struct icp_qat_fw_comn_resp_hdr comn_hdr;
|
||||
uint64_t opaque_data;
|
||||
uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
|
||||
};
|
||||
|
||||
#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
|
||||
#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
|
||||
#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
|
||||
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
|
||||
#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
|
||||
|
||||
#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
|
||||
icp_qat_fw_comn_req_hdr_t.service_type
|
||||
|
||||
#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
|
||||
icp_qat_fw_comn_req_hdr_t.service_type = val
|
||||
|
||||
#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
|
||||
icp_qat_fw_comn_req_hdr_t.service_cmd_id
|
||||
|
||||
#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
|
||||
icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
|
||||
|
||||
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
|
||||
ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
|
||||
|
||||
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
|
||||
ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
|
||||
|
||||
#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
|
||||
QAT_FIELD_GET(hdr_flags, \
|
||||
ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
|
||||
ICP_QAT_FW_COMN_VALID_FLAG_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
|
||||
(hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
|
||||
QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
|
||||
ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
|
||||
ICP_QAT_FW_COMN_VALID_FLAG_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
|
||||
(((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
|
||||
ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
|
||||
|
||||
#define QAT_COMN_PTR_TYPE_BITPOS 0
|
||||
#define QAT_COMN_PTR_TYPE_MASK 0x1
|
||||
#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
|
||||
#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
|
||||
#define QAT_COMN_PTR_TYPE_FLAT 0x0
|
||||
#define QAT_COMN_PTR_TYPE_SGL 0x1
|
||||
#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
|
||||
#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
|
||||
|
||||
#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
|
||||
((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
|
||||
| (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
|
||||
|
||||
#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
|
||||
QAT_COMN_CD_FLD_TYPE_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
|
||||
QAT_COMN_PTR_TYPE_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
|
||||
QAT_COMN_CD_FLD_TYPE_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
|
||||
#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
|
||||
#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
|
||||
#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
|
||||
|
||||
#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
|
||||
((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
|
||||
>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
|
||||
|
||||
#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
|
||||
{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
|
||||
& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
|
||||
((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
|
||||
& ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
|
||||
|
||||
#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
|
||||
(((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
|
||||
{ ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
|
||||
& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
|
||||
((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
|
||||
|
||||
#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
|
||||
#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
|
||||
#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
|
||||
#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
|
||||
#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
|
||||
#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
|
||||
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
|
||||
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
|
||||
|
||||
#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
|
||||
((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
|
||||
QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
|
||||
(((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
|
||||
QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
|
||||
(((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
|
||||
QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
|
||||
(((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
|
||||
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
|
||||
|
||||
#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
|
||||
QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
|
||||
QAT_COMN_RESP_CRYPTO_STATUS_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
|
||||
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
|
||||
QAT_COMN_RESP_CMP_STATUS_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
|
||||
QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
|
||||
QAT_COMN_RESP_XLAT_STATUS_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
|
||||
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
|
||||
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
|
||||
|
||||
#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
|
||||
#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
|
||||
#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
|
||||
#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
|
||||
#define ERR_CODE_NO_ERROR 0
|
||||
#define ERR_CODE_INVALID_BLOCK_TYPE -1
|
||||
#define ERR_CODE_NO_MATCH_ONES_COMP -2
|
||||
#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
|
||||
#define ERR_CODE_INCOMPLETE_LEN -4
|
||||
#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
|
||||
#define ERR_CODE_RPT_GT_SPEC_LEN -6
|
||||
#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
|
||||
#define ERR_CODE_INV_DIS_CODE_LEN -8
|
||||
#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
|
||||
#define ERR_CODE_DIS_TOO_FAR_BACK -10
|
||||
#define ERR_CODE_OVERFLOW_ERROR -11
|
||||
#define ERR_CODE_SOFT_ERROR -12
|
||||
#define ERR_CODE_FATAL_ERROR -13
|
||||
#define ERR_CODE_SSM_ERROR -14
|
||||
#define ERR_CODE_ENDPOINT_ERROR -15
|
||||
|
||||
enum icp_qat_fw_slice {
|
||||
ICP_QAT_FW_SLICE_NULL = 0,
|
||||
ICP_QAT_FW_SLICE_CIPHER = 1,
|
||||
ICP_QAT_FW_SLICE_AUTH = 2,
|
||||
ICP_QAT_FW_SLICE_DRAM_RD = 3,
|
||||
ICP_QAT_FW_SLICE_DRAM_WR = 4,
|
||||
ICP_QAT_FW_SLICE_COMP = 5,
|
||||
ICP_QAT_FW_SLICE_XLAT = 6,
|
||||
ICP_QAT_FW_SLICE_DELIMITER
|
||||
};
|
||||
#endif
|
131
drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
Normal file
131
drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
Normal file
@ -0,0 +1,131 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
|
||||
#define _ICP_QAT_FW_INIT_ADMIN_H_
|
||||
|
||||
#include "icp_qat_fw.h"
|
||||
|
||||
enum icp_qat_fw_init_admin_cmd_id {
|
||||
ICP_QAT_FW_INIT_ME = 0,
|
||||
ICP_QAT_FW_TRNG_ENABLE = 1,
|
||||
ICP_QAT_FW_TRNG_DISABLE = 2,
|
||||
ICP_QAT_FW_CONSTANTS_CFG = 3,
|
||||
ICP_QAT_FW_STATUS_GET = 4,
|
||||
ICP_QAT_FW_COUNTERS_GET = 5,
|
||||
ICP_QAT_FW_LOOPBACK = 6,
|
||||
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
|
||||
ICP_QAT_FW_HEARTBEAT_GET = 8
|
||||
};
|
||||
|
||||
enum icp_qat_fw_init_admin_resp_status {
|
||||
ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
|
||||
ICP_QAT_FW_INIT_RESP_STATUS_FAIL
|
||||
};
|
||||
|
||||
struct icp_qat_fw_init_admin_req {
|
||||
uint16_t init_cfg_sz;
|
||||
uint8_t resrvd1;
|
||||
uint8_t init_admin_cmd_id;
|
||||
uint32_t resrvd2;
|
||||
uint64_t opaque_data;
|
||||
uint64_t init_cfg_ptr;
|
||||
uint64_t resrvd3;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_init_admin_resp_hdr {
|
||||
uint8_t flags;
|
||||
uint8_t resrvd1;
|
||||
uint8_t status;
|
||||
uint8_t init_admin_cmd_id;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_init_admin_resp_pars {
|
||||
union {
|
||||
uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
|
||||
struct {
|
||||
uint32_t version_patch_num;
|
||||
uint8_t context_id;
|
||||
uint8_t ae_id;
|
||||
uint16_t resrvd1;
|
||||
uint64_t resrvd2;
|
||||
} s1;
|
||||
struct {
|
||||
uint64_t req_rec_count;
|
||||
uint64_t resp_sent_count;
|
||||
} s2;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_init_admin_resp {
|
||||
struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
|
||||
union {
|
||||
uint32_t resrvd2;
|
||||
struct {
|
||||
uint16_t version_minor_num;
|
||||
uint16_t version_major_num;
|
||||
} s;
|
||||
} u;
|
||||
uint64_t opaque_data;
|
||||
struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
|
||||
};
|
||||
|
||||
#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
|
||||
#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
|
||||
#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
|
||||
#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
|
||||
#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
|
||||
#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
|
||||
ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
|
||||
|
||||
#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
|
||||
ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
|
||||
|
||||
#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
|
||||
QAT_FIELD_GET(flags, \
|
||||
ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
|
||||
ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
|
||||
#endif
|
404
drivers/crypto/qat/qat_common/icp_qat_fw_la.h
Normal file
404
drivers/crypto/qat/qat_common/icp_qat_fw_la.h
Normal file
@ -0,0 +1,404 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _ICP_QAT_FW_LA_H_
|
||||
#define _ICP_QAT_FW_LA_H_
|
||||
#include "icp_qat_fw.h"
|
||||
|
||||
enum icp_qat_fw_la_cmd_id {
|
||||
ICP_QAT_FW_LA_CMD_CIPHER = 0,
|
||||
ICP_QAT_FW_LA_CMD_AUTH = 1,
|
||||
ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
|
||||
ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
|
||||
ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
|
||||
ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
|
||||
ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
|
||||
ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
|
||||
ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
|
||||
ICP_QAT_FW_LA_CMD_MGF1 = 9,
|
||||
ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
|
||||
ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
|
||||
ICP_QAT_FW_LA_CMD_DELIMITER = 12
|
||||
};
|
||||
|
||||
#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
|
||||
#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
|
||||
#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
|
||||
#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
|
||||
|
||||
struct icp_qat_fw_la_bulk_req {
|
||||
struct icp_qat_fw_comn_req_hdr comn_hdr;
|
||||
struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
|
||||
struct icp_qat_fw_comn_req_mid comn_mid;
|
||||
struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
|
||||
struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
|
||||
};
|
||||
|
||||
#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
|
||||
#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
|
||||
#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
|
||||
#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
|
||||
#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
|
||||
#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
|
||||
#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
|
||||
#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
|
||||
#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
|
||||
#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
|
||||
#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
|
||||
#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
|
||||
#define ICP_QAT_FW_LA_GCM_PROTO 2
|
||||
#define ICP_QAT_FW_LA_CCM_PROTO 1
|
||||
#define ICP_QAT_FW_LA_NO_PROTO 0
|
||||
#define QAT_LA_PROTO_BITPOS 7
|
||||
#define QAT_LA_PROTO_MASK 0x7
|
||||
#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
|
||||
#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
|
||||
#define QAT_LA_CMP_AUTH_RES_BITPOS 6
|
||||
#define QAT_LA_CMP_AUTH_RES_MASK 0x1
|
||||
#define ICP_QAT_FW_LA_RET_AUTH_RES 1
|
||||
#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
|
||||
#define QAT_LA_RET_AUTH_RES_BITPOS 5
|
||||
#define QAT_LA_RET_AUTH_RES_MASK 0x1
|
||||
#define ICP_QAT_FW_LA_UPDATE_STATE 1
|
||||
#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
|
||||
#define QAT_LA_UPDATE_STATE_BITPOS 4
|
||||
#define QAT_LA_UPDATE_STATE_MASK 0x1
|
||||
#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
|
||||
#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
|
||||
#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
|
||||
#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
|
||||
#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
|
||||
#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
|
||||
#define QAT_LA_CIPH_IV_FLD_BITPOS 2
|
||||
#define QAT_LA_CIPH_IV_FLD_MASK 0x1
|
||||
#define ICP_QAT_FW_LA_PARTIAL_NONE 0
|
||||
#define ICP_QAT_FW_LA_PARTIAL_START 1
|
||||
#define ICP_QAT_FW_LA_PARTIAL_MID 3
|
||||
#define ICP_QAT_FW_LA_PARTIAL_END 2
|
||||
#define QAT_LA_PARTIAL_BITPOS 0
|
||||
#define QAT_LA_PARTIAL_MASK 0x3
|
||||
#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
|
||||
cmp_auth, ret_auth, update_state, \
|
||||
ciph_iv, ciphcfg, partial) \
|
||||
(((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
|
||||
QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
|
||||
((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
|
||||
QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
|
||||
((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
|
||||
QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
|
||||
((proto & QAT_LA_PROTO_MASK) << \
|
||||
QAT_LA_PROTO_BITPOS) | \
|
||||
((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
|
||||
QAT_LA_CMP_AUTH_RES_BITPOS) | \
|
||||
((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
|
||||
QAT_LA_RET_AUTH_RES_BITPOS) | \
|
||||
((update_state & QAT_LA_UPDATE_STATE_MASK) << \
|
||||
QAT_LA_UPDATE_STATE_BITPOS) | \
|
||||
((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
|
||||
QAT_LA_CIPH_IV_FLD_BITPOS) | \
|
||||
((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
|
||||
QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
|
||||
((partial & QAT_LA_PARTIAL_MASK) << \
|
||||
QAT_LA_PARTIAL_BITPOS))
|
||||
|
||||
#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
|
||||
QAT_LA_CIPH_IV_FLD_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
|
||||
QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
|
||||
QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
|
||||
QAT_LA_GCM_IV_LEN_FLAG_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_PROTO_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
|
||||
QAT_LA_CMP_AUTH_RES_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
|
||||
QAT_LA_RET_AUTH_RES_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
|
||||
QAT_LA_DIGEST_IN_BUFFER_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
|
||||
QAT_LA_UPDATE_STATE_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
|
||||
QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
|
||||
QAT_LA_PARTIAL_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
|
||||
QAT_LA_CIPH_IV_FLD_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
|
||||
QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
|
||||
QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
|
||||
QAT_LA_GCM_IV_LEN_FLAG_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
|
||||
QAT_LA_PROTO_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
|
||||
QAT_LA_CMP_AUTH_RES_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
|
||||
QAT_LA_RET_AUTH_RES_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
|
||||
QAT_LA_DIGEST_IN_BUFFER_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
|
||||
QAT_LA_UPDATE_STATE_MASK)
|
||||
|
||||
#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
|
||||
QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
|
||||
QAT_LA_PARTIAL_MASK)
|
||||
|
||||
struct icp_qat_fw_cipher_req_hdr_cd_pars {
|
||||
union {
|
||||
struct {
|
||||
uint64_t content_desc_addr;
|
||||
uint16_t content_desc_resrvd1;
|
||||
uint8_t content_desc_params_sz;
|
||||
uint8_t content_desc_hdr_resrvd2;
|
||||
uint32_t content_desc_resrvd3;
|
||||
} s;
|
||||
struct {
|
||||
uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
|
||||
} s1;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
|
||||
union {
|
||||
struct {
|
||||
uint64_t content_desc_addr;
|
||||
uint16_t content_desc_resrvd1;
|
||||
uint8_t content_desc_params_sz;
|
||||
uint8_t content_desc_hdr_resrvd2;
|
||||
uint32_t content_desc_resrvd3;
|
||||
} s;
|
||||
struct {
|
||||
uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
|
||||
} sl;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_cipher_cd_ctrl_hdr {
|
||||
uint8_t cipher_state_sz;
|
||||
uint8_t cipher_key_sz;
|
||||
uint8_t cipher_cfg_offset;
|
||||
uint8_t next_curr_id;
|
||||
uint8_t cipher_padding_sz;
|
||||
uint8_t resrvd1;
|
||||
uint16_t resrvd2;
|
||||
uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
|
||||
};
|
||||
|
||||
struct icp_qat_fw_auth_cd_ctrl_hdr {
|
||||
uint32_t resrvd1;
|
||||
uint8_t resrvd2;
|
||||
uint8_t hash_flags;
|
||||
uint8_t hash_cfg_offset;
|
||||
uint8_t next_curr_id;
|
||||
uint8_t resrvd3;
|
||||
uint8_t outer_prefix_sz;
|
||||
uint8_t final_sz;
|
||||
uint8_t inner_res_sz;
|
||||
uint8_t resrvd4;
|
||||
uint8_t inner_state1_sz;
|
||||
uint8_t inner_state2_offset;
|
||||
uint8_t inner_state2_sz;
|
||||
uint8_t outer_config_offset;
|
||||
uint8_t outer_state1_sz;
|
||||
uint8_t outer_res_sz;
|
||||
uint8_t outer_prefix_offset;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
|
||||
uint8_t cipher_state_sz;
|
||||
uint8_t cipher_key_sz;
|
||||
uint8_t cipher_cfg_offset;
|
||||
uint8_t next_curr_id_cipher;
|
||||
uint8_t cipher_padding_sz;
|
||||
uint8_t hash_flags;
|
||||
uint8_t hash_cfg_offset;
|
||||
uint8_t next_curr_id_auth;
|
||||
uint8_t resrvd1;
|
||||
uint8_t outer_prefix_sz;
|
||||
uint8_t final_sz;
|
||||
uint8_t inner_res_sz;
|
||||
uint8_t resrvd2;
|
||||
uint8_t inner_state1_sz;
|
||||
uint8_t inner_state2_offset;
|
||||
uint8_t inner_state2_sz;
|
||||
uint8_t outer_config_offset;
|
||||
uint8_t outer_state1_sz;
|
||||
uint8_t outer_res_sz;
|
||||
uint8_t outer_prefix_offset;
|
||||
};
|
||||
|
||||
#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
|
||||
#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
|
||||
#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
|
||||
#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
|
||||
(sizeof(struct icp_qat_fw_la_cipher_req_params_t))
|
||||
#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
|
||||
|
||||
struct icp_qat_fw_la_cipher_req_params {
|
||||
uint32_t cipher_offset;
|
||||
uint32_t cipher_length;
|
||||
union {
|
||||
uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
|
||||
struct {
|
||||
uint64_t cipher_IV_ptr;
|
||||
uint64_t resrvd1;
|
||||
} s;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_la_auth_req_params {
|
||||
uint32_t auth_off;
|
||||
uint32_t auth_len;
|
||||
union {
|
||||
uint64_t auth_partial_st_prefix;
|
||||
uint64_t aad_adr;
|
||||
} u1;
|
||||
uint64_t auth_res_addr;
|
||||
union {
|
||||
uint8_t inner_prefix_sz;
|
||||
uint8_t aad_sz;
|
||||
} u2;
|
||||
uint8_t resrvd1;
|
||||
uint8_t hash_state_sz;
|
||||
uint8_t auth_res_sz;
|
||||
} __packed;
|
||||
|
||||
struct icp_qat_fw_la_auth_req_params_resrvd_flds {
|
||||
uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
|
||||
union {
|
||||
uint8_t inner_prefix_sz;
|
||||
uint8_t aad_sz;
|
||||
} u2;
|
||||
uint8_t resrvd1;
|
||||
uint16_t resrvd2;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_la_resp {
|
||||
struct icp_qat_fw_comn_resp_hdr comn_resp;
|
||||
uint64_t opaque_data;
|
||||
uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
|
||||
};
|
||||
|
||||
#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
|
||||
((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
|
||||
ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
|
||||
|
||||
#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
|
||||
{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
|
||||
((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
|
||||
& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
|
||||
((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
|
||||
& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
|
||||
|
||||
#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
|
||||
(((cd_ctrl_hdr_t)->next_curr_id_cipher) \
|
||||
& ICP_QAT_FW_COMN_CURR_ID_MASK)
|
||||
|
||||
#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
|
||||
{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
|
||||
((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
|
||||
& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
|
||||
((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
|
||||
|
||||
#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
|
||||
((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
|
||||
>> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
|
||||
|
||||
#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
|
||||
{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
|
||||
((((cd_ctrl_hdr_t)->next_curr_id_auth) \
|
||||
& ICP_QAT_FW_COMN_CURR_ID_MASK) | \
|
||||
((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
|
||||
& ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
|
||||
|
||||
#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
|
||||
(((cd_ctrl_hdr_t)->next_curr_id_auth) \
|
||||
& ICP_QAT_FW_COMN_CURR_ID_MASK)
|
||||
|
||||
#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
|
||||
{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
|
||||
((((cd_ctrl_hdr_t)->next_curr_id_auth) \
|
||||
& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
|
||||
((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
|
||||
|
||||
#endif
|
78
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
Normal file
78
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
|
||||
#define __ICP_QAT_FW_LOADER_HANDLE_H__
|
||||
#include "icp_qat_uclo.h"
|
||||
|
||||
struct icp_qat_fw_loader_ae_data {
|
||||
unsigned int state;
|
||||
unsigned int ustore_size;
|
||||
unsigned int free_addr;
|
||||
unsigned int free_size;
|
||||
unsigned int live_ctx_mask;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_loader_hal_handle {
|
||||
struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
|
||||
unsigned int ae_mask;
|
||||
unsigned int slice_mask;
|
||||
unsigned int revision_id;
|
||||
unsigned int ae_max_num;
|
||||
unsigned int upc_mask;
|
||||
unsigned int max_ustore;
|
||||
};
|
||||
|
||||
struct icp_qat_fw_loader_handle {
|
||||
struct icp_qat_fw_loader_hal_handle *hal_handle;
|
||||
void *obj_handle;
|
||||
void __iomem *hal_sram_addr_v;
|
||||
void __iomem *hal_cap_g_ctl_csr_addr_v;
|
||||
void __iomem *hal_cap_ae_xfer_csr_addr_v;
|
||||
void __iomem *hal_cap_ae_local_csr_addr_v;
|
||||
void __iomem *hal_ep_csr_addr_v;
|
||||
};
|
||||
#endif
|
125
drivers/crypto/qat/qat_common/icp_qat_hal.h
Normal file
125
drivers/crypto/qat/qat_common/icp_qat_hal.h
Normal file
@ -0,0 +1,125 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef __ICP_QAT_HAL_H
|
||||
#define __ICP_QAT_HAL_H
|
||||
#include "icp_qat_fw_loader_handle.h"
|
||||
|
||||
enum hal_global_csr {
|
||||
MISC_CONTROL = 0x04,
|
||||
ICP_RESET = 0x0c,
|
||||
ICP_GLOBAL_CLK_ENABLE = 0x50
|
||||
};
|
||||
|
||||
enum hal_ae_csr {
|
||||
USTORE_ADDRESS = 0x000,
|
||||
USTORE_DATA_LOWER = 0x004,
|
||||
USTORE_DATA_UPPER = 0x008,
|
||||
ALU_OUT = 0x010,
|
||||
CTX_ARB_CNTL = 0x014,
|
||||
CTX_ENABLES = 0x018,
|
||||
CC_ENABLE = 0x01c,
|
||||
CSR_CTX_POINTER = 0x020,
|
||||
CTX_STS_INDIRECT = 0x040,
|
||||
ACTIVE_CTX_STATUS = 0x044,
|
||||
CTX_SIG_EVENTS_INDIRECT = 0x048,
|
||||
CTX_SIG_EVENTS_ACTIVE = 0x04c,
|
||||
CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
|
||||
LM_ADDR_0_INDIRECT = 0x060,
|
||||
LM_ADDR_1_INDIRECT = 0x068,
|
||||
INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
|
||||
INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
|
||||
FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
|
||||
TIMESTAMP_LOW = 0x0c0,
|
||||
TIMESTAMP_HIGH = 0x0c4,
|
||||
PROFILE_COUNT = 0x144,
|
||||
SIGNATURE_ENABLE = 0x150,
|
||||
AE_MISC_CONTROL = 0x160,
|
||||
LOCAL_CSR_STATUS = 0x180,
|
||||
};
|
||||
|
||||
#define UA_ECS (0x1 << 31)
|
||||
#define ACS_ABO_BITPOS 31
|
||||
#define ACS_ACNO 0x7
|
||||
#define CE_ENABLE_BITPOS 0x8
|
||||
#define CE_LMADDR_0_GLOBAL_BITPOS 16
|
||||
#define CE_LMADDR_1_GLOBAL_BITPOS 17
|
||||
#define CE_NN_MODE_BITPOS 20
|
||||
#define CE_REG_PAR_ERR_BITPOS 25
|
||||
#define CE_BREAKPOINT_BITPOS 27
|
||||
#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
|
||||
#define CE_INUSE_CONTEXTS_BITPOS 31
|
||||
#define CE_NN_MODE (0x1 << CE_NN_MODE_BITPOS)
|
||||
#define CE_INUSE_CONTEXTS (0x1 << CE_INUSE_CONTEXTS_BITPOS)
|
||||
#define XCWE_VOLUNTARY (0x1)
|
||||
#define LCS_STATUS (0x1)
|
||||
#define MMC_SHARE_CS_BITPOS 2
|
||||
#define GLOBAL_CSR 0xA00
|
||||
|
||||
#define SET_CAP_CSR(handle, csr, val) \
|
||||
ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
|
||||
#define GET_CAP_CSR(handle, csr) \
|
||||
ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr)
|
||||
#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
|
||||
#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
|
||||
#define AE_CSR(handle, ae) \
|
||||
(handle->hal_cap_ae_local_csr_addr_v + \
|
||||
((ae & handle->hal_handle->ae_mask) << 12))
|
||||
#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr))
|
||||
#define SET_AE_CSR(handle, ae, csr, val) \
|
||||
ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
|
||||
#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
|
||||
#define AE_XFER(handle, ae) \
|
||||
(handle->hal_cap_ae_xfer_csr_addr_v + \
|
||||
((ae & handle->hal_handle->ae_mask) << 12))
|
||||
#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
|
||||
((reg & 0xff) << 2))
|
||||
#define SET_AE_XFER(handle, ae, reg, val) \
|
||||
ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
|
||||
#define SRAM_WRITE(handle, addr, val) \
|
||||
ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
|
||||
#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr)
|
||||
#endif
|
305
drivers/crypto/qat/qat_common/icp_qat_hw.h
Normal file
305
drivers/crypto/qat/qat_common/icp_qat_hw.h
Normal file
@ -0,0 +1,305 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _ICP_QAT_HW_H_
|
||||
#define _ICP_QAT_HW_H_
|
||||
|
||||
enum icp_qat_hw_ae_id {
|
||||
ICP_QAT_HW_AE_0 = 0,
|
||||
ICP_QAT_HW_AE_1 = 1,
|
||||
ICP_QAT_HW_AE_2 = 2,
|
||||
ICP_QAT_HW_AE_3 = 3,
|
||||
ICP_QAT_HW_AE_4 = 4,
|
||||
ICP_QAT_HW_AE_5 = 5,
|
||||
ICP_QAT_HW_AE_6 = 6,
|
||||
ICP_QAT_HW_AE_7 = 7,
|
||||
ICP_QAT_HW_AE_8 = 8,
|
||||
ICP_QAT_HW_AE_9 = 9,
|
||||
ICP_QAT_HW_AE_10 = 10,
|
||||
ICP_QAT_HW_AE_11 = 11,
|
||||
ICP_QAT_HW_AE_DELIMITER = 12
|
||||
};
|
||||
|
||||
enum icp_qat_hw_qat_id {
|
||||
ICP_QAT_HW_QAT_0 = 0,
|
||||
ICP_QAT_HW_QAT_1 = 1,
|
||||
ICP_QAT_HW_QAT_2 = 2,
|
||||
ICP_QAT_HW_QAT_3 = 3,
|
||||
ICP_QAT_HW_QAT_4 = 4,
|
||||
ICP_QAT_HW_QAT_5 = 5,
|
||||
ICP_QAT_HW_QAT_DELIMITER = 6
|
||||
};
|
||||
|
||||
enum icp_qat_hw_auth_algo {
|
||||
ICP_QAT_HW_AUTH_ALGO_NULL = 0,
|
||||
ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
|
||||
ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
|
||||
ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
|
||||
ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
|
||||
ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
|
||||
ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
|
||||
ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
|
||||
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
|
||||
ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
|
||||
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
|
||||
ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
|
||||
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
|
||||
ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
|
||||
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
|
||||
ICP_QAT_HW_AUTH_RESERVED_1 = 15,
|
||||
ICP_QAT_HW_AUTH_RESERVED_2 = 16,
|
||||
ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
|
||||
ICP_QAT_HW_AUTH_RESERVED_3 = 18,
|
||||
ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
|
||||
ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
|
||||
};
|
||||
|
||||
enum icp_qat_hw_auth_mode {
|
||||
ICP_QAT_HW_AUTH_MODE0 = 0,
|
||||
ICP_QAT_HW_AUTH_MODE1 = 1,
|
||||
ICP_QAT_HW_AUTH_MODE2 = 2,
|
||||
ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
|
||||
};
|
||||
|
||||
struct icp_qat_hw_auth_config {
|
||||
uint32_t config;
|
||||
uint32_t reserved;
|
||||
};
|
||||
|
||||
#define QAT_AUTH_MODE_BITPOS 4
|
||||
#define QAT_AUTH_MODE_MASK 0xF
|
||||
#define QAT_AUTH_ALGO_BITPOS 0
|
||||
#define QAT_AUTH_ALGO_MASK 0xF
|
||||
#define QAT_AUTH_CMP_BITPOS 8
|
||||
#define QAT_AUTH_CMP_MASK 0x7F
|
||||
#define QAT_AUTH_SHA3_PADDING_BITPOS 16
|
||||
#define QAT_AUTH_SHA3_PADDING_MASK 0x1
|
||||
#define QAT_AUTH_ALGO_SHA3_BITPOS 22
|
||||
#define QAT_AUTH_ALGO_SHA3_MASK 0x3
|
||||
#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
|
||||
(((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
|
||||
((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
|
||||
(((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
|
||||
QAT_AUTH_ALGO_SHA3_BITPOS) | \
|
||||
(((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
|
||||
(algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
|
||||
& QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
|
||||
((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
|
||||
|
||||
struct icp_qat_hw_auth_counter {
|
||||
__be32 counter;
|
||||
uint32_t reserved;
|
||||
};
|
||||
|
||||
#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
|
||||
#define QAT_AUTH_COUNT_BITPOS 0
|
||||
#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
|
||||
(((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
|
||||
|
||||
struct icp_qat_hw_auth_setup {
|
||||
struct icp_qat_hw_auth_config auth_config;
|
||||
struct icp_qat_hw_auth_counter auth_counter;
|
||||
};
|
||||
|
||||
#define QAT_HW_DEFAULT_ALIGNMENT 8
|
||||
#define QAT_HW_ROUND_UP(val, n) (((val) + ((n)-1)) & (~(n-1)))
|
||||
#define ICP_QAT_HW_NULL_STATE1_SZ 32
|
||||
#define ICP_QAT_HW_MD5_STATE1_SZ 16
|
||||
#define ICP_QAT_HW_SHA1_STATE1_SZ 20
|
||||
#define ICP_QAT_HW_SHA224_STATE1_SZ 32
|
||||
#define ICP_QAT_HW_SHA256_STATE1_SZ 32
|
||||
#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
|
||||
#define ICP_QAT_HW_SHA384_STATE1_SZ 64
|
||||
#define ICP_QAT_HW_SHA512_STATE1_SZ 64
|
||||
#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
|
||||
#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
|
||||
#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
|
||||
#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
|
||||
#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
|
||||
#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
|
||||
#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
|
||||
#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
|
||||
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
|
||||
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
|
||||
#define ICP_QAT_HW_NULL_STATE2_SZ 32
|
||||
#define ICP_QAT_HW_MD5_STATE2_SZ 16
|
||||
#define ICP_QAT_HW_SHA1_STATE2_SZ 20
|
||||
#define ICP_QAT_HW_SHA224_STATE2_SZ 32
|
||||
#define ICP_QAT_HW_SHA256_STATE2_SZ 32
|
||||
#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
|
||||
#define ICP_QAT_HW_SHA384_STATE2_SZ 64
|
||||
#define ICP_QAT_HW_SHA512_STATE2_SZ 64
|
||||
#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
|
||||
#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
|
||||
#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
|
||||
#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
|
||||
#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
|
||||
#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
|
||||
#define ICP_QAT_HW_F9_IK_SZ 16
|
||||
#define ICP_QAT_HW_F9_FK_SZ 16
|
||||
#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
|
||||
ICP_QAT_HW_F9_FK_SZ)
|
||||
#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
|
||||
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
|
||||
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
|
||||
#define ICP_QAT_HW_GALOIS_H_SZ 16
|
||||
#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
|
||||
#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
|
||||
|
||||
struct icp_qat_hw_auth_sha512 {
|
||||
struct icp_qat_hw_auth_setup inner_setup;
|
||||
uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
|
||||
struct icp_qat_hw_auth_setup outer_setup;
|
||||
uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
|
||||
};
|
||||
|
||||
struct icp_qat_hw_auth_algo_blk {
|
||||
struct icp_qat_hw_auth_sha512 sha;
|
||||
};
|
||||
|
||||
/* Bit position and mask of the GCM length-A field. */
#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
/* Cipher algorithm identifiers.  Values are explicitly numbered because
 * they are written into the hardware cipher-config word — keep stable. */
enum icp_qat_hw_cipher_algo {
	ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
	ICP_QAT_HW_CIPHER_ALGO_DES = 1,
	ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
	ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
	ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
	ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
	ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
	ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
	ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
	ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
	ICP_QAT_HW_CIPHER_DELIMITER = 10	/* one past the last algo */
};
/* Cipher mode identifiers for the hardware config word.  Note the gap:
 * values 4 and 5 are not used by this driver (reserved in the interface). */
enum icp_qat_hw_cipher_mode {
	ICP_QAT_HW_CIPHER_ECB_MODE = 0,
	ICP_QAT_HW_CIPHER_CBC_MODE = 1,
	ICP_QAT_HW_CIPHER_CTR_MODE = 2,
	ICP_QAT_HW_CIPHER_F8_MODE = 3,
	ICP_QAT_HW_CIPHER_XTS_MODE = 6,
	ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
};
/* Hardware cipher configuration word (built with
 * ICP_QAT_HW_CIPHER_CONFIG_BUILD) plus a reserved pad word. */
struct icp_qat_hw_cipher_config {
	uint32_t val;		/* packed mode/algo/convert/dir */
	uint32_t reserved;
};
/* Cipher direction bit (QAT_CIPHER_DIR_BITPOS in the config word). */
enum icp_qat_hw_cipher_dir {
	ICP_QAT_HW_CIPHER_ENCRYPT = 0,
	ICP_QAT_HW_CIPHER_DECRYPT = 1,
};
/* Key-convert bit (QAT_CIPHER_CONVERT_BITPOS in the config word). */
enum icp_qat_hw_cipher_convert {
	ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
	ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
};
/* Field positions/masks inside the cipher config word. */
#define QAT_CIPHER_MODE_BITPOS 4
#define QAT_CIPHER_MODE_MASK 0xF
#define QAT_CIPHER_ALGO_BITPOS 0
#define QAT_CIPHER_ALGO_MASK 0xF
#define QAT_CIPHER_CONVERT_BITPOS 9
#define QAT_CIPHER_CONVERT_MASK 0x1
#define QAT_CIPHER_DIR_BITPOS 8
#define QAT_CIPHER_DIR_MASK 0x1
/* F8 and XTS modes carry two keys, so key buffers are twice the base size. */
#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
/* Pack mode/algo/convert/dir into the hardware cipher config word.
 * Fix: every macro argument is now fully parenthesized so expression
 * arguments (e.g. "a | b") bind to the mask as intended (CERT PRE01-C);
 * the unparenthesized original could skip the masking entirely. */
#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
	((((mode) & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
	 (((algo) & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
	 (((convert) & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
	 (((dir) & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
/* Block sizes, in bytes. */
#define ICP_QAT_HW_DES_BLK_SZ 8
#define ICP_QAT_HW_3DES_BLK_SZ 8
#define ICP_QAT_HW_NULL_BLK_SZ 8
#define ICP_QAT_HW_AES_BLK_SZ 16
#define ICP_QAT_HW_KASUMI_BLK_SZ 8
#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
/* Key buffer sizes, in bytes. */
#define ICP_QAT_HW_NULL_KEY_SZ 256
#define ICP_QAT_HW_DES_KEY_SZ 8
#define ICP_QAT_HW_3DES_KEY_SZ 24
#define ICP_QAT_HW_AES_128_KEY_SZ 16
#define ICP_QAT_HW_AES_192_KEY_SZ 24
#define ICP_QAT_HW_AES_256_KEY_SZ 32
#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
				      QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
				      QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
				      QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
/* Fix: the XTS key-size macros were defined twice (identical copies);
 * the redundant second pair has been removed. */
#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
				       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
				       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
#define ICP_QAT_HW_KASUMI_KEY_SZ 16
#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
				     QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
#define ICP_QAT_HW_ARC4_KEY_SZ 256
#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
struct icp_qat_hw_cipher_aes256_f8 {
|
||||
struct icp_qat_hw_cipher_config cipher_config;
|
||||
uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
|
||||
};
|
||||
|
||||
struct icp_qat_hw_cipher_algo_blk {
|
||||
struct icp_qat_hw_cipher_aes256_f8 aes;
|
||||
};
|
||||
#endif
|
377
drivers/crypto/qat/qat_common/icp_qat_uclo.h
Normal file
377
drivers/crypto/qat/qat_common/icp_qat_uclo.h
Normal file
@ -0,0 +1,377 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef __ICP_QAT_UCLO_H__
|
||||
#define __ICP_QAT_UCLO_H__
|
||||
|
||||
/* Microcode loader (uclo) limits and UOF (micro-object file) constants. */
#define ICP_QAT_AC_C_CPU_TYPE 0x00400000
#define ICP_QAT_UCLO_MAX_AE 12		/* accel engines per device */
#define ICP_QAT_UCLO_MAX_CTX 8		/* hardware contexts per AE */
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
#define ICP_QAT_UCLO_MAX_USTORE 0x4000
#define ICP_QAT_UCLO_MAX_XFER_REG 128
#define ICP_QAT_UCLO_MAX_GPR_REG 128
#define ICP_QAT_UCLO_MAX_NN_REG 128
#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
#define ICP_QAT_UCLO_AE_ALL_CTX 0xff	/* context mask: all contexts */
/* UOF container format: magic, version, chunk identifiers. */
#define ICP_QAT_UOF_OBJID_LEN 8
#define ICP_QAT_UOF_FID 0xc6c2
#define ICP_QAT_UOF_MAJVER 0x4
#define ICP_QAT_UOF_MINVER 0x11
#define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff
#define ICP_QAT_UOF_OBJS "UOF_OBJS"
#define ICP_QAT_UOF_STRT "UOF_STRT"
#define ICP_QAT_UOF_GTID "UOF_GTID"
#define ICP_QAT_UOF_IMAG "UOF_IMAG"
#define ICP_QAT_UOF_IMEM "UOF_IMEM"
#define ICP_QAT_UOF_MSEG "UOF_MSEG"
#define ICP_QAT_UOF_LOCAL_SCOPE 1
/* init_regsym initialization kinds. */
#define ICP_QAT_UOF_INIT_EXPR 0
#define ICP_QAT_UOF_INIT_REG 1
#define ICP_QAT_UOF_INIT_REG_CTX 2
#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3
/* Field extractors for the per-image ae_mode word. */
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
/* Memory region identifiers used by UOF init-mem records.  Explicitly
 * numbered — values are part of the UOF file format. */
enum icp_qat_uof_mem_region {
	ICP_QAT_UOF_SRAM_REGION = 0x0,
	ICP_QAT_UOF_LMEM_REGION = 0x3,
	ICP_QAT_UOF_UMEM_REGION = 0x5
};
/* Register-type identifiers referenced by UOF symbol/init records.
 * Values are implicit (0..20); ordering is part of the file format. */
enum icp_qat_uof_regtype {
	ICP_NO_DEST,
	ICP_GPA_REL,
	ICP_GPA_ABS,
	ICP_GPB_REL,
	ICP_GPB_ABS,
	ICP_SR_REL,
	ICP_SR_RD_REL,
	ICP_SR_WR_REL,
	ICP_SR_ABS,
	ICP_SR_RD_ABS,
	ICP_SR_WR_ABS,
	ICP_DR_REL,
	ICP_DR_RD_REL,
	ICP_DR_WR_REL,
	ICP_DR_ABS,
	ICP_DR_RD_ABS,
	ICP_DR_WR_ABS,
	ICP_LMEM,
	ICP_LMEM0,
	ICP_LMEM1,
	ICP_NEIGH_REL,
};
struct icp_qat_uclo_page {
|
||||
struct icp_qat_uclo_encap_page *encap_page;
|
||||
struct icp_qat_uclo_region *region;
|
||||
unsigned int flags;
|
||||
};
|
||||
|
||||
struct icp_qat_uclo_region {
|
||||
struct icp_qat_uclo_page *loaded;
|
||||
struct icp_qat_uclo_page *page;
|
||||
};
|
||||
|
||||
struct icp_qat_uclo_aeslice {
|
||||
struct icp_qat_uclo_region *region;
|
||||
struct icp_qat_uclo_page *page;
|
||||
struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
|
||||
struct icp_qat_uclo_encapme *encap_image;
|
||||
unsigned int ctx_mask_assigned;
|
||||
unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
|
||||
};
|
||||
|
||||
struct icp_qat_uclo_aedata {
|
||||
unsigned int slice_num;
|
||||
unsigned int eff_ustore_size;
|
||||
struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
|
||||
};
|
||||
|
||||
/* Parsed view of a UOF object: raw buffer plus headers located in it. */
struct icp_qat_uof_encap_obj {
	char *beg_uof;				/* start of raw UOF data */
	struct icp_qat_uof_objhdr *obj_hdr;
	struct icp_qat_uof_chunkhdr *chunk_hdr;
	struct icp_qat_uof_varmem_seg *var_mem_seg;
};

/* A run of microwords destined for a contiguous ustore range. */
struct icp_qat_uclo_encap_uwblock {
	unsigned int start_addr;
	unsigned int words_num;
	uint64_t micro_words;	/* address/offset of the microword data */
};

/* Decoded code page: address ranges plus its microword blocks. */
struct icp_qat_uclo_encap_page {
	unsigned int def_page;
	unsigned int page_region;
	unsigned int beg_addr_v;	/* virtual begin address */
	unsigned int beg_addr_p;	/* physical begin address */
	unsigned int micro_words_num;
	unsigned int uwblock_num;
	struct icp_qat_uclo_encap_uwblock *uwblock;
};

/* Decoded microcode image with its register/init/breakpoint tables. */
struct icp_qat_uclo_encapme {
	struct icp_qat_uof_image *img_ptr;
	struct icp_qat_uclo_encap_page *page;
	unsigned int ae_reg_num;
	struct icp_qat_uof_ae_reg *ae_reg;
	unsigned int init_regsym_num;
	struct icp_qat_uof_init_regsym *init_regsym;
	unsigned int sbreak_num;
	struct icp_qat_uof_sbreak *sbreak;
	unsigned int uwords_num;
};
struct icp_qat_uclo_init_mem_table {
|
||||
unsigned int entry_num;
|
||||
struct icp_qat_uof_initmem *init_mem;
|
||||
};
|
||||
|
||||
struct icp_qat_uclo_objhdr {
|
||||
char *file_buff;
|
||||
unsigned int checksum;
|
||||
unsigned int size;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_strtable {
|
||||
unsigned int table_len;
|
||||
unsigned int reserved;
|
||||
uint64_t strings;
|
||||
};
|
||||
|
||||
struct icp_qat_uclo_objhandle {
|
||||
unsigned int prod_type;
|
||||
unsigned int prod_rev;
|
||||
struct icp_qat_uclo_objhdr *obj_hdr;
|
||||
struct icp_qat_uof_encap_obj encap_uof_obj;
|
||||
struct icp_qat_uof_strtable str_table;
|
||||
struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
|
||||
struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
|
||||
struct icp_qat_uclo_init_mem_table init_mem_tab;
|
||||
struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
|
||||
struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
|
||||
int uimage_num;
|
||||
int uword_in_bytes;
|
||||
int global_inited;
|
||||
unsigned int ae_num;
|
||||
unsigned int ustore_phy_size;
|
||||
void *obj_buf;
|
||||
uint64_t *uword_buf;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_uword_block {
|
||||
unsigned int start_addr;
|
||||
unsigned int words_num;
|
||||
unsigned int uword_offset;
|
||||
unsigned int reserved;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_filehdr {
|
||||
unsigned short file_id;
|
||||
unsigned short reserved1;
|
||||
char min_ver;
|
||||
char maj_ver;
|
||||
unsigned short reserved2;
|
||||
unsigned short max_chunks;
|
||||
unsigned short num_chunks;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_filechunkhdr {
|
||||
char chunk_id[ICP_QAT_UOF_OBJID_LEN];
|
||||
unsigned int checksum;
|
||||
unsigned int offset;
|
||||
unsigned int size;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_objhdr {
|
||||
unsigned int cpu_type;
|
||||
unsigned short min_cpu_ver;
|
||||
unsigned short max_cpu_ver;
|
||||
short max_chunks;
|
||||
short num_chunks;
|
||||
unsigned int reserved1;
|
||||
unsigned int reserved2;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_chunkhdr {
|
||||
char chunk_id[ICP_QAT_UOF_OBJID_LEN];
|
||||
unsigned int offset;
|
||||
unsigned int size;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_memvar_attr {
|
||||
unsigned int offset_in_byte;
|
||||
unsigned int value;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_initmem {
|
||||
unsigned int sym_name;
|
||||
char region;
|
||||
char scope;
|
||||
unsigned short reserved1;
|
||||
unsigned int addr;
|
||||
unsigned int num_in_bytes;
|
||||
unsigned int val_attr_num;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_init_regsym {
|
||||
unsigned int sym_name;
|
||||
char init_type;
|
||||
char value_type;
|
||||
char reg_type;
|
||||
unsigned char ctx;
|
||||
unsigned int reg_addr;
|
||||
unsigned int value;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_varmem_seg {
|
||||
unsigned int sram_base;
|
||||
unsigned int sram_size;
|
||||
unsigned int sram_alignment;
|
||||
unsigned int sdram_base;
|
||||
unsigned int sdram_size;
|
||||
unsigned int sdram_alignment;
|
||||
unsigned int sdram1_base;
|
||||
unsigned int sdram1_size;
|
||||
unsigned int sdram1_alignment;
|
||||
unsigned int scratch_base;
|
||||
unsigned int scratch_size;
|
||||
unsigned int scratch_alignment;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_gtid {
|
||||
char tool_id[ICP_QAT_UOF_OBJID_LEN];
|
||||
int tool_ver;
|
||||
unsigned int reserved1;
|
||||
unsigned int reserved2;
|
||||
};
|
||||
|
||||
struct icp_qat_uof_sbreak {
|
||||
unsigned int page_num;
|
||||
unsigned int virt_uaddr;
|
||||
unsigned char sbreak_type;
|
||||
unsigned char reg_type;
|
||||
unsigned short reserved1;
|
||||
unsigned int addr_offset;
|
||||
unsigned int reg_addr;
|
||||
};
|
||||
|
||||
/* Code page descriptor: addresses plus offsets of its per-page tables. */
struct icp_qat_uof_code_page {
	unsigned int page_region;
	unsigned int page_num;
	unsigned char def_page;
	unsigned char reserved2;
	unsigned short reserved1;
	unsigned int beg_addr_v;	/* virtual begin address */
	unsigned int beg_addr_p;	/* physical begin address */
	unsigned int neigh_reg_tab_offset;
	unsigned int uc_var_tab_offset;
	unsigned int imp_var_tab_offset;
	unsigned int imp_expr_tab_offset;
	unsigned int code_area_offset;
};

/* Microcode image header (UOF_IMAG chunk). */
struct icp_qat_uof_image {
	unsigned int img_name;		/* offset into the string table */
	unsigned int ae_assigned;	/* bitmask of target AEs */
	unsigned int ctx_assigned;	/* bitmask of target contexts */
	unsigned int cpu_type;
	unsigned int entry_address;
	unsigned int fill_pattern[2];	/* pattern for unused ustore */
	unsigned int reloadable_size;
	unsigned char sensitivity;
	unsigned char reserved;
	unsigned short ae_mode;		/* decode with ICP_QAT_*_MODE() */
	unsigned short max_ver;
	unsigned short min_ver;
	unsigned short image_attrib;
	unsigned short reserved2;
	unsigned short page_region_num;
	unsigned short numpages;
	unsigned int reg_tab_offset;
	unsigned int init_reg_sym_tab;
	unsigned int sbreak_tab;
	unsigned int app_metadata;
};

/* Generic table prefix: entry count followed by the entries. */
struct icp_qat_uof_objtable {
	unsigned int entry_num;
};
/* Per-AE register record from the UOF register table. */
struct icp_qat_uof_ae_reg {
	unsigned int name;		/* offset into the string table */
	unsigned int vis_name;
	unsigned short type;
	unsigned short addr;
	unsigned short access_mode;
	unsigned char visible;
	unsigned char reserved1;
	unsigned short ref_count;
	unsigned short reserved2;
	unsigned int xo_id;
};

/* Code area: microword count plus offset of the uword block table. */
struct icp_qat_uof_code_area {
	unsigned int micro_words_num;
	unsigned int uword_block_tab;
};

/* Node in a singly-linked list of batched memory-init writes. */
struct icp_qat_uof_batch_init {
	unsigned int ae;
	unsigned int addr;
	unsigned int *value;
	unsigned int size;
	struct icp_qat_uof_batch_init *next;
};
#endif
|
1038
drivers/crypto/qat/qat_common/qat_algs.c
Normal file
1038
drivers/crypto/qat/qat_common/qat_algs.c
Normal file
File diff suppressed because it is too large
Load Diff
284
drivers/crypto/qat/qat_common/qat_crypto.c
Normal file
284
drivers/crypto/qat/qat_common/qat_crypto.c
Normal file
@ -0,0 +1,284 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "adf_common_drv.h"
|
||||
#include "adf_transport.h"
|
||||
#include "adf_cfg.h"
|
||||
#include "adf_cfg_strings.h"
|
||||
#include "qat_crypto.h"
|
||||
#include "icp_qat_fw.h"
|
||||
|
||||
#define SEC ADF_KERNEL_SEC
|
||||
|
||||
static struct service_hndl qat_crypto;
|
||||
|
||||
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
|
||||
{
|
||||
if (atomic_sub_return(1, &inst->refctr) == 0)
|
||||
adf_dev_put(inst->accel_dev);
|
||||
}
|
||||
|
||||
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct qat_crypto_instance *inst;
|
||||
struct list_head *list_ptr, *tmp;
|
||||
int i;
|
||||
|
||||
list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
|
||||
inst = list_entry(list_ptr, struct qat_crypto_instance, list);
|
||||
|
||||
for (i = 0; i < atomic_read(&inst->refctr); i++)
|
||||
qat_crypto_put_instance(inst);
|
||||
|
||||
if (inst->sym_tx)
|
||||
adf_remove_ring(inst->sym_tx);
|
||||
|
||||
if (inst->sym_rx)
|
||||
adf_remove_ring(inst->sym_rx);
|
||||
|
||||
if (inst->pke_tx)
|
||||
adf_remove_ring(inst->pke_tx);
|
||||
|
||||
if (inst->pke_rx)
|
||||
adf_remove_ring(inst->pke_rx);
|
||||
|
||||
if (inst->rnd_tx)
|
||||
adf_remove_ring(inst->rnd_tx);
|
||||
|
||||
if (inst->rnd_rx)
|
||||
adf_remove_ring(inst->rnd_rx);
|
||||
|
||||
list_del(list_ptr);
|
||||
kfree(inst);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
|
||||
{
|
||||
struct adf_accel_dev *accel_dev = NULL;
|
||||
struct qat_crypto_instance *inst_best = NULL;
|
||||
struct list_head *itr;
|
||||
unsigned long best = ~0;
|
||||
|
||||
list_for_each(itr, adf_devmgr_get_head()) {
|
||||
accel_dev = list_entry(itr, struct adf_accel_dev, list);
|
||||
if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
|
||||
break;
|
||||
accel_dev = NULL;
|
||||
}
|
||||
if (!accel_dev) {
|
||||
pr_err("QAT: Could not find device on give node\n");
|
||||
accel_dev = adf_devmgr_get_first();
|
||||
}
|
||||
if (!accel_dev || !adf_dev_started(accel_dev))
|
||||
return NULL;
|
||||
|
||||
list_for_each(itr, &accel_dev->crypto_list) {
|
||||
struct qat_crypto_instance *inst;
|
||||
unsigned long cur;
|
||||
|
||||
inst = list_entry(itr, struct qat_crypto_instance, list);
|
||||
cur = atomic_read(&inst->refctr);
|
||||
if (best > cur) {
|
||||
inst_best = inst;
|
||||
best = cur;
|
||||
}
|
||||
}
|
||||
if (inst_best) {
|
||||
if (atomic_add_return(1, &inst_best->refctr) == 1) {
|
||||
if (adf_dev_get(accel_dev)) {
|
||||
atomic_dec(&inst_best->refctr);
|
||||
pr_err("QAT: Could increment dev refctr\n");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
return inst_best;
|
||||
}
|
||||
|
||||
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
int i;
|
||||
unsigned long bank;
|
||||
unsigned long num_inst, num_msg_sym, num_msg_asym;
|
||||
int msg_size;
|
||||
struct qat_crypto_instance *inst;
|
||||
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
|
||||
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
|
||||
|
||||
INIT_LIST_HEAD(&accel_dev->crypto_list);
|
||||
strlcpy(key, ADF_NUM_CY, sizeof(key));
|
||||
|
||||
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
|
||||
return -EFAULT;
|
||||
|
||||
if (kstrtoul(val, 0, &num_inst))
|
||||
return -EFAULT;
|
||||
|
||||
for (i = 0; i < num_inst; i++) {
|
||||
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
|
||||
accel_dev->numa_node);
|
||||
if (!inst)
|
||||
goto err;
|
||||
|
||||
list_add_tail(&inst->list, &accel_dev->crypto_list);
|
||||
inst->id = i;
|
||||
atomic_set(&inst->refctr, 0);
|
||||
inst->accel_dev = accel_dev;
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
|
||||
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
|
||||
goto err;
|
||||
|
||||
if (kstrtoul(val, 10, &bank))
|
||||
goto err;
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
|
||||
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
|
||||
goto err;
|
||||
|
||||
if (kstrtoul(val, 10, &num_msg_sym))
|
||||
goto err;
|
||||
num_msg_sym = num_msg_sym >> 1;
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
|
||||
if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
|
||||
goto err;
|
||||
|
||||
if (kstrtoul(val, 10, &num_msg_asym))
|
||||
goto err;
|
||||
num_msg_asym = num_msg_asym >> 1;
|
||||
|
||||
msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
|
||||
if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
|
||||
msg_size, key, NULL, 0, &inst->sym_tx))
|
||||
goto err;
|
||||
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
|
||||
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
|
||||
msg_size, key, NULL, 0, &inst->rnd_tx))
|
||||
goto err;
|
||||
|
||||
msg_size = msg_size >> 1;
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
|
||||
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
|
||||
msg_size, key, NULL, 0, &inst->pke_tx))
|
||||
goto err;
|
||||
|
||||
msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
|
||||
if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
|
||||
msg_size, key, qat_alg_callback, 0,
|
||||
&inst->sym_rx))
|
||||
goto err;
|
||||
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
|
||||
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
|
||||
msg_size, key, qat_alg_callback, 0,
|
||||
&inst->rnd_rx))
|
||||
goto err;
|
||||
|
||||
snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
|
||||
if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
|
||||
msg_size, key, qat_alg_callback, 0,
|
||||
&inst->pke_rx))
|
||||
goto err;
|
||||
}
|
||||
return 0;
|
||||
err:
|
||||
qat_crypto_free_instances(accel_dev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
if (qat_crypto_create_instances(accel_dev))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Tear down the device's crypto instances on ADF_EVENT_SHUTDOWN. */
static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
|
||||
enum adf_event event)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (event) {
|
||||
case ADF_EVENT_INIT:
|
||||
ret = qat_crypto_init(accel_dev);
|
||||
break;
|
||||
case ADF_EVENT_SHUTDOWN:
|
||||
ret = qat_crypto_shutdown(accel_dev);
|
||||
break;
|
||||
case ADF_EVENT_RESTARTING:
|
||||
case ADF_EVENT_RESTARTED:
|
||||
case ADF_EVENT_START:
|
||||
case ADF_EVENT_STOP:
|
||||
default:
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int qat_crypto_register(void)
|
||||
{
|
||||
memset(&qat_crypto, 0, sizeof(qat_crypto));
|
||||
qat_crypto.event_hld = qat_crypto_event_handler;
|
||||
qat_crypto.name = "qat_crypto";
|
||||
return adf_service_register(&qat_crypto);
|
||||
}
|
||||
|
||||
int qat_crypto_unregister(void)
|
||||
{
|
||||
return adf_service_unregister(&qat_crypto);
|
||||
}
|
83
drivers/crypto/qat/qat_common/qat_crypto.h
Normal file
83
drivers/crypto/qat/qat_common/qat_crypto.h
Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _QAT_CRYPTO_INSTANCE_H_
|
||||
#define _QAT_CRYPTO_INSTANCE_H_
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include "adf_accel_devices.h"
|
||||
#include "icp_qat_fw_la.h"
|
||||
|
||||
struct qat_crypto_instance {
|
||||
struct adf_etr_ring_data *sym_tx;
|
||||
struct adf_etr_ring_data *sym_rx;
|
||||
struct adf_etr_ring_data *pke_tx;
|
||||
struct adf_etr_ring_data *pke_rx;
|
||||
struct adf_etr_ring_data *rnd_tx;
|
||||
struct adf_etr_ring_data *rnd_rx;
|
||||
struct adf_accel_dev *accel_dev;
|
||||
struct list_head list;
|
||||
unsigned long state;
|
||||
int id;
|
||||
atomic_t refctr;
|
||||
};
|
||||
|
||||
struct qat_crypto_request_buffs {
|
||||
struct qat_alg_buf_list *bl;
|
||||
dma_addr_t blp;
|
||||
struct qat_alg_buf_list *blout;
|
||||
dma_addr_t bloutp;
|
||||
size_t sz;
|
||||
};
|
||||
|
||||
struct qat_crypto_request {
|
||||
struct icp_qat_fw_la_bulk_req req;
|
||||
struct qat_alg_session_ctx *ctx;
|
||||
struct aead_request *areq;
|
||||
struct qat_crypto_request_buffs buf;
|
||||
};
|
||||
#endif
|
1393
drivers/crypto/qat/qat_common/qat_hal.c
Normal file
1393
drivers/crypto/qat/qat_common/qat_hal.c
Normal file
File diff suppressed because it is too large
Load Diff
1181
drivers/crypto/qat/qat_common/qat_uclo.c
Normal file
1181
drivers/crypto/qat/qat_common/qat_uclo.c
Normal file
File diff suppressed because it is too large
Load Diff
8
drivers/crypto/qat/qat_dh895xcc/Makefile
Normal file
8
drivers/crypto/qat/qat_dh895xcc/Makefile
Normal file
@ -0,0 +1,8 @@
|
||||
ccflags-y := -I$(src)/../qat_common
|
||||
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
|
||||
qat_dh895xcc-objs := adf_drv.o \
|
||||
adf_isr.o \
|
||||
adf_dh895xcc_hw_data.o \
|
||||
adf_hw_arbiter.o \
|
||||
qat_admin.o \
|
||||
adf_admin.o
|
144
drivers/crypto/qat/qat_dh895xcc/adf_admin.c
Normal file
144
drivers/crypto/qat/qat_dh895xcc/adf_admin.c
Normal file
@ -0,0 +1,144 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/types.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <adf_accel_devices.h>
|
||||
#include "adf_drv.h"
|
||||
#include "adf_dh895xcc_hw_data.h"
|
||||
|
||||
/* Fixed size of one admin message; request and response slots are this long. */
#define ADF_ADMINMSG_LEN 32

/*
 * Admin interface state: a DMA-able message area shared with the firmware
 * plus the mailbox register block used to signal message availability.
 */
struct adf_admin_comms {
	dma_addr_t phy_addr;		/* bus address of the message area */
	void *virt_addr;		/* CPU address of the message area */
	void __iomem *mailbox_addr;	/* base of the per-AE mailbox registers */
	struct mutex lock; /* protects adf_admin_comms struct */
};
|
||||
|
||||
/*
 * adf_put_admin_msg_sync() - Send an admin message to an AE and wait
 * synchronously for its response.
 * @accel_dev: accelerator device to talk to
 * @ae:        index of the target Acceleration Engine
 * @in:        request buffer, ADF_ADMINMSG_LEN bytes
 * @out:       response buffer, ADF_ADMINMSG_LEN bytes
 *
 * Each AE owns a request/response slot pair in the shared DMA area and a
 * mailbox register; writing 1 to the mailbox hands the request to the
 * firmware, which clears it when the response slot is filled.
 *
 * Return: 0 on success, -EAGAIN if the mailbox is still busy from a
 * previous message, -EFAULT if the firmware does not answer within the
 * polling window (50 * 20ms).
 */
int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
			   uint32_t ae, void *in, void *out)
{
	struct adf_admin_comms *admin = accel_dev->admin;
	/* Two ADF_ADMINMSG_LEN slots per AE: request followed by response. */
	int offset = ae * ADF_ADMINMSG_LEN * 2;
	void __iomem *mailbox = admin->mailbox_addr;
	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
	int times, received;

	mutex_lock(&admin->lock);

	/* Mailbox still set means the previous message was not consumed. */
	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
		mutex_unlock(&admin->lock);
		return -EAGAIN;
	}

	/* Copy the request in place first, then ring the mailbox. */
	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
	ADF_CSR_WR(mailbox, mb_offset, 1);
	received = 0;
	/* Poll for completion: firmware clears the mailbox when done. */
	for (times = 0; times < 50; times++) {
		msleep(20);
		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
			received = 1;
			break;
		}
	}
	if (received)
		/* Response lives in the second slot of this AE's pair. */
		memcpy(out, admin->virt_addr + offset +
		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
	else
		pr_err("QAT: Failed to send admin msg to accelerator\n");

	mutex_unlock(&admin->lock);
	return received ? 0 : -EFAULT;
}
|
||||
|
||||
/*
 * adf_init_admin_comms() - Set up the admin message interface for a device.
 * @accel_dev: accelerator device to initialize
 *
 * Allocates the admin state and a page of DMA-coherent memory for the
 * message slots, programs the message area's bus address into the device's
 * admin message registers (upper then lower 32 bits), and records the
 * mailbox register base.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin;
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = pmisc->virt_addr;
	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
	uint64_t reg_val;

	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
			     accel_dev->numa_node);
	if (!admin)
		return -ENOMEM;
	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
					       &admin->phy_addr, GFP_KERNEL);
	if (!admin->virt_addr) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
		kfree(admin);
		return -ENOMEM;
	}
	/* Tell the device where the message area lives: high word, low word. */
	reg_val = (uint64_t)admin->phy_addr;
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
	mutex_init(&admin->lock);
	admin->mailbox_addr = mailbox;
	accel_dev->admin = admin;
	return 0;
}
|
||||
|
||||
/*
 * adf_exit_admin_comms() - Tear down the admin message interface.
 * @accel_dev: accelerator device to clean up
 *
 * Safe to call when the interface was never initialized (admin == NULL)
 * or when the DMA area allocation failed. Frees the DMA message area
 * before the admin structure that holds its addresses.
 */
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin = accel_dev->admin;

	if (!admin)
		return;

	if (admin->virt_addr)
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);

	mutex_destroy(&admin->lock);
	kfree(admin);
	accel_dev->admin = NULL;	/* prevent a second teardown from double-freeing */
}
|
214
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
Normal file
214
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
Normal file
@ -0,0 +1,214 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <adf_accel_devices.h>
|
||||
#include "adf_dh895xcc_hw_data.h"
|
||||
#include "adf_drv.h"
|
||||
|
||||
/* Worker thread to service arbiter mappings based on dev SKUs */
|
||||
/* Worker thread to service arbiter mappings based on dev SKUs */
/* Map for SKUs with the upper half of accel engines fused off:
 * the last four entries are zero (no threads to arbitrate). */
static const uint32_t thrd_to_arb_map_sku4[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};

/* Map for SKUs with all accel engines available. */
static const uint32_t thrd_to_arb_map_sku6[] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};

/* Class descriptor shared by all DH895xCC devices; instances counts
 * how many are currently registered. */
static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};
|
||||
|
||||
/*
 * get_accel_mask() - Derive the enabled-accelerator bitmap from the fuse
 * register. A set fuse bit means the unit is disabled, hence the inversion.
 */
static uint32_t get_accel_mask(uint32_t fuse)
{
	uint32_t enabled = ~fuse;

	return (enabled >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET) &
		ADF_DH895XCC_ACCELERATORS_MASK;
}
|
||||
|
||||
/*
 * get_ae_mask() - Derive the enabled accel-engine bitmap from the fuse
 * register. A set fuse bit means the engine is disabled, hence the inversion.
 */
static uint32_t get_ae_mask(uint32_t fuse)
{
	uint32_t enabled = ~fuse;

	return enabled & ADF_DH895XCC_ACCELENGINES_MASK;
}
|
||||
|
||||
static uint32_t get_num_accels(struct adf_hw_device_data *self)
|
||||
{
|
||||
uint32_t i, ctr = 0;
|
||||
|
||||
if (!self || !self->accel_mask)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
|
||||
if (self->accel_mask & (1 << i))
|
||||
ctr++;
|
||||
}
|
||||
return ctr;
|
||||
}
|
||||
|
||||
static uint32_t get_num_aes(struct adf_hw_device_data *self)
|
||||
{
|
||||
uint32_t i, ctr = 0;
|
||||
|
||||
if (!self || !self->ae_mask)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
|
||||
if (self->ae_mask & (1 << i))
|
||||
ctr++;
|
||||
}
|
||||
return ctr;
|
||||
}
|
||||
|
||||
/* Return the BAR index holding the miscellaneous (PMISC) register block. */
static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_PMISC_BAR;
}
|
||||
|
||||
/* Return the BAR index holding the transport (ETR) ring registers. */
static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_DH895XCC_ETR_BAR;
}
|
||||
|
||||
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
|
||||
{
|
||||
int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
|
||||
>> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
|
||||
|
||||
switch (sku) {
|
||||
case ADF_DH895XCC_FUSECTL_SKU_1:
|
||||
return DEV_SKU_1;
|
||||
case ADF_DH895XCC_FUSECTL_SKU_2:
|
||||
return DEV_SKU_2;
|
||||
case ADF_DH895XCC_FUSECTL_SKU_3:
|
||||
return DEV_SKU_3;
|
||||
case ADF_DH895XCC_FUSECTL_SKU_4:
|
||||
return DEV_SKU_4;
|
||||
default:
|
||||
return DEV_SKU_UNKNOWN;
|
||||
}
|
||||
return DEV_SKU_UNKNOWN;
|
||||
}
|
||||
|
||||
void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
|
||||
uint32_t const **arb_map_config)
|
||||
{
|
||||
switch (accel_dev->accel_pci_dev.sku) {
|
||||
case DEV_SKU_1:
|
||||
*arb_map_config = thrd_to_arb_map_sku4;
|
||||
break;
|
||||
|
||||
case DEV_SKU_2:
|
||||
case DEV_SKU_4:
|
||||
*arb_map_config = thrd_to_arb_map_sku6;
|
||||
break;
|
||||
default:
|
||||
pr_err("QAT: The configuration doesn't match any SKU");
|
||||
*arb_map_config = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * adf_enable_error_correction() - Turn on ECC/parity error detection and
 * correction for every enabled accel engine and shared-memory slice.
 * @accel_dev: device whose error-correction registers are programmed
 *
 * Each register is read-modified-written so unrelated bits are preserved.
 */
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
		val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
		val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
		val |= ADF_DH895XCC_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
	}
}
|
||||
|
||||
/*
 * adf_init_hw_data_dh895xcc() - Populate a hw_data structure with the
 * DH895xCC device constants and operation callbacks.
 * @hw_data: structure to fill in; also claims an instance id from the
 *           device class (balanced by adf_clean_hw_data_dh895xcc()).
 */
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &dh895xcc_class;
	hw_data->instance_id = dh895xcc_class.instances++;
	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
	hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	/* Same callback serves enable and disable of ring arbitration. */
	hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
	hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_sku = get_sku;
	hw_data->fw_name = ADF_DH895XCC_FW;
}
|
||||
|
||||
/* Release the class instance claimed by adf_init_hw_data_dh895xcc(). */
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
|
86
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
Normal file
86
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
Normal file
@ -0,0 +1,86 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_DH895x_HW_DATA_H_
#define ADF_DH895x_HW_DATA_H_

/* PCIe configuration space */
#define ADF_DH895XCC_RX_RINGS_OFFSET 8		/* gap between a TX ring and its RX ring */
#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
#define ADF_DH895XCC_FUSECTL_OFFSET 0x40	/* fuse control reg in PCI config space */
#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000	/* 2-bit SKU field */
#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
#define ADF_DH895XCC_MAX_ACCELERATORS 6
#define ADF_DH895XCC_MAX_ACCELENGINES 12
#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C	/* capability fuses in PCI config space */
#define ADF_DH895XCC_ETR_MAX_BANKS 32
/* SMIA: bundle/misc interrupt mask registers. */
#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
#define ADF_DH895XCC_SMIA1_MASK 0x1
/* Error detection and correction */
/* NOTE(review): macro argument 'i' below is not parenthesized in the
 * expansions; fine for the current simple-index callers but fragile if
 * ever invoked with an expression argument. */
#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
#define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28)
#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12)
#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
#define ADF_DH895XCC_ERRSSMSH_EN (1 << 3)

/* Admin Messages Registers */
#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)	/* upper 32 bits of msg area */
#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)	/* lower 32 bits of msg area */
#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000	/* per-AE mailbox register spacing */
#define ADF_DH895XCC_FW "qat_895xcc.bin"	/* firmware image file name */
#endif
|
449
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
Normal file
449
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
Normal file
@ -0,0 +1,449 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/io.h>
|
||||
#include <adf_accel_devices.h>
|
||||
#include <adf_common_drv.h>
|
||||
#include <adf_cfg.h>
|
||||
#include <adf_transport_access_macros.h>
|
||||
#include "adf_dh895xcc_hw_data.h"
|
||||
#include "adf_drv.h"
|
||||
|
||||
/* Name used for PCI driver registration and region requests. */
static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;

/* Shorthand for an Intel PCI device-id table entry. */
#define ADF_SYSTEM_DEVICE(device_id) \
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}

static const struct pci_device_id adf_pci_tbl[] = {
	ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
	{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);

/* Forward declarations for the pci_driver callbacks defined below. */
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);

static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = adf_driver_name,
	.probe = adf_probe,
	.remove = adf_remove
};
|
||||
|
||||
/*
 * adf_cleanup_accel() - Undo everything adf_probe() set up, in reverse
 * order of acquisition.
 * @accel_dev: device being torn down
 *
 * Each teardown helper tolerates its subsystem never having been
 * initialized, so this is safe to call from any point of probe failure.
 */
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	int i;

	adf_exit_admin_comms(accel_dev);
	adf_exit_arb(accel_dev);
	adf_cleanup_etr_data(accel_dev);

	/* Unmap every BAR that was successfully iomapped. */
	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		/* Release the device-class bookkeeping before freeing. */
		switch (accel_dev->hw_device->pci_dev_id) {
		case ADF_DH895XCC_PCI_DEVICE_ID:
			adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
			break;
		default:
			break;
		}
		kfree(accel_dev->hw_device);
	}
	adf_cfg_dev_remove(accel_dev);
	debugfs_remove(accel_dev->debugfs_dir);
	adf_devmgr_rm_dev(accel_dev);
	pci_release_regions(accel_pci_dev->pci_dev);
	pci_disable_device(accel_pci_dev->pci_dev);
	kfree(accel_dev);
}
|
||||
|
||||
/*
 * adf_get_dev_node_id() - Guess which NUMA node a device belongs to from
 * its PCI bus number.
 * @pdev: the device being probed
 *
 * NOTE(review): this divides the 256-bus space by (phys_proc_id + 1) of
 * the last online CPU — i.e. it uses a CPU's package id as a proxy for
 * the package count. That holds only when package ids are dense starting
 * at 0; presumably a heuristic — verify against dev_to_node()/NUMA info.
 *
 * Return: estimated node id, or 0 when no estimate can be made.
 */
static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
{
	unsigned int bus_per_cpu = 0;
	struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);

	/* Single package (id 0): everything is node 0. */
	if (!c->phys_proc_id)
		return 0;

	bus_per_cpu = 256 / (c->phys_proc_id + 1);

	if (bus_per_cpu != 0)
		return pdev->bus->number / bus_per_cpu;
	return 0;
}
|
||||
|
||||
/*
 * qat_dev_start() - Build the default runtime configuration and start the
 * device.
 * @accel_dev: device to configure and start
 *
 * Creates one crypto instance per online CPU (capped by the number of
 * ring banks). For each instance the ring bank, core affinity, ring
 * sizes and the fixed ring numbers for asym/sym/rnd TX and RX are written
 * into the in-kernel config table, then the device is started.
 *
 * Return: 0 on success, -EINVAL if any config entry cannot be added, or
 * the result of adf_dev_start().
 */
static int qat_dev_start(struct adf_accel_dev *accel_dev)
{
	int cpus = num_online_cpus();
	int banks = GET_MAX_BANKS(accel_dev);
	int instances = min(cpus, banks);
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int i;
	unsigned long val;

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		goto err;
	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
		goto err;
	for (i = 0; i < instances; i++) {
		/* Instance i uses bank i and is pinned to core i. */
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		/* Ring sizes: 128 entries for asym, 512 for sym. */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		/* Fixed ring numbers within the bank: TX rings 0/2/4,
		 * matching RX rings 8/10/12 (TX + tx_rx_gap). */
		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 4;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 12;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						key, (void *)&val, ADF_DEC))
			goto err;
	}

	/* Record the total number of crypto instances configured. */
	val = i;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return adf_dev_start(accel_dev);
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return -EINVAL;
}
|
||||
|
||||
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
{
|
||||
struct adf_accel_dev *accel_dev;
|
||||
struct adf_accel_pci *accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data;
|
||||
void __iomem *pmisc_bar_addr = NULL;
|
||||
char name[ADF_DEVICE_NAME_LENGTH];
|
||||
unsigned int i, bar_nr;
|
||||
uint8_t node;
|
||||
int ret;
|
||||
|
||||
switch (ent->device) {
|
||||
case ADF_DH895XCC_PCI_DEVICE_ID:
|
||||
break;
|
||||
default:
|
||||
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
node = adf_get_dev_node_id(pdev);
|
||||
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
|
||||
if (!accel_dev)
|
||||
return -ENOMEM;
|
||||
|
||||
accel_dev->numa_node = node;
|
||||
INIT_LIST_HEAD(&accel_dev->crypto_list);
|
||||
|
||||
/* Add accel device to accel table.
|
||||
* This should be called before adf_cleanup_accel is called */
|
||||
if (adf_devmgr_add_dev(accel_dev)) {
|
||||
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
|
||||
kfree(accel_dev);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
accel_dev->owner = THIS_MODULE;
|
||||
/* Allocate and configure device configuration structure */
|
||||
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
|
||||
if (!hw_data) {
|
||||
ret = -ENOMEM;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
accel_dev->hw_device = hw_data;
|
||||
switch (ent->device) {
|
||||
case ADF_DH895XCC_PCI_DEVICE_ID:
|
||||
adf_init_hw_data_dh895xcc(accel_dev->hw_device);
|
||||
break;
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
accel_pci_dev = &accel_dev->accel_pci_dev;
|
||||
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
|
||||
pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
|
||||
&hw_data->fuses);
|
||||
|
||||
/* Get Accelerators and Accelerators Engines masks */
|
||||
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
|
||||
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
|
||||
accel_pci_dev->sku = hw_data->get_sku(hw_data);
|
||||
accel_pci_dev->pci_dev = pdev;
|
||||
/* If the device has no acceleration engines then ignore it. */
|
||||
if (!hw_data->accel_mask || !hw_data->ae_mask ||
|
||||
((~hw_data->ae_mask) & 0x01)) {
|
||||
dev_err(&pdev->dev, "No acceleration units found");
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Create dev top level debugfs entry */
|
||||
snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
|
||||
hw_data->dev_class->name, hw_data->instance_id);
|
||||
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
|
||||
if (!accel_dev->debugfs_dir) {
|
||||
dev_err(&pdev->dev, "Could not create debugfs dir\n");
|
||||
ret = -EINVAL;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Create device configuration table */
|
||||
ret = adf_cfg_dev_add(accel_dev);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
/* enable PCI device */
|
||||
if (pci_enable_device(pdev)) {
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* set dma identifier */
|
||||
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
|
||||
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
|
||||
dev_err(&pdev->dev, "No usable DMA configuration\n");
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
} else {
|
||||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
}
|
||||
|
||||
} else {
|
||||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
|
||||
}
|
||||
|
||||
if (pci_request_regions(pdev, adf_driver_name)) {
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Read accelerator capabilities mask */
|
||||
pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
|
||||
&hw_data->accel_capabilities_mask);
|
||||
|
||||
/* Find and map all the device's BARS */
|
||||
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
|
||||
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
|
||||
|
||||
bar_nr = i * 2;
|
||||
bar->base_addr = pci_resource_start(pdev, bar_nr);
|
||||
if (!bar->base_addr)
|
||||
break;
|
||||
bar->size = pci_resource_len(pdev, bar_nr);
|
||||
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
|
||||
if (!bar->virt_addr) {
|
||||
dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
if (i == ADF_DH895XCC_PMISC_BAR)
|
||||
pmisc_bar_addr = bar->virt_addr;
|
||||
}
|
||||
pci_set_master(pdev);
|
||||
|
||||
if (adf_enable_aer(accel_dev, &adf_driver)) {
|
||||
dev_err(&pdev->dev, "Failed to enable aer\n");
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
if (adf_init_etr_data(accel_dev)) {
|
||||
dev_err(&pdev->dev, "Failed initialize etr\n");
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
if (adf_init_admin_comms(accel_dev)) {
|
||||
dev_err(&pdev->dev, "Failed initialize admin comms\n");
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
if (adf_init_arb(accel_dev)) {
|
||||
dev_err(&pdev->dev, "Failed initialize hw arbiter\n");
|
||||
ret = -EFAULT;
|
||||
goto out_err;
|
||||
}
|
||||
if (pci_save_state(pdev)) {
|
||||
dev_err(&pdev->dev, "Failed to save pci state\n");
|
||||
ret = -ENOMEM;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
/* Enable bundle and misc interrupts */
|
||||
ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
|
||||
ADF_DH895XCC_SMIA0_MASK);
|
||||
ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
|
||||
ADF_DH895XCC_SMIA1_MASK);
|
||||
|
||||
ret = qat_dev_start(accel_dev);
|
||||
if (ret) {
|
||||
adf_dev_stop(accel_dev);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
out_err:
|
||||
adf_cleanup_accel(accel_dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit adf_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
|
||||
|
||||
if (!accel_dev) {
|
||||
pr_err("QAT: Driver removal failed\n");
|
||||
return;
|
||||
}
|
||||
if (adf_dev_stop(accel_dev))
|
||||
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
|
||||
adf_disable_aer(accel_dev);
|
||||
adf_cleanup_accel(accel_dev);
|
||||
}
|
||||
|
||||
static int __init adfdrv_init(void)
|
||||
{
|
||||
request_module("intel_qat");
|
||||
if (qat_admin_register())
|
||||
return -EFAULT;
|
||||
|
||||
if (pci_register_driver(&adf_driver)) {
|
||||
pr_err("QAT: Driver initialization failed\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit adfdrv_release(void)
|
||||
{
|
||||
pci_unregister_driver(&adf_driver);
|
||||
qat_admin_unregister();
|
||||
}
|
||||
|
||||
module_init(adfdrv_init);
|
||||
module_exit(adfdrv_release);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_AUTHOR("Intel");
|
||||
MODULE_FIRMWARE("qat_895xcc.bin");
|
||||
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
|
67
drivers/crypto/qat/qat_dh895xcc/adf_drv.h
Normal file
67
drivers/crypto/qat/qat_dh895xcc/adf_drv.h
Normal file
@ -0,0 +1,67 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef ADF_DH895x_DRV_H_
|
||||
#define ADF_DH895x_DRV_H_
|
||||
#include <adf_accel_devices.h>
|
||||
#include <adf_transport.h>
|
||||
|
||||
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
|
||||
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
|
||||
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
|
||||
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
|
||||
void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
|
||||
void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
|
||||
uint32_t const **arb_map_config);
|
||||
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
|
||||
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
|
||||
int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
|
||||
uint32_t ae, void *in, void *out);
|
||||
int qat_admin_register(void);
|
||||
int qat_admin_unregister(void);
|
||||
int adf_init_arb(struct adf_accel_dev *accel_dev);
|
||||
void adf_exit_arb(struct adf_accel_dev *accel_dev);
|
||||
#endif
|
159
drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
Normal file
159
drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
Normal file
@ -0,0 +1,159 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <adf_accel_devices.h>
|
||||
#include <adf_transport_internal.h>
|
||||
#include "adf_drv.h"
|
||||
|
||||
#define ADF_ARB_NUM 4
|
||||
#define ADF_ARB_REQ_RING_NUM 8
|
||||
#define ADF_ARB_REG_SIZE 0x4
|
||||
#define ADF_ARB_WTR_SIZE 0x20
|
||||
#define ADF_ARB_OFFSET 0x30000
|
||||
#define ADF_ARB_REG_SLOT 0x1000
|
||||
#define ADF_ARB_WTR_OFFSET 0x010
|
||||
#define ADF_ARB_RO_EN_OFFSET 0x090
|
||||
#define ADF_ARB_WQCFG_OFFSET 0x100
|
||||
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
|
||||
#define ADF_ARB_WRK_2_SER_MAP 10
|
||||
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
|
||||
|
||||
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
|
||||
ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
|
||||
(ADF_ARB_REG_SLOT * index), value)
|
||||
|
||||
#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
|
||||
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
|
||||
ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
|
||||
|
||||
#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
|
||||
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
|
||||
ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
|
||||
(ADF_ARB_REG_SIZE * index), value)
|
||||
|
||||
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
|
||||
ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
|
||||
(ADF_ARB_REG_SIZE * index), value)
|
||||
|
||||
#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
|
||||
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
|
||||
ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
|
||||
(ADF_ARB_REG_SIZE * index), value)
|
||||
|
||||
#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
|
||||
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
|
||||
ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
|
||||
|
||||
int adf_init_arb(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
|
||||
uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
|
||||
uint32_t arb, i;
|
||||
const uint32_t *thd_2_arb_cfg;
|
||||
|
||||
/* Service arb configured for 32 bytes responses and
|
||||
* ring flow control check enabled. */
|
||||
for (arb = 0; arb < ADF_ARB_NUM; arb++)
|
||||
WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
|
||||
|
||||
/* Setup service weighting */
|
||||
for (arb = 0; arb < ADF_ARB_NUM; arb++)
|
||||
for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
|
||||
WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
|
||||
|
||||
/* Setup ring response ordering */
|
||||
for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
|
||||
WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
|
||||
|
||||
/* Setup worker queue registers */
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
WRITE_CSR_ARB_WQCFG(csr, i, i);
|
||||
|
||||
/* Map worker threads to service arbiters */
|
||||
adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
|
||||
|
||||
if (!thd_2_arb_cfg)
|
||||
return -EFAULT;
|
||||
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
|
||||
{
|
||||
WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
|
||||
ring->bank->bank_number,
|
||||
ring->bank->ring_mask & 0xFF);
|
||||
}
|
||||
|
||||
void adf_exit_arb(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
void __iomem *csr;
|
||||
unsigned int i;
|
||||
|
||||
if (!accel_dev->transport)
|
||||
return;
|
||||
|
||||
csr = accel_dev->transport->banks[0].csr_addr;
|
||||
|
||||
/* Reset arbiter configuration */
|
||||
for (i = 0; i < ADF_ARB_NUM; i++)
|
||||
WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
|
||||
|
||||
/* Shutdown work queue */
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
WRITE_CSR_ARB_WQCFG(csr, i, 0);
|
||||
|
||||
/* Unmap worker threads to service arbiters */
|
||||
for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
|
||||
WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
|
||||
|
||||
/* Disable arbitration on all rings */
|
||||
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
|
||||
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
|
||||
}
|
266
drivers/crypto/qat/qat_dh895xcc/adf_isr.c
Normal file
266
drivers/crypto/qat/qat_dh895xcc/adf_isr.c
Normal file
@ -0,0 +1,266 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <adf_accel_devices.h>
|
||||
#include <adf_common_drv.h>
|
||||
#include <adf_cfg.h>
|
||||
#include <adf_cfg_strings.h>
|
||||
#include <adf_cfg_common.h>
|
||||
#include <adf_transport_access_macros.h>
|
||||
#include <adf_transport_internal.h>
|
||||
#include "adf_drv.h"
|
||||
|
||||
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
uint32_t msix_num_entries = hw_data->num_banks + 1;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < msix_num_entries; i++)
|
||||
pci_dev_info->msix_entries.entries[i].entry = i;
|
||||
|
||||
if (pci_enable_msix(pci_dev_info->pci_dev,
|
||||
pci_dev_info->msix_entries.entries,
|
||||
msix_num_entries)) {
|
||||
pr_err("QAT: Failed to enable MSIX IRQ\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
|
||||
{
|
||||
pci_disable_msix(pci_dev_info->pci_dev);
|
||||
}
|
||||
|
||||
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
|
||||
{
|
||||
struct adf_etr_bank_data *bank = bank_ptr;
|
||||
|
||||
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
|
||||
tasklet_hi_schedule(&bank->resp_hanlder);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
|
||||
{
|
||||
struct adf_accel_dev *accel_dev = dev_ptr;
|
||||
|
||||
pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
|
||||
struct adf_etr_data *etr_data = accel_dev->transport;
|
||||
int ret, i;
|
||||
char *name;
|
||||
|
||||
/* Request msix irq for all banks */
|
||||
for (i = 0; i < hw_data->num_banks; i++) {
|
||||
struct adf_etr_bank_data *bank = &etr_data->banks[i];
|
||||
unsigned int cpu, cpus = num_online_cpus();
|
||||
|
||||
name = *(pci_dev_info->msix_entries.names + i);
|
||||
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
|
||||
"qat%d-bundle%d", accel_dev->accel_id, i);
|
||||
ret = request_irq(msixe[i].vector,
|
||||
adf_msix_isr_bundle, 0, name, bank);
|
||||
if (ret) {
|
||||
pr_err("QAT: failed to enable irq %d for %s\n",
|
||||
msixe[i].vector, name);
|
||||
return ret;
|
||||
}
|
||||
|
||||
cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
|
||||
irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
|
||||
}
|
||||
|
||||
/* Request msix irq for AE */
|
||||
name = *(pci_dev_info->msix_entries.names + i);
|
||||
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
|
||||
"qat%d-ae-cluster", accel_dev->accel_id);
|
||||
ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
|
||||
if (ret) {
|
||||
pr_err("QAT: failed to enable irq %d, for %s\n",
|
||||
msixe[i].vector, name);
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
|
||||
struct adf_etr_data *etr_data = accel_dev->transport;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < hw_data->num_banks; i++) {
|
||||
irq_set_affinity_hint(msixe[i].vector, NULL);
|
||||
free_irq(msixe[i].vector, &etr_data->banks[i]);
|
||||
}
|
||||
irq_set_affinity_hint(msixe[i].vector, NULL);
|
||||
free_irq(msixe[i].vector, accel_dev);
|
||||
}
|
||||
|
||||
static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
int i;
|
||||
char **names;
|
||||
struct msix_entry *entries;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
uint32_t msix_num_entries = hw_data->num_banks + 1;
|
||||
|
||||
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
|
||||
GFP_KERNEL, accel_dev->numa_node);
|
||||
if (!entries)
|
||||
return -ENOMEM;
|
||||
|
||||
names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
|
||||
if (!names) {
|
||||
kfree(entries);
|
||||
return -ENOMEM;
|
||||
}
|
||||
for (i = 0; i < msix_num_entries; i++) {
|
||||
*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
|
||||
if (!(*(names + i)))
|
||||
goto err;
|
||||
}
|
||||
accel_dev->accel_pci_dev.msix_entries.entries = entries;
|
||||
accel_dev->accel_pci_dev.msix_entries.names = names;
|
||||
return 0;
|
||||
err:
|
||||
for (i = 0; i < msix_num_entries; i++) {
|
||||
if (*(names + i))
|
||||
kfree(*(names + i));
|
||||
}
|
||||
kfree(entries);
|
||||
kfree(names);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
uint32_t msix_num_entries = hw_data->num_banks + 1;
|
||||
char **names = accel_dev->accel_pci_dev.msix_entries.names;
|
||||
int i;
|
||||
|
||||
kfree(accel_dev->accel_pci_dev.msix_entries.entries);
|
||||
for (i = 0; i < msix_num_entries; i++) {
|
||||
if (*(names + i))
|
||||
kfree(*(names + i));
|
||||
}
|
||||
kfree(names);
|
||||
}
|
||||
|
||||
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_etr_data *priv_data = accel_dev->transport;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < hw_data->num_banks; i++)
|
||||
tasklet_init(&priv_data->banks[i].resp_hanlder,
|
||||
adf_response_handler,
|
||||
(unsigned long)&priv_data->banks[i]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
struct adf_etr_data *priv_data = accel_dev->transport;
|
||||
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < hw_data->num_banks; i++) {
|
||||
tasklet_disable(&priv_data->banks[i].resp_hanlder);
|
||||
tasklet_kill(&priv_data->banks[i].resp_hanlder);
|
||||
}
|
||||
}
|
||||
|
||||
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
adf_free_irqs(accel_dev);
|
||||
adf_cleanup_bh(accel_dev);
|
||||
adf_disable_msix(&accel_dev->accel_pci_dev);
|
||||
adf_isr_free_msix_entry_table(accel_dev);
|
||||
}
|
||||
|
||||
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = adf_isr_alloc_msix_entry_table(accel_dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (adf_enable_msix(accel_dev))
|
||||
goto err_out;
|
||||
|
||||
if (adf_setup_bh(accel_dev))
|
||||
goto err_out;
|
||||
|
||||
if (adf_request_irqs(accel_dev))
|
||||
goto err_out;
|
||||
|
||||
return 0;
|
||||
err_out:
|
||||
adf_isr_resource_free(accel_dev);
|
||||
return -EFAULT;
|
||||
}
|
107
drivers/crypto/qat/qat_dh895xcc/qat_admin.c
Normal file
107
drivers/crypto/qat/qat_dh895xcc/qat_admin.c
Normal file
@ -0,0 +1,107 @@
|
||||
/*
|
||||
This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
redistributing this file, you may do so under either license.
|
||||
|
||||
GPL LICENSE SUMMARY
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of version 2 of the GNU General Public License as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
Contact Information:
|
||||
qat-linux@intel.com
|
||||
|
||||
BSD LICENSE
|
||||
Copyright(c) 2014 Intel Corporation.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Intel Corporation nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#include <icp_qat_fw_init_admin.h>
|
||||
#include <adf_accel_devices.h>
|
||||
#include <adf_common_drv.h>
|
||||
#include "adf_drv.h"
|
||||
|
||||
static struct service_hndl qat_admin;
|
||||
|
||||
static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
|
||||
{
|
||||
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
|
||||
struct icp_qat_fw_init_admin_req req;
|
||||
struct icp_qat_fw_init_admin_resp resp;
|
||||
int i;
|
||||
|
||||
memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
|
||||
req.init_admin_cmd_id = cmd;
|
||||
for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
|
||||
memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
|
||||
if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
|
||||
resp.init_resp_hdr.status)
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qat_admin_start(struct adf_accel_dev *accel_dev)
|
||||
{
|
||||
return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
|
||||
}
|
||||
|
||||
static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
|
||||
enum adf_event event)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (event) {
|
||||
case ADF_EVENT_START:
|
||||
ret = qat_admin_start(accel_dev);
|
||||
break;
|
||||
case ADF_EVENT_STOP:
|
||||
case ADF_EVENT_INIT:
|
||||
case ADF_EVENT_SHUTDOWN:
|
||||
default:
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int qat_admin_register(void)
|
||||
{
|
||||
memset(&qat_admin, 0, sizeof(struct service_hndl));
|
||||
qat_admin.event_hld = qat_admin_event_handler;
|
||||
qat_admin.name = "qat_admin";
|
||||
qat_admin.admin = 1;
|
||||
return adf_service_register(&qat_admin);
|
||||
}
|
||||
|
||||
int qat_admin_unregister(void)
|
||||
{
|
||||
return adf_service_unregister(&qat_admin);
|
||||
}
|
6
drivers/crypto/qce/Makefile
Normal file
6
drivers/crypto/qce/Makefile
Normal file
@ -0,0 +1,6 @@
|
||||
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
|
||||
qcrypto-objs := core.o \
|
||||
common.o \
|
||||
dma.o \
|
||||
sha.o \
|
||||
ablkcipher.o
|
431
drivers/crypto/qce/ablkcipher.c
Normal file
431
drivers/crypto/qce/ablkcipher.c
Normal file
@ -0,0 +1,431 @@
|
||||
/*
|
||||
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/des.h>
|
||||
|
||||
#include "cipher.h"
|
||||
|
||||
static LIST_HEAD(ablkcipher_algs);
|
||||
|
||||
static void qce_ablkcipher_done(void *data)
|
||||
{
|
||||
struct crypto_async_request *async_req = data;
|
||||
struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
|
||||
struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
|
||||
struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
|
||||
struct qce_device *qce = tmpl->qce;
|
||||
enum dma_data_direction dir_src, dir_dst;
|
||||
u32 status;
|
||||
int error;
|
||||
bool diff_dst;
|
||||
|
||||
diff_dst = (req->src != req->dst) ? true : false;
|
||||
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
|
||||
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
|
||||
|
||||
error = qce_dma_terminate_all(&qce->dma);
|
||||
if (error)
|
||||
dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
|
||||
error);
|
||||
|
||||
if (diff_dst)
|
||||
qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
|
||||
rctx->dst_chained);
|
||||
qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
|
||||
rctx->dst_chained);
|
||||
|
||||
sg_free_table(&rctx->dst_tbl);
|
||||
|
||||
error = qce_check_status(qce, &status);
|
||||
if (error < 0)
|
||||
dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
|
||||
|
||||
qce->async_req_done(tmpl->qce, error);
|
||||
}
|
||||
|
||||
/*
 * Prepare and start one ablkcipher request on the crypto engine.
 *
 * Builds a destination sg table consisting of the caller's dst entries plus
 * one extra entry for the engine's result buffer, DMA-maps the source and
 * destination, sets up the DMA descriptors and finally programs the engine
 * registers via qce_start().  Returns 0 on success or a negative errno; on
 * failure all mappings and the dst table are unwound in reverse order.
 */
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	/* In-place operation maps a single scatterlist bidirectionally. */
	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = qce_countsg(req->src, req->nbytes,
				      &rctx->src_chained);
	if (diff_dst) {
		rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
					      &rctx->dst_chained);
	} else {
		rctx->dst_nents = rctx->src_nents;
		rctx->dst_chained = rctx->src_chained;
	}

	/* One extra entry for the result buffer sg appended below. */
	rctx->dst_nents += 1;

	/* May be called from atomic context; sleep only when allowed. */
	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
			rctx->dst_chained);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
				rctx->src_chained);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		/* In-place: source shares the mapped destination table. */
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
			    rctx->src_chained);
error_unmap_dst:
	qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
		    rctx->dst_chained);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}
|
||||
|
||||
static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
|
||||
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
|
||||
int ret;
|
||||
|
||||
if (!key || !keylen)
|
||||
return -EINVAL;
|
||||
|
||||
if (IS_AES(flags)) {
|
||||
switch (keylen) {
|
||||
case AES_KEYSIZE_128:
|
||||
case AES_KEYSIZE_256:
|
||||
break;
|
||||
default:
|
||||
goto fallback;
|
||||
}
|
||||
} else if (IS_DES(flags)) {
|
||||
u32 tmp[DES_EXPKEY_WORDS];
|
||||
|
||||
ret = des_ekey(tmp, key);
|
||||
if (!ret && crypto_ablkcipher_get_flags(ablk) &
|
||||
CRYPTO_TFM_REQ_WEAK_KEY)
|
||||
goto weakkey;
|
||||
}
|
||||
|
||||
ctx->enc_keylen = keylen;
|
||||
memcpy(ctx->enc_key, key, keylen);
|
||||
return 0;
|
||||
fallback:
|
||||
ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
|
||||
if (!ret)
|
||||
ctx->enc_keylen = keylen;
|
||||
return ret;
|
||||
weakkey:
|
||||
crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
|
||||
{
|
||||
struct crypto_tfm *tfm =
|
||||
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
|
||||
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
|
||||
struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
|
||||
int ret;
|
||||
|
||||
rctx->flags = tmpl->alg_flags;
|
||||
rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
|
||||
|
||||
if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
|
||||
ctx->enc_keylen != AES_KEYSIZE_256) {
|
||||
ablkcipher_request_set_tfm(req, ctx->fallback);
|
||||
ret = encrypt ? crypto_ablkcipher_encrypt(req) :
|
||||
crypto_ablkcipher_decrypt(req);
|
||||
ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
|
||||
return ret;
|
||||
}
|
||||
|
||||
return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
|
||||
}
|
||||
|
||||
/* crypto API encrypt hook: queue @req in the encrypt direction. */
static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}
|
||||
|
||||
/* crypto API decrypt hook: queue @req in the decrypt direction. */
static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}
|
||||
|
||||
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
|
||||
|
||||
ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
|
||||
CRYPTO_ALG_TYPE_ABLKCIPHER,
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK);
|
||||
if (IS_ERR(ctx->fallback))
|
||||
return PTR_ERR(ctx->fallback);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
crypto_free_ablkcipher(ctx->fallback);
|
||||
}
|
||||
|
||||
/* Static description of one hardware-backed ablkcipher algorithm. */
struct qce_ablkcipher_def {
	unsigned long flags;		/* QCE_ALG_* | QCE_MODE_* bits */
	const char *name;		/* crypto API algorithm name */
	const char *drv_name;		/* driver-unique implementation name */
	unsigned int blocksize;		/* cipher block size in bytes */
	unsigned int ivsize;		/* IV size in bytes */
	unsigned int min_keysize;	/* minimum key size in bytes */
	unsigned int max_keysize;	/* maximum key size in bytes */
};
|
||||
|
||||
/*
 * Table of all cipher algorithms this driver registers.
 *
 * NOTE(review): ecb(aes) declares ivsize = AES_BLOCK_SIZE while ecb(des)
 * and ecb(des3_ede) use 0; ECB has no IV, so this looks inconsistent —
 * confirm against the crypto API expectations before changing.
 */
static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};
|
||||
|
||||
/*
 * Allocate an algorithm template for one table entry, fill in the
 * crypto_alg and register it with the crypto API.  On success the
 * template is linked into ablkcipher_algs for later unregistration;
 * on failure the template is freed and the error returned.
 */
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct crypto_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.crypto;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->cra_blocksize = def->blocksize;
	alg->cra_ablkcipher.ivsize = def->ivsize;
	alg->cra_ablkcipher.min_keysize = def->min_keysize;
	alg->cra_ablkcipher.max_keysize = def->max_keysize;
	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

	/* Priority above generic software implementations. */
	alg->cra_priority = 300;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_module = THIS_MODULE;
	alg->cra_init = qce_ablkcipher_init;
	alg->cra_exit = qce_ablkcipher_exit;
	INIT_LIST_HEAD(&alg->cra_list);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_alg(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ablkcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
	return 0;
}
|
||||
|
||||
static void qce_ablkcipher_unregister(struct qce_device *qce)
|
||||
{
|
||||
struct qce_alg_template *tmpl, *n;
|
||||
|
||||
list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
|
||||
crypto_unregister_alg(&tmpl->alg.crypto);
|
||||
list_del(&tmpl->entry);
|
||||
kfree(tmpl);
|
||||
}
|
||||
}
|
||||
|
||||
static int qce_ablkcipher_register(struct qce_device *qce)
|
||||
{
|
||||
int ret, i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
|
||||
ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err:
|
||||
qce_ablkcipher_unregister(qce);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Operations exported to the qce core for the ablkcipher algorithm type. */
const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};
|
68
drivers/crypto/qce/cipher.h
Normal file
68
drivers/crypto/qce/cipher.h
Normal file
@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _CIPHER_H_
|
||||
#define _CIPHER_H_
|
||||
|
||||
#include "common.h"
|
||||
#include "core.h"
|
||||
|
||||
#define QCE_MAX_KEY_SIZE 64
|
||||
|
||||
/* Per-transform cipher context. */
struct qce_cipher_ctx {
	u8 enc_key[QCE_MAX_KEY_SIZE];		/* cached encryption key */
	unsigned int enc_keylen;		/* key length in bytes */
	struct crypto_ablkcipher *fallback;	/* software fallback tfm */
};
|
||||
|
||||
/**
 * struct qce_cipher_reqctx - holds private cipher objects per request
 * @flags: operation flags (QCE_ALG_*, QCE_MODE_*, QCE_ENCRYPT/QCE_DECRYPT)
 * @iv: pointer to the IV
 * @ivsize: IV size in bytes
 * @src_nents: number of source scatterlist entries
 * @dst_nents: number of destination scatterlist entries
 * @src_chained: is source scatterlist chained
 * @dst_chained: is destination scatterlist chained
 * @result_sg: scatterlist used for the engine result buffer
 * @dst_tbl: destination sg table
 * @dst_sg: destination sg pointer table beginning
 * @src_tbl: source sg table
 * @src_sg: source sg pointer table beginning
 * @cryptlen: crypto length in bytes
 */
struct qce_cipher_reqctx {
	unsigned long flags;
	u8 *iv;
	unsigned int ivsize;
	int src_nents;
	int dst_nents;
	bool src_chained;
	bool dst_chained;
	struct scatterlist result_sg;
	struct sg_table dst_tbl;
	struct scatterlist *dst_sg;
	struct sg_table src_tbl;
	struct scatterlist *src_sg;
	unsigned int cryptlen;
};
|
||||
|
||||
static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_alg *alg = tfm->__crt_alg;
|
||||
return container_of(alg, struct qce_alg_template, alg.crypto);
|
||||
}
|
||||
|
||||
extern const struct qce_algo_ops ablkcipher_ops;
|
||||
|
||||
#endif /* _CIPHER_H_ */
|
438
drivers/crypto/qce/common.c
Normal file
438
drivers/crypto/qce/common.c
Normal file
@ -0,0 +1,438 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/sha.h>
|
||||
|
||||
#include "cipher.h"
|
||||
#include "common.h"
|
||||
#include "core.h"
|
||||
#include "regs-v5.h"
|
||||
#include "sha.h"
|
||||
|
||||
#define QCE_SECTOR_SIZE 512
|
||||
|
||||
/* Read a 32-bit engine register at @offset. */
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
	return readl(qce->base + offset);
}
|
||||
|
||||
/* Write @val to the 32-bit engine register at @offset. */
static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
	writel(val, qce->base + offset);
}
|
||||
|
||||
/* Write @len consecutive 32-bit words starting at register @offset. */
static inline void qce_write_array(struct qce_device *qce, u32 offset,
				   const u32 *val, unsigned int len)
{
	unsigned int idx;

	for (idx = 0; idx < len; idx++)
		qce_write(qce, offset + idx * sizeof(u32), val[idx]);
}
|
||||
|
||||
static inline void
|
||||
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
qce_write(qce, offset + i * sizeof(u32), 0);
|
||||
}
|
||||
|
||||
/*
 * Build the ENCR_SEG_CFG register value for the given algorithm/mode flags
 * and AES key size.  Returns ~0 for an unsupported mode; callers currently
 * do not check for that sentinel.
 */
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
	u32 cfg = 0;

	/* Key-size field: AES 128/256 (other AES sizes go to the fallback). */
	if (IS_AES(flags)) {
		if (aes_key_size == AES_KEYSIZE_128)
			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
		else if (aes_key_size == AES_KEYSIZE_256)
			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
	}

	/* Algorithm field: DES and 3DES share the DES algorithm encoding. */
	if (IS_AES(flags))
		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	else if (IS_DES(flags) || IS_3DES(flags))
		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

	if (IS_DES(flags))
		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

	if (IS_3DES(flags))
		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

	/* Mode field. */
	switch (flags & QCE_MODE_MASK) {
	case QCE_MODE_ECB:
		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CBC:
		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CTR:
		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_XTS:
		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CCM:
		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
		break;
	default:
		return ~0;
	}

	return cfg;
}
|
||||
|
||||
/*
 * Build the AUTH_SEG_CFG register value for the given hash/HMAC/CMAC
 * flags and (for keyed AES modes) key size.
 */
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
	u32 cfg = 0;

	/* Algorithm: AES for CCM/CMAC, SHA otherwise. */
	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
	else
		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

	/* Key size only applies to the keyed AES auth modes. */
	if (IS_CCM(flags) || IS_CMAC(flags)) {
		if (key_size == AES_KEYSIZE_128)
			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
		else if (key_size == AES_KEYSIZE_256)
			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
	}

	/* Digest size. */
	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

	/* Mode. */
	if (IS_SHA1(flags) || IS_SHA256(flags))
		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
		 IS_CBC(flags) || IS_CTR(flags))
		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CCM(flags))
		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CMAC(flags))
		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

	if (IS_CCM(flags))
		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
	    IS_CMAC(flags))
		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

	return cfg;
}
|
||||
|
||||
static u32 qce_config_reg(struct qce_device *qce, int little)
|
||||
{
|
||||
u32 beats = (qce->burst_size >> 3) - 1;
|
||||
u32 pipe_pair = qce->pipe_pair_id;
|
||||
u32 config;
|
||||
|
||||
config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
|
||||
config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
|
||||
BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
|
||||
config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
|
||||
config &= ~HIGH_SPD_EN_N_SHIFT;
|
||||
|
||||
if (little)
|
||||
config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
/*
 * Copy @len bytes from @src into @dst, converting each 32-bit word from
 * CPU order to big endian.  @len is expected to be a multiple of 4; any
 * trailing partial word is ignored.
 */
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
	unsigned int nwords = len / sizeof(u32);

	while (nwords--) {
		*dst++ = cpu_to_be32p((const __u32 *)src);
		src += sizeof(__u32);
	}
}
|
||||
|
||||
/*
 * Prepare the XTS tweak: byte-reverse the IV into the high end of a
 * zero-padded 16-byte buffer and store it big-endian, as the engine
 * expects.  Oversized IVs (> QCE_AES_IV_LENGTH) are silently ignored.
 */
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
	u8 swap[QCE_AES_IV_LENGTH];
	u32 i, j;

	if (ivsize > QCE_AES_IV_LENGTH)
		return;

	memset(swap, 0, QCE_AES_IV_LENGTH);

	/* Reverse src into the tail of swap; the head stays zero-padded. */
	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
	     i < QCE_AES_IV_LENGTH; i++, j--)
		swap[i] = src[j];

	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
|
||||
|
||||
/*
 * Program the XTS tweak key (the second half of the combined XTS key) and
 * the data-unit size register.  The DU size is capped at one 512-byte
 * sector.
 */
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
		       unsigned int enckeylen, unsigned int cryptlen)
{
	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
	unsigned int xtsdusize;

	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
			       enckeylen / 2);
	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

	/* xts du size 512B */
	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}
|
||||
|
||||
/* Reset status and program the CONFIG register in big-endian mode. */
static void qce_setup_config(struct qce_device *qce)
{
	u32 config;

	/* get big endianness */
	config = qce_config_reg(qce, 0);

	/* clear status */
	qce_write(qce, REG_STATUS, 0);
	qce_write(qce, REG_CONFIG, config);
}
|
||||
|
||||
/* Kick off the programmed operation and request a result dump. */
static inline void qce_crypto_go(struct qce_device *qce)
{
	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
|
||||
|
||||
/*
 * Program the engine registers for one hash/HMAC/CMAC request and start
 * the operation.  Returns 0 on success or -EINVAL if an intermediate
 * update is not block-aligned.
 */
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
				u32 totallen, u32 offset)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
	u32 auth_cfg = 0, config;
	unsigned int iv_words;

	/* if not the last, the size has to be on the block boundary */
	if (!rctx->last_blk && req->nbytes % blocksize)
		return -EINVAL;

	qce_setup_config(qce);

	if (IS_CMAC(rctx->flags)) {
		/* CMAC uses no IV/byte counters; start from a clean state. */
		qce_write(qce, REG_AUTH_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
		qce_clear_array(qce, REG_AUTH_IV0, 16);
		qce_clear_array(qce, REG_AUTH_KEY0, 16);
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
	}

	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
		u32 authkey_words = rctx->authklen / sizeof(u32);

		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
				authkey_words);
	}

	if (IS_CMAC(rctx->flags))
		goto go_proc;

	/* First block loads the standard IV; later blocks resume the digest. */
	if (rctx->first_blk)
		memcpy(auth, rctx->digest, digestsize);
	else
		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

	/* SHA-1 state is 5 words; SHA-256 state is 8 words. */
	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

	if (rctx->first_blk)
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
	else
		qce_write_array(qce, REG_AUTH_BYTECNT0,
				(u32 *)rctx->byte_count, 2);

	auth_cfg = qce_auth_cfg(rctx->flags, 0);

	if (rctx->last_blk)
		auth_cfg |= BIT(AUTH_LAST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

	if (rctx->first_blk)
		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
	qce_write(qce, REG_AUTH_SEG_START, 0);
	qce_write(qce, REG_ENCR_SEG_CFG, 0);
	qce_write(qce, REG_SEG_SIZE, req->nbytes);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
|
||||
|
||||
/*
 * Program the engine registers for one cipher request and start the
 * operation.  Handles key/IV loading (including XTS tweak-key split and
 * IV byte swap) and CTR counter masks.  Returns 0 or -EINVAL for an
 * unsupported algorithm.
 */
static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
				     u32 totallen, u32 offset)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
	unsigned int enckey_words, enciv_words;
	unsigned int keylen;
	u32 encr_cfg = 0, auth_cfg = 0, config;
	unsigned int ivsize = rctx->ivsize;
	unsigned long flags = rctx->flags;

	qce_setup_config(qce);

	/* XTS keys are two concatenated keys; only the first half goes here. */
	if (IS_XTS(flags))
		keylen = ctx->enc_keylen / 2;
	else
		keylen = ctx->enc_keylen;

	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
	enckey_words = keylen / sizeof(u32);

	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	encr_cfg = qce_encr_cfg(flags, keylen);

	if (IS_DES(flags)) {
		enciv_words = 2;
		enckey_words = 2;
	} else if (IS_3DES(flags)) {
		enciv_words = 2;
		enckey_words = 6;
	} else if (IS_AES(flags)) {
		if (IS_XTS(flags))
			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
				   rctx->cryptlen);
		enciv_words = 4;
	} else {
		return -EINVAL;
	}

	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

	if (!IS_ECB(flags)) {
		if (IS_XTS(flags))
			qce_xts_swapiv(enciv, rctx->iv, ivsize);
		else
			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
	}

	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);

	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

	/* Full-width counter rollover for CTR mode. */
	if (IS_CTR(flags)) {
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	qce_write(qce, REG_SEG_SIZE, totallen);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
|
||||
|
||||
/*
 * Dispatch register setup for a request according to its crypto algorithm
 * type.  Returns -EINVAL for any unsupported type.
 */
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
	      u32 offset)
{
	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
		return qce_setup_regs_ablkcipher(async_req, totallen, offset);
	if (type == CRYPTO_ALG_TYPE_AHASH)
		return qce_setup_regs_ahash(async_req, totallen, offset);

	return -EINVAL;
}
|
||||
|
||||
#define STATUS_ERRORS \
|
||||
(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
|
||||
|
||||
/*
 * Read the engine STATUS register into *@status and return 0 on a clean,
 * completed operation or -ENXIO on any error bit or a missing done bit.
 */
int qce_check_status(struct qce_device *qce, u32 *status)
{
	*status = qce_read(qce, REG_STATUS);

	/*
	 * Don't use result dump status. The operation may not be complete.
	 * Instead, use the status we just read from device. In case, we need to
	 * use result_status from result dump the result_status needs to be byte
	 * swapped, since we set the device to little endian.
	 */
	if ((*status & STATUS_ERRORS) ||
	    !(*status & BIT(OPERATION_DONE_SHIFT)))
		return -ENXIO;

	return 0;
}
|
||||
|
||||
/* Decode the hardware VERSION register into major/minor/step fields. */
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val = qce_read(qce, REG_VERSION);

	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}
|
102
drivers/crypto/qce/common.h
Normal file
102
drivers/crypto/qce/common.h
Normal file
@ -0,0 +1,102 @@
|
||||
/*
|
||||
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _COMMON_H_
|
||||
#define _COMMON_H_
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/aes.h>
|
||||
#include <crypto/hash.h>
|
||||
|
||||
/* key size in bytes */
|
||||
#define QCE_SHA_HMAC_KEY_SIZE 64
|
||||
#define QCE_MAX_CIPHER_KEY_SIZE AES_KEYSIZE_256
|
||||
|
||||
/* IV length in bytes */
|
||||
#define QCE_AES_IV_LENGTH AES_BLOCK_SIZE
|
||||
/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
|
||||
#define QCE_MAX_IV_SIZE AES_BLOCK_SIZE
|
||||
|
||||
/* maximum nonce bytes */
|
||||
#define QCE_MAX_NONCE 16
|
||||
#define QCE_MAX_NONCE_WORDS (QCE_MAX_NONCE / sizeof(u32))
|
||||
|
||||
/* burst size alignment requirement */
|
||||
#define QCE_MAX_ALIGN_SIZE 64
|
||||
|
||||
/* cipher algorithms */
|
||||
#define QCE_ALG_DES BIT(0)
|
||||
#define QCE_ALG_3DES BIT(1)
|
||||
#define QCE_ALG_AES BIT(2)
|
||||
|
||||
/* hash and hmac algorithms */
|
||||
#define QCE_HASH_SHA1 BIT(3)
|
||||
#define QCE_HASH_SHA256 BIT(4)
|
||||
#define QCE_HASH_SHA1_HMAC BIT(5)
|
||||
#define QCE_HASH_SHA256_HMAC BIT(6)
|
||||
#define QCE_HASH_AES_CMAC BIT(7)
|
||||
|
||||
/* cipher modes */
|
||||
#define QCE_MODE_CBC BIT(8)
|
||||
#define QCE_MODE_ECB BIT(9)
|
||||
#define QCE_MODE_CTR BIT(10)
|
||||
#define QCE_MODE_XTS BIT(11)
|
||||
#define QCE_MODE_CCM BIT(12)
|
||||
#define QCE_MODE_MASK GENMASK(12, 8)
|
||||
|
||||
/* cipher encryption/decryption operations */
|
||||
#define QCE_ENCRYPT BIT(13)
|
||||
#define QCE_DECRYPT BIT(14)
|
||||
|
||||
#define IS_DES(flags) (flags & QCE_ALG_DES)
|
||||
#define IS_3DES(flags) (flags & QCE_ALG_3DES)
|
||||
#define IS_AES(flags) (flags & QCE_ALG_AES)
|
||||
|
||||
#define IS_SHA1(flags) (flags & QCE_HASH_SHA1)
|
||||
#define IS_SHA256(flags) (flags & QCE_HASH_SHA256)
|
||||
#define IS_SHA1_HMAC(flags) (flags & QCE_HASH_SHA1_HMAC)
|
||||
#define IS_SHA256_HMAC(flags) (flags & QCE_HASH_SHA256_HMAC)
|
||||
#define IS_CMAC(flags) (flags & QCE_HASH_AES_CMAC)
|
||||
#define IS_SHA(flags) (IS_SHA1(flags) || IS_SHA256(flags))
|
||||
#define IS_SHA_HMAC(flags) \
|
||||
(IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
|
||||
|
||||
#define IS_CBC(mode) (mode & QCE_MODE_CBC)
|
||||
#define IS_ECB(mode) (mode & QCE_MODE_ECB)
|
||||
#define IS_CTR(mode) (mode & QCE_MODE_CTR)
|
||||
#define IS_XTS(mode) (mode & QCE_MODE_XTS)
|
||||
#define IS_CCM(mode) (mode & QCE_MODE_CCM)
|
||||
|
||||
#define IS_ENCRYPT(dir) (dir & QCE_ENCRYPT)
|
||||
#define IS_DECRYPT(dir) (dir & QCE_DECRYPT)
|
||||
|
||||
struct qce_alg_template {
|
||||
struct list_head entry;
|
||||
u32 crypto_alg_type;
|
||||
unsigned long alg_flags;
|
||||
const u32 *std_iv;
|
||||
union {
|
||||
struct crypto_alg crypto;
|
||||
struct ahash_alg ahash;
|
||||
} alg;
|
||||
struct qce_device *qce;
|
||||
};
|
||||
|
||||
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
|
||||
int qce_check_status(struct qce_device *qce, u32 *status);
|
||||
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
|
||||
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
|
||||
u32 offset);
|
||||
|
||||
#endif /* _COMMON_H_ */
|
286
drivers/crypto/qce/core.c
Normal file
286
drivers/crypto/qce/core.c
Normal file
@ -0,0 +1,286 @@
|
||||
/*
|
||||
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/sha.h>
|
||||
|
||||
#include "core.h"
|
||||
#include "cipher.h"
|
||||
#include "sha.h"
|
||||
|
||||
/* only major version 5 of the crypto engine is supported by this driver */
#define QCE_MAJOR_VERSION5	0x05
/* depth of the software request queue (one request in flight at a time) */
#define QCE_QUEUE_LENGTH	1

/* per-crypto-type operation tables provided by cipher.c and sha.c */
static const struct qce_algo_ops *qce_ops[] = {
	&ablkcipher_ops,
	&ahash_ops,
};
|
||||
|
||||
static void qce_unregister_algs(struct qce_device *qce)
|
||||
{
|
||||
const struct qce_algo_ops *ops;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
|
||||
ops = qce_ops[i];
|
||||
ops->unregister_algs(qce);
|
||||
}
|
||||
}
|
||||
|
||||
static int qce_register_algs(struct qce_device *qce)
|
||||
{
|
||||
const struct qce_algo_ops *ops;
|
||||
int i, ret = -ENODEV;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
|
||||
ops = qce_ops[i];
|
||||
ret = ops->register_algs(qce);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qce_handle_request(struct crypto_async_request *async_req)
|
||||
{
|
||||
int ret = -EINVAL, i;
|
||||
const struct qce_algo_ops *ops;
|
||||
u32 type = crypto_tfm_alg_type(async_req->tfm);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
|
||||
ops = qce_ops[i];
|
||||
if (type != ops->type)
|
||||
continue;
|
||||
ret = ops->async_req_handle(async_req);
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Enqueue @req (may be NULL when called from the done tasklet) and, if the
 * engine is idle, dequeue the next request and start processing it.
 *
 * Returns the crypto_enqueue_request() status for @req (e.g. -EINPROGRESS
 * or -EBUSY), not the status of the request actually started; failures of
 * the started request are reported asynchronously via the done tasklet.
 */
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	/* engine is idle: claim the next queued request under the lock */
	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		/* tell the backlogged request's owner it is now queued */
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		/* defer the failure so completion runs in tasklet context */
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}
|
||||
|
||||
static void qce_tasklet_req_done(unsigned long data)
|
||||
{
|
||||
struct qce_device *qce = (struct qce_device *)data;
|
||||
struct crypto_async_request *req;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qce->lock, flags);
|
||||
req = qce->req;
|
||||
qce->req = NULL;
|
||||
spin_unlock_irqrestore(&qce->lock, flags);
|
||||
|
||||
if (req)
|
||||
req->complete(req, qce->result);
|
||||
|
||||
qce_handle_queue(qce, NULL);
|
||||
}
|
||||
|
||||
/*
 * Entry point stored in qce->async_req_enqueue; used by the algorithm
 * implementations to submit a request to the engine's queue.
 */
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}
|
||||
|
||||
/*
 * Entry point stored in qce->async_req_done; records the request result
 * and defers completion to the done tasklet (avoids completing from the
 * caller's context).
 */
static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}
|
||||
|
||||
/*
 * Read the engine revision and verify it is a supported v5.x core.
 * On success initializes the BAM burst size and pipe pair id and
 * returns 0; returns -ENODEV for unsupported revisions.
 */
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	/* pipe pair 1 is fixed here; presumably board-invariant — TODO confirm */
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}
|
||||
|
||||
static int qce_crypto_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct qce_device *qce;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
|
||||
if (!qce)
|
||||
return -ENOMEM;
|
||||
|
||||
qce->dev = dev;
|
||||
platform_set_drvdata(pdev, qce);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
qce->base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(qce->base))
|
||||
return PTR_ERR(qce->base);
|
||||
|
||||
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
qce->core = devm_clk_get(qce->dev, "core");
|
||||
if (IS_ERR(qce->core))
|
||||
return PTR_ERR(qce->core);
|
||||
|
||||
qce->iface = devm_clk_get(qce->dev, "iface");
|
||||
if (IS_ERR(qce->iface))
|
||||
return PTR_ERR(qce->iface);
|
||||
|
||||
qce->bus = devm_clk_get(qce->dev, "bus");
|
||||
if (IS_ERR(qce->bus))
|
||||
return PTR_ERR(qce->bus);
|
||||
|
||||
ret = clk_prepare_enable(qce->core);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = clk_prepare_enable(qce->iface);
|
||||
if (ret)
|
||||
goto err_clks_core;
|
||||
|
||||
ret = clk_prepare_enable(qce->bus);
|
||||
if (ret)
|
||||
goto err_clks_iface;
|
||||
|
||||
ret = qce_dma_request(qce->dev, &qce->dma);
|
||||
if (ret)
|
||||
goto err_clks;
|
||||
|
||||
ret = qce_check_version(qce);
|
||||
if (ret)
|
||||
goto err_clks;
|
||||
|
||||
spin_lock_init(&qce->lock);
|
||||
tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
|
||||
(unsigned long)qce);
|
||||
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
|
||||
|
||||
qce->async_req_enqueue = qce_async_request_enqueue;
|
||||
qce->async_req_done = qce_async_request_done;
|
||||
|
||||
ret = qce_register_algs(qce);
|
||||
if (ret)
|
||||
goto err_dma;
|
||||
|
||||
return 0;
|
||||
|
||||
err_dma:
|
||||
qce_dma_release(&qce->dma);
|
||||
err_clks:
|
||||
clk_disable_unprepare(qce->bus);
|
||||
err_clks_iface:
|
||||
clk_disable_unprepare(qce->iface);
|
||||
err_clks_core:
|
||||
clk_disable_unprepare(qce->core);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Remove: quiesce the done tasklet first so no completion can run while
 * the algorithms, DMA channels and clocks are being torn down.
 */
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}
|
||||
|
||||
/* device-tree match table: crypto engine core version 5.1 */
static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
|
68
drivers/crypto/qce/core.h
Normal file
68
drivers/crypto/qce/core.h
Normal file
@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _CORE_H_
#define _CORE_H_

#include "dma.h"

/**
 * struct qce_device - crypto engine device structure
 * @queue: crypto request queue
 * @lock: the lock protects queue and req
 * @done_tasklet: done tasklet object
 * @req: current active request
 * @result: result of current transform
 * @base: virtual IO base
 * @dev: pointer to device structure
 * @core: core device clock
 * @iface: interface clock
 * @bus: bus clock
 * @dma: pointer to dma data
 * @burst_size: the crypto burst size
 * @pipe_pair_id: which pipe pair id the device using
 * @async_req_enqueue: invoked by every algorithm to enqueue a request
 * @async_req_done: invoked by every algorithm to finish its request
 */
struct qce_device {
	struct crypto_queue queue;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
	struct crypto_async_request *req;
	int result;
	void __iomem *base;
	struct device *dev;
	struct clk *core, *iface, *bus;
	struct qce_dma_data dma;
	int burst_size;
	unsigned int pipe_pair_id;
	int (*async_req_enqueue)(struct qce_device *qce,
				 struct crypto_async_request *req);
	void (*async_req_done)(struct qce_device *qce, int ret);
};

/**
 * struct qce_algo_ops - algorithm operations per crypto type
 * @type: should be CRYPTO_ALG_TYPE_XXX
 * @register_algs: invoked by core to register the algorithms
 * @unregister_algs: invoked by core to unregister the algorithms
 * @async_req_handle: invoked by core to handle enqueued request
 */
struct qce_algo_ops {
	u32 type;
	int (*register_algs)(struct qce_device *qce);
	void (*unregister_algs)(struct qce_device *qce);
	int (*async_req_handle)(struct crypto_async_request *async_req);
};

#endif /* _CORE_H_ */
|
186
drivers/crypto/qce/dma.c
Normal file
186
drivers/crypto/qce/dma.c
Normal file
@ -0,0 +1,186 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
|
||||
#include "dma.h"
|
||||
|
||||
/*
 * Acquire the BAM "tx" and "rx" slave DMA channels and allocate the
 * result-dump buffer (the ignore area is carved out of the same
 * allocation, right after the result dump).
 * Returns 0 on success or a negative errno; on failure nothing is
 * left acquired.
 */
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	/* ignore buffer shares the allocation, after the result dump */
	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

	return 0;
error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}
|
||||
|
||||
/* Release both DMA channels and free the combined result/ignore buffer. */
void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}
|
||||
|
||||
/*
 * DMA-map a scatterlist.  A chained list is mapped one entry at a time
 * (dma_map_sg() is not walked across chain links here); a flat list is
 * mapped in a single call.  Returns @nents on success, -EFAULT when any
 * mapping fails.
 */
int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
	      enum dma_data_direction dir, bool chained)
{
	int err;

	if (!chained)
		return dma_map_sg(dev, sg, nents, dir) ? nents : -EFAULT;

	while (sg) {
		err = dma_map_sg(dev, sg, 1, dir);
		if (!err)
			return -EFAULT;
		sg = scatterwalk_sg_next(sg);
	}

	return nents;
}
|
||||
|
||||
/* Undo qce_mapsg(): unmap the whole list, or each entry of a chained one. */
void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir, bool chained)
{
	if (!chained) {
		dma_unmap_sg(dev, sg, nents, dir);
		return;
	}

	for (; sg; sg = scatterwalk_sg_next(sg))
		dma_unmap_sg(dev, sg, 1, dir);
}
|
||||
|
||||
/*
 * Count the scatterlist entries needed to cover @nbytes.  When @chained
 * is non-NULL it is set to true if a chain link (entry followed by a
 * zero-length slot) is detected along the way.
 */
int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
{
	struct scatterlist *sg;
	int nents = 0;

	if (chained)
		*chained = false;

	for (sg = sglist; sg && nbytes > 0; sg = scatterwalk_sg_next(sg)) {
		nents++;
		nbytes -= sg->length;
		if (chained && !sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
	}

	return nents;
}
|
||||
|
||||
/*
 * Copy the entries of @new_sgl into the first unused slots (no page set)
 * of @sgt.  Returns the last table entry written, or ERR_PTR(-EINVAL)
 * when the table is already full.
 * NOTE(review): if @new_sgl holds more entries than @sgt has free slots,
 * the remainder is silently dropped — callers must size @sgt accordingly.
 */
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;

	/* skip past slots that already have a page assigned */
	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg) {
		sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
			    new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
	}

	return sg_last;
}
|
||||
|
||||
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
|
||||
int nents, unsigned long flags,
|
||||
enum dma_transfer_direction dir,
|
||||
dma_async_tx_callback cb, void *cb_param)
|
||||
{
|
||||
struct dma_async_tx_descriptor *desc;
|
||||
dma_cookie_t cookie;
|
||||
|
||||
if (!sg || !nents)
|
||||
return -EINVAL;
|
||||
|
||||
desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
|
||||
desc->callback = cb;
|
||||
desc->callback_param = cb_param;
|
||||
cookie = dmaengine_submit(desc);
|
||||
|
||||
return dma_submit_error(cookie);
|
||||
}
|
||||
|
||||
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
|
||||
int rx_nents, struct scatterlist *tx_sg, int tx_nents,
|
||||
dma_async_tx_callback cb, void *cb_param)
|
||||
{
|
||||
struct dma_chan *rxchan = dma->rxchan;
|
||||
struct dma_chan *txchan = dma->txchan;
|
||||
unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
|
||||
int ret;
|
||||
|
||||
ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
|
||||
NULL, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
|
||||
cb, cb_param);
|
||||
}
|
||||
|
||||
/* Push previously submitted descriptors on both channels to the hardware. */
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}
|
||||
|
||||
int qce_dma_terminate_all(struct qce_dma_data *dma)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = dmaengine_terminate_all(dma->rxchan);
|
||||
return ret ?: dmaengine_terminate_all(dma->txchan);
|
||||
}
|
58
drivers/crypto/qce/dma.h
Normal file
58
drivers/crypto/qce/dma.h
Normal file
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _DMA_H_
#define _DMA_H_

/* maximum data transfer block size between BAM and CE */
#define QCE_BAM_BURST_SIZE		64

/* sizes of the register groups captured in the result dump */
#define QCE_AUTHIV_REGS_CNT		16
#define QCE_AUTH_BYTECOUNT_REGS_CNT	4
#define QCE_CNTRIV_REGS_CNT		4

/*
 * Layout of the result area the engine DMAs back after each operation:
 * auth IV, auth byte counters, counter/IV registers and the two status
 * registers.
 */
struct qce_result_dump {
	u32 auth_iv[QCE_AUTHIV_REGS_CNT];
	u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
	u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
	u32 status;
	u32 status2;
};

#define QCE_IGNORE_BUF_SZ	(2 * QCE_BAM_BURST_SIZE)
#define QCE_RESULT_BUF_SZ	\
		ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)

/*
 * DMA state for one engine instance: the two BAM slave channels plus
 * the result-dump buffer; ignore_buf points into the same allocation,
 * right after the result dump.
 */
struct qce_dma_data {
	struct dma_chan *txchan;
	struct dma_chan *rxchan;
	struct qce_result_dump *result_buf;
	void *ignore_buf;
};

int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
void qce_dma_release(struct qce_dma_data *dma);
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
		     int in_ents, struct scatterlist *sg_out, int out_ents,
		     dma_async_tx_callback cb, void *cb_param);
void qce_dma_issue_pending(struct qce_dma_data *dma);
int qce_dma_terminate_all(struct qce_dma_data *dma);
int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir, bool chained);
int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
	      enum dma_data_direction dir, bool chained);
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);

#endif /* _DMA_H_ */
|
334
drivers/crypto/qce/regs-v5.h
Normal file
334
drivers/crypto/qce/regs-v5.h
Normal file
@ -0,0 +1,334 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _REGS_V5_H_
|
||||
#define _REGS_V5_H_
|
||||
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#define REG_VERSION 0x000
|
||||
#define REG_STATUS 0x100
|
||||
#define REG_STATUS2 0x104
|
||||
#define REG_ENGINES_AVAIL 0x108
|
||||
#define REG_FIFO_SIZES 0x10c
|
||||
#define REG_SEG_SIZE 0x110
|
||||
#define REG_GOPROC 0x120
|
||||
#define REG_ENCR_SEG_CFG 0x200
|
||||
#define REG_ENCR_SEG_SIZE 0x204
|
||||
#define REG_ENCR_SEG_START 0x208
|
||||
#define REG_CNTR0_IV0 0x20c
|
||||
#define REG_CNTR1_IV1 0x210
|
||||
#define REG_CNTR2_IV2 0x214
|
||||
#define REG_CNTR3_IV3 0x218
|
||||
#define REG_CNTR_MASK 0x21C
|
||||
#define REG_ENCR_CCM_INT_CNTR0 0x220
|
||||
#define REG_ENCR_CCM_INT_CNTR1 0x224
|
||||
#define REG_ENCR_CCM_INT_CNTR2 0x228
|
||||
#define REG_ENCR_CCM_INT_CNTR3 0x22c
|
||||
#define REG_ENCR_XTS_DU_SIZE 0x230
|
||||
#define REG_CNTR_MASK2 0x234
|
||||
#define REG_CNTR_MASK1 0x238
|
||||
#define REG_CNTR_MASK0 0x23c
|
||||
#define REG_AUTH_SEG_CFG 0x300
|
||||
#define REG_AUTH_SEG_SIZE 0x304
|
||||
#define REG_AUTH_SEG_START 0x308
|
||||
#define REG_AUTH_IV0 0x310
|
||||
#define REG_AUTH_IV1 0x314
|
||||
#define REG_AUTH_IV2 0x318
|
||||
#define REG_AUTH_IV3 0x31c
|
||||
#define REG_AUTH_IV4 0x320
|
||||
#define REG_AUTH_IV5 0x324
|
||||
#define REG_AUTH_IV6 0x328
|
||||
#define REG_AUTH_IV7 0x32c
|
||||
#define REG_AUTH_IV8 0x330
|
||||
#define REG_AUTH_IV9 0x334
|
||||
#define REG_AUTH_IV10 0x338
|
||||
#define REG_AUTH_IV11 0x33c
|
||||
#define REG_AUTH_IV12 0x340
|
||||
#define REG_AUTH_IV13 0x344
|
||||
#define REG_AUTH_IV14 0x348
|
||||
#define REG_AUTH_IV15 0x34c
|
||||
#define REG_AUTH_INFO_NONCE0 0x350
|
||||
#define REG_AUTH_INFO_NONCE1 0x354
|
||||
#define REG_AUTH_INFO_NONCE2 0x358
|
||||
#define REG_AUTH_INFO_NONCE3 0x35c
|
||||
#define REG_AUTH_BYTECNT0 0x390
|
||||
#define REG_AUTH_BYTECNT1 0x394
|
||||
#define REG_AUTH_BYTECNT2 0x398
|
||||
#define REG_AUTH_BYTECNT3 0x39c
|
||||
#define REG_AUTH_EXP_MAC0 0x3a0
|
||||
#define REG_AUTH_EXP_MAC1 0x3a4
|
||||
#define REG_AUTH_EXP_MAC2 0x3a8
|
||||
#define REG_AUTH_EXP_MAC3 0x3ac
|
||||
#define REG_AUTH_EXP_MAC4 0x3b0
|
||||
#define REG_AUTH_EXP_MAC5 0x3b4
|
||||
#define REG_AUTH_EXP_MAC6 0x3b8
|
||||
#define REG_AUTH_EXP_MAC7 0x3bc
|
||||
#define REG_CONFIG 0x400
|
||||
#define REG_GOPROC_QC_KEY 0x1000
|
||||
#define REG_GOPROC_OEM_KEY 0x2000
|
||||
#define REG_ENCR_KEY0 0x3000
|
||||
#define REG_ENCR_KEY1 0x3004
|
||||
#define REG_ENCR_KEY2 0x3008
|
||||
#define REG_ENCR_KEY3 0x300c
|
||||
#define REG_ENCR_KEY4 0x3010
|
||||
#define REG_ENCR_KEY5 0x3014
|
||||
#define REG_ENCR_KEY6 0x3018
|
||||
#define REG_ENCR_KEY7 0x301c
|
||||
#define REG_ENCR_XTS_KEY0 0x3020
|
||||
#define REG_ENCR_XTS_KEY1 0x3024
|
||||
#define REG_ENCR_XTS_KEY2 0x3028
|
||||
#define REG_ENCR_XTS_KEY3 0x302c
|
||||
#define REG_ENCR_XTS_KEY4 0x3030
|
||||
#define REG_ENCR_XTS_KEY5 0x3034
|
||||
#define REG_ENCR_XTS_KEY6 0x3038
|
||||
#define REG_ENCR_XTS_KEY7 0x303c
|
||||
#define REG_AUTH_KEY0 0x3040
|
||||
#define REG_AUTH_KEY1 0x3044
|
||||
#define REG_AUTH_KEY2 0x3048
|
||||
#define REG_AUTH_KEY3 0x304c
|
||||
#define REG_AUTH_KEY4 0x3050
|
||||
#define REG_AUTH_KEY5 0x3054
|
||||
#define REG_AUTH_KEY6 0x3058
|
||||
#define REG_AUTH_KEY7 0x305c
|
||||
#define REG_AUTH_KEY8 0x3060
|
||||
#define REG_AUTH_KEY9 0x3064
|
||||
#define REG_AUTH_KEY10 0x3068
|
||||
#define REG_AUTH_KEY11 0x306c
|
||||
#define REG_AUTH_KEY12 0x3070
|
||||
#define REG_AUTH_KEY13 0x3074
|
||||
#define REG_AUTH_KEY14 0x3078
|
||||
#define REG_AUTH_KEY15 0x307c
|
||||
|
||||
/* Register bits - REG_VERSION */
|
||||
#define CORE_STEP_REV_SHIFT 0
|
||||
#define CORE_STEP_REV_MASK GENMASK(15, 0)
|
||||
#define CORE_MINOR_REV_SHIFT 16
|
||||
#define CORE_MINOR_REV_MASK GENMASK(23, 16)
|
||||
#define CORE_MAJOR_REV_SHIFT 24
|
||||
#define CORE_MAJOR_REV_MASK GENMASK(31, 24)
|
||||
|
||||
/* Register bits - REG_STATUS */
|
||||
#define MAC_FAILED_SHIFT 31
|
||||
#define DOUT_SIZE_AVAIL_SHIFT 26
|
||||
#define DOUT_SIZE_AVAIL_MASK GENMASK(30, 26)
|
||||
#define DIN_SIZE_AVAIL_SHIFT 21
|
||||
#define DIN_SIZE_AVAIL_MASK GENMASK(25, 21)
|
||||
#define HSD_ERR_SHIFT 20
|
||||
#define ACCESS_VIOL_SHIFT 19
|
||||
#define PIPE_ACTIVE_ERR_SHIFT 18
|
||||
#define CFG_CHNG_ERR_SHIFT 17
|
||||
#define DOUT_ERR_SHIFT 16
|
||||
#define DIN_ERR_SHIFT 15
|
||||
#define AXI_ERR_SHIFT 14
|
||||
#define CRYPTO_STATE_SHIFT 10
|
||||
#define CRYPTO_STATE_MASK GENMASK(13, 10)
|
||||
#define ENCR_BUSY_SHIFT 9
|
||||
#define AUTH_BUSY_SHIFT 8
|
||||
#define DOUT_INTR_SHIFT 7
|
||||
#define DIN_INTR_SHIFT 6
|
||||
#define OP_DONE_INTR_SHIFT 5
|
||||
#define ERR_INTR_SHIFT 4
|
||||
#define DOUT_RDY_SHIFT 3
|
||||
#define DIN_RDY_SHIFT 2
|
||||
#define OPERATION_DONE_SHIFT 1
|
||||
#define SW_ERR_SHIFT 0
|
||||
|
||||
/* Register bits - REG_STATUS2 */
|
||||
#define AXI_EXTRA_SHIFT 1
|
||||
#define LOCKED_SHIFT 2
|
||||
|
||||
/* Register bits - REG_CONFIG */
|
||||
#define REQ_SIZE_SHIFT 17
|
||||
#define REQ_SIZE_MASK GENMASK(20, 17)
|
||||
#define REQ_SIZE_ENUM_1_BEAT 0
|
||||
#define REQ_SIZE_ENUM_2_BEAT 1
|
||||
#define REQ_SIZE_ENUM_3_BEAT 2
|
||||
#define REQ_SIZE_ENUM_4_BEAT 3
|
||||
#define REQ_SIZE_ENUM_5_BEAT 4
|
||||
#define REQ_SIZE_ENUM_6_BEAT 5
|
||||
#define REQ_SIZE_ENUM_7_BEAT 6
|
||||
#define REQ_SIZE_ENUM_8_BEAT 7
|
||||
#define REQ_SIZE_ENUM_9_BEAT 8
|
||||
#define REQ_SIZE_ENUM_10_BEAT 9
|
||||
#define REQ_SIZE_ENUM_11_BEAT 10
|
||||
#define REQ_SIZE_ENUM_12_BEAT 11
|
||||
#define REQ_SIZE_ENUM_13_BEAT 12
|
||||
#define REQ_SIZE_ENUM_14_BEAT 13
|
||||
#define REQ_SIZE_ENUM_15_BEAT 14
|
||||
#define REQ_SIZE_ENUM_16_BEAT 15
|
||||
|
||||
#define MAX_QUEUED_REQ_SHIFT 14
|
||||
#define MAX_QUEUED_REQ_MASK GENMASK(24, 16)
|
||||
#define ENUM_1_QUEUED_REQS 0
|
||||
#define ENUM_2_QUEUED_REQS 1
|
||||
#define ENUM_3_QUEUED_REQS 2
|
||||
|
||||
#define IRQ_ENABLES_SHIFT 10
|
||||
#define IRQ_ENABLES_MASK GENMASK(13, 10)
|
||||
|
||||
#define LITTLE_ENDIAN_MODE_SHIFT 9
|
||||
#define PIPE_SET_SELECT_SHIFT 5
|
||||
#define PIPE_SET_SELECT_MASK GENMASK(8, 5)
|
||||
|
||||
#define HIGH_SPD_EN_N_SHIFT 4
|
||||
#define MASK_DOUT_INTR_SHIFT 3
|
||||
#define MASK_DIN_INTR_SHIFT 2
|
||||
#define MASK_OP_DONE_INTR_SHIFT 1
|
||||
#define MASK_ERR_INTR_SHIFT 0
|
||||
|
||||
/* Register bits - REG_AUTH_SEG_CFG */
|
||||
#define COMP_EXP_MAC_SHIFT 24
|
||||
#define COMP_EXP_MAC_DISABLED 0
|
||||
#define COMP_EXP_MAC_ENABLED 1
|
||||
|
||||
#define F9_DIRECTION_SHIFT 23
|
||||
#define F9_DIRECTION_UPLINK 0
|
||||
#define F9_DIRECTION_DOWNLINK 1
|
||||
|
||||
#define AUTH_NONCE_NUM_WORDS_SHIFT 20
|
||||
#define AUTH_NONCE_NUM_WORDS_MASK GENMASK(22, 20)
|
||||
|
||||
#define USE_PIPE_KEY_AUTH_SHIFT 19
|
||||
#define USE_HW_KEY_AUTH_SHIFT 18
|
||||
#define AUTH_FIRST_SHIFT 17
|
||||
#define AUTH_LAST_SHIFT 16
|
||||
|
||||
#define AUTH_POS_SHIFT 14
|
||||
#define AUTH_POS_MASK GENMASK(15, 14)
|
||||
#define AUTH_POS_BEFORE 0
|
||||
#define AUTH_POS_AFTER 1
|
||||
|
||||
#define AUTH_SIZE_SHIFT 9
|
||||
#define AUTH_SIZE_MASK GENMASK(13, 9)
|
||||
#define AUTH_SIZE_SHA1 0
|
||||
#define AUTH_SIZE_SHA256 1
|
||||
#define AUTH_SIZE_ENUM_1_BYTES 0
|
||||
#define AUTH_SIZE_ENUM_2_BYTES 1
|
||||
#define AUTH_SIZE_ENUM_3_BYTES 2
|
||||
#define AUTH_SIZE_ENUM_4_BYTES 3
|
||||
#define AUTH_SIZE_ENUM_5_BYTES 4
|
||||
#define AUTH_SIZE_ENUM_6_BYTES 5
|
||||
#define AUTH_SIZE_ENUM_7_BYTES 6
|
||||
#define AUTH_SIZE_ENUM_8_BYTES 7
|
||||
#define AUTH_SIZE_ENUM_9_BYTES 8
|
||||
#define AUTH_SIZE_ENUM_10_BYTES 9
|
||||
#define AUTH_SIZE_ENUM_11_BYTES 10
|
||||
#define AUTH_SIZE_ENUM_12_BYTES 11
|
||||
#define AUTH_SIZE_ENUM_13_BYTES 12
|
||||
#define AUTH_SIZE_ENUM_14_BYTES 13
|
||||
#define AUTH_SIZE_ENUM_15_BYTES 14
|
||||
#define AUTH_SIZE_ENUM_16_BYTES 15
|
||||
|
||||
#define AUTH_MODE_SHIFT 6
|
||||
#define AUTH_MODE_MASK GENMASK(8, 6)
|
||||
#define AUTH_MODE_HASH 0
|
||||
#define AUTH_MODE_HMAC 1
|
||||
#define AUTH_MODE_CCM 0
|
||||
#define AUTH_MODE_CMAC 1
|
||||
|
||||
#define AUTH_KEY_SIZE_SHIFT 3
|
||||
#define AUTH_KEY_SIZE_MASK GENMASK(5, 3)
|
||||
#define AUTH_KEY_SZ_AES128 0
|
||||
#define AUTH_KEY_SZ_AES256 2
|
||||
|
||||
#define AUTH_ALG_SHIFT 0
|
||||
#define AUTH_ALG_MASK GENMASK(2, 0)
|
||||
#define AUTH_ALG_NONE 0
|
||||
#define AUTH_ALG_SHA 1
|
||||
#define AUTH_ALG_AES 2
|
||||
#define AUTH_ALG_KASUMI 3
|
||||
#define AUTH_ALG_SNOW3G 4
|
||||
#define AUTH_ALG_ZUC 5
|
||||
|
||||
/* Register bits - REG_ENCR_XTS_DU_SIZE */
|
||||
#define ENCR_XTS_DU_SIZE_SHIFT 0
|
||||
#define ENCR_XTS_DU_SIZE_MASK GENMASK(19, 0)
|
||||
|
||||
/* Register bits - REG_ENCR_SEG_CFG */
|
||||
#define F8_KEYSTREAM_ENABLE_SHIFT 17
|
||||
#define F8_KEYSTREAM_DISABLED 0
|
||||
#define F8_KEYSTREAM_ENABLED 1
|
||||
|
||||
#define F8_DIRECTION_SHIFT 16
|
||||
#define F8_DIRECTION_UPLINK 0
|
||||
#define F8_DIRECTION_DOWNLINK 1
|
||||
|
||||
#define USE_PIPE_KEY_ENCR_SHIFT 15
|
||||
#define USE_PIPE_KEY_ENCR_ENABLED 1
|
||||
#define USE_KEY_REGISTERS 0
|
||||
|
||||
#define USE_HW_KEY_ENCR_SHIFT 14
|
||||
#define USE_KEY_REG 0
|
||||
#define USE_HW_KEY 1
|
||||
|
||||
#define LAST_CCM_SHIFT 13
|
||||
#define LAST_CCM_XFR 1
|
||||
#define INTERM_CCM_XFR 0
|
||||
|
||||
#define CNTR_ALG_SHIFT 11
|
||||
#define CNTR_ALG_MASK GENMASK(12, 11)
|
||||
#define CNTR_ALG_NIST 0
|
||||
|
||||
#define ENCODE_SHIFT 10
|
||||
|
||||
#define ENCR_MODE_SHIFT 6
|
||||
#define ENCR_MODE_MASK GENMASK(9, 6)
|
||||
#define ENCR_MODE_ECB 0
|
||||
#define ENCR_MODE_CBC 1
|
||||
#define ENCR_MODE_CTR 2
|
||||
#define ENCR_MODE_XTS 3
|
||||
#define ENCR_MODE_CCM 4
|
||||
|
||||
#define ENCR_KEY_SZ_SHIFT 3
|
||||
#define ENCR_KEY_SZ_MASK GENMASK(5, 3)
|
||||
#define ENCR_KEY_SZ_DES 0
|
||||
#define ENCR_KEY_SZ_3DES 1
|
||||
#define ENCR_KEY_SZ_AES128 0
|
||||
#define ENCR_KEY_SZ_AES256 2
|
||||
|
||||
#define ENCR_ALG_SHIFT 0
|
||||
#define ENCR_ALG_MASK GENMASK(2, 0)
|
||||
#define ENCR_ALG_NONE 0
|
||||
#define ENCR_ALG_DES 1
|
||||
#define ENCR_ALG_AES 2
|
||||
#define ENCR_ALG_KASUMI 4
|
||||
#define ENCR_ALG_SNOW_3G 5
|
||||
#define ENCR_ALG_ZUC 6
|
||||
|
||||
/* Register bits - REG_GOPROC */
|
||||
#define GO_SHIFT 0
|
||||
#define CLR_CNTXT_SHIFT 1
|
||||
#define RESULTS_DUMP_SHIFT 2
|
||||
|
||||
/* Register bits - REG_ENGINES_AVAIL */
|
||||
#define ENCR_AES_SEL_SHIFT 0
|
||||
#define DES_SEL_SHIFT 1
|
||||
#define ENCR_SNOW3G_SEL_SHIFT 2
|
||||
#define ENCR_KASUMI_SEL_SHIFT 3
|
||||
#define SHA_SEL_SHIFT 4
|
||||
#define SHA512_SEL_SHIFT 5
|
||||
#define AUTH_AES_SEL_SHIFT 6
|
||||
#define AUTH_SNOW3G_SEL_SHIFT 7
|
||||
#define AUTH_KASUMI_SEL_SHIFT 8
|
||||
#define BAM_PIPE_SETS_SHIFT 9
|
||||
#define BAM_PIPE_SETS_MASK GENMASK(12, 9)
|
||||
#define AXI_WR_BEATS_SHIFT 13
|
||||
#define AXI_WR_BEATS_MASK GENMASK(18, 13)
|
||||
#define AXI_RD_BEATS_SHIFT 19
|
||||
#define AXI_RD_BEATS_MASK GENMASK(24, 19)
|
||||
#define ENCR_ZUC_SEL_SHIFT 26
|
||||
#define AUTH_ZUC_SEL_SHIFT 27
|
||||
#define ZUC_ENABLE_SHIFT 28
|
||||
|
||||
#endif /* _REGS_V5_H_ */
|
588
drivers/crypto/qce/sha.c
Normal file
588
drivers/crypto/qce/sha.c
Normal file
@ -0,0 +1,588 @@
|
||||
/*
|
||||
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "core.h"
|
||||
#include "sha.h"
|
||||
|
||||
/* crypto hw padding constant for first operation */
#define SHA_PADDING		64
#define SHA_PADDING_MASK	(SHA_PADDING - 1)

/* all registered ahash algorithm templates */
static LIST_HEAD(ahash_algs);

/* SHA-1 initial state, zero-padded to the SHA-256 digest width */
static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

/* SHA-256 initial state */
static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};
|
||||
|
||||
/*
 * DMA completion callback for hash requests: stop the DMA, unmap the
 * buffers, copy the intermediate digest and byte counters out of the
 * result dump, restore the request fields, and complete the request.
 */
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	/* unmap in reverse of the mapping done when the request started */
	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);

	/* keep the intermediate digest for a possible next update */
	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	/* carry the hardware byte counters into the request context */
	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	/* restore the caller-visible fields modified while processing */
	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}
|
||||
|
||||
/*
 * Submit one hash operation to the crypto engine: map the source
 * scatterlist and the result buffer for DMA, build the DMA descriptors
 * with qce_ahash_done() as completion callback, then program the engine
 * via qce_start().  Errors unwind every mapping done so far (goto-based
 * cleanup, most recent resource first).
 */
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	/* keyed modes: point the request context at the key in the tfm ctx */
	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = qce_countsg(req->src, req->nbytes,
				      &rctx->src_chained);
	ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
			rctx->src_chained);
	if (ret < 0)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
	if (ret < 0)
		goto error_unmap_src;

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
error_unmap_src:
	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	return ret;
}
|
||||
|
||||
/*
 * ahash .init: reset the per-request context and load the standard
 * initial vector of the algorithm this tfm was registered with.
 */
static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}
|
||||
|
||||
/*
 * ahash .export: serialize the partial hash state into the generic
 * sha1_state / sha256_state layout (count, big-endian state words,
 * buffered partial block).  Algorithms without such a generic state
 * (e.g. the CMAC variant) return -EINVAL.
 */
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}
|
||||
|
||||
/*
 * Common part of .import: rebuild the request context from a generic
 * sha*_state (@in_count, @state words, buffered @buffer bytes).
 * @hmac selects the HMAC byte-count compensation described below.
 */
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, there is a hardware padding done when first block
		 * is set. Therefore the byte_count must be incremented by 64
		 * after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	/*
	 * NOTE(review): the fields are declared __be32 but the values are
	 * stored CPU-endian under a __force cast, whereas qce_ahash_done()
	 * stores cpu_to_be32() values here — verify which representation the
	 * engine setup code actually consumes.
	 */
	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	/* bytes of the (partial) last block still held in rctx->buf */
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}
|
||||
|
||||
/*
 * ahash .import: restore a state previously produced by
 * qce_ahash_export().  Only the SHA1/SHA256 (plain and HMAC) variants
 * are supported; anything else yields -EINVAL.
 */
static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	bool hmac = IS_SHA_HMAC(flags);
	int ret = -EINVAL;

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		const struct sha1_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buffer, hmac);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		const struct sha256_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buf, hmac);
	}

	return ret;
}
|
||||
|
||||
/*
 * ahash .update: buffer data until more than one block is available,
 * then hand the block-aligned prefix to the engine and keep the
 * remainder in rctx->buf for the next update/final call.  The request's
 * src/nbytes are temporarily rewritten; qce_ahash_done() restores them.
 */
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	/* everything still fits into one block: just buffer and return */
	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * if we have data from previous update copy them on buffer. The old
	 * data will be combined with current request bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;
	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;
		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is multiple of blocksize */
	nbytes = total - hash_later;

	/*
	 * Find the last sg entry needed to cover nbytes and terminate the
	 * list there.
	 * NOTE(review): sg_dma_len() is read before the list is DMA-mapped
	 * (mapping happens later in qce_ahash_async_req_handle()) — confirm
	 * it is equivalent to sg->length on all supported architectures.
	 */
	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = scatterwalk_sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	sg_mark_end(sg_last);

	/* prepend previously buffered bytes via a two-entry chained list */
	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		scatterwalk_sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
|
||||
|
||||
/*
 * ahash .final: submit the residual bytes buffered in rctx->buf as the
 * last block.
 * NOTE(review): when buflen is 0 (e.g. a zero-length message) this
 * returns 0 without writing anything to req->result — confirm callers
 * never rely on final() producing a digest for an empty message.
 */
static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen)
		return 0;

	rctx->last_blk = true;

	/* saved fields are restored by qce_ahash_done() */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
|
||||
|
||||
/*
 * ahash .digest: one-shot hash — initialize the context, mark the
 * request as both first and last block, and enqueue it.
 */
static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}
|
||||
|
||||
/* completion context for the synchronous digest done in hmac_setkey */
struct qce_ahash_result {
	struct completion completion;
	int error;
};
|
||||
|
||||
/*
 * Completion callback for the setkey digest: record the final status
 * and wake up the waiter.  -EINPROGRESS only notifies that a backlogged
 * request has started, so it is ignored here.
 */
static void qce_digest_complete(struct crypto_async_request *req, int error)
{
	struct qce_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
|
||||
|
||||
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
|
||||
unsigned int keylen)
|
||||
{
|
||||
unsigned int digestsize = crypto_ahash_digestsize(tfm);
|
||||
struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
|
||||
struct qce_ahash_result result;
|
||||
struct ahash_request *req;
|
||||
struct scatterlist sg;
|
||||
unsigned int blocksize;
|
||||
struct crypto_ahash *ahash_tfm;
|
||||
u8 *buf;
|
||||
int ret;
|
||||
const char *alg_name;
|
||||
|
||||
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
|
||||
memset(ctx->authkey, 0, sizeof(ctx->authkey));
|
||||
|
||||
if (keylen <= blocksize) {
|
||||
memcpy(ctx->authkey, key, keylen);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (digestsize == SHA1_DIGEST_SIZE)
|
||||
alg_name = "sha1-qce";
|
||||
else if (digestsize == SHA256_DIGEST_SIZE)
|
||||
alg_name = "sha256-qce";
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
|
||||
CRYPTO_ALG_TYPE_AHASH_MASK);
|
||||
if (IS_ERR(ahash_tfm))
|
||||
return PTR_ERR(ahash_tfm);
|
||||
|
||||
req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
|
||||
if (!req) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_ahash;
|
||||
}
|
||||
|
||||
init_completion(&result.completion);
|
||||
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
|
||||
qce_digest_complete, &result);
|
||||
crypto_ahash_clear_flags(ahash_tfm, ~0);
|
||||
|
||||
buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
|
||||
if (!buf) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_req;
|
||||
}
|
||||
|
||||
memcpy(buf, key, keylen);
|
||||
sg_init_one(&sg, buf, keylen);
|
||||
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
|
||||
|
||||
ret = crypto_ahash_digest(req);
|
||||
if (ret == -EINPROGRESS || ret == -EBUSY) {
|
||||
ret = wait_for_completion_interruptible(&result.completion);
|
||||
if (!ret)
|
||||
ret = result.error;
|
||||
}
|
||||
|
||||
if (ret)
|
||||
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
|
||||
|
||||
kfree(buf);
|
||||
err_free_req:
|
||||
ahash_request_free(req);
|
||||
err_free_ahash:
|
||||
crypto_free_ahash(ahash_tfm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * tfm constructor: set the per-request context size and clear the
 * per-tfm key storage.
 */
static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}
|
||||
|
||||
/**
 * struct qce_ahash_def - static description of one ahash algorithm
 * @flags: QCE_HASH_* algorithm flags
 * @name: crypto API algorithm name
 * @drv_name: driver-specific (cra_driver_name) algorithm name
 * @digestsize: digest size in bytes
 * @blocksize: block size in bytes
 * @statesize: size of the exported state structure
 * @std_iv: standard initial hash values
 */
struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};
|
||||
|
||||
/* algorithms registered by this driver: sha1/sha256, plain and HMAC */
static const struct qce_ahash_def ahash_def[] = {
	{
		.flags		= QCE_HASH_SHA1,
		.name		= "sha1",
		.drv_name	= "sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct sha1_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256,
		.name		= "sha256",
		.drv_name	= "sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct sha256_state),
		.std_iv		= std_iv_sha256,
	},
	{
		.flags		= QCE_HASH_SHA1_HMAC,
		.name		= "hmac(sha1)",
		.drv_name	= "hmac-sha1-qce",
		.digestsize	= SHA1_DIGEST_SIZE,
		.blocksize	= SHA1_BLOCK_SIZE,
		.statesize	= sizeof(struct sha1_state),
		.std_iv		= std_iv_sha1,
	},
	{
		.flags		= QCE_HASH_SHA256_HMAC,
		.name		= "hmac(sha256)",
		.drv_name	= "hmac-sha256-qce",
		.digestsize	= SHA256_DIGEST_SIZE,
		.blocksize	= SHA256_BLOCK_SIZE,
		.statesize	= sizeof(struct sha256_state),
		.std_iv		= std_iv_sha256,
	},
};
|
||||
|
||||
/*
 * Build a qce_alg_template from one static definition, fill in the
 * ahash callbacks and crypto_alg fields, and register it with the
 * crypto API.  On success the template is linked into ahash_algs and
 * later freed by qce_ahash_unregister().
 */
static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	/* only the keyed (HMAC) variants get a setkey handler */
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;
	INIT_LIST_HEAD(&base->cra_list);

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}
|
||||
|
||||
/* unregister and free every algorithm previously added to ahash_algs */
static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}
|
||||
|
||||
/*
 * Register all algorithms in ahash_def[]; on the first failure roll
 * back everything registered so far.
 */
static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}
|
||||
|
||||
/* ops vector picked up by the qce core for the AHASH algorithm type */
const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};
|
81
drivers/crypto/qce/sha.h
Normal file
81
drivers/crypto/qce/sha.h
Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#ifndef _SHA_H_
|
||||
#define _SHA_H_
|
||||
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/sha.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "core.h"
|
||||
|
||||
/* largest block/digest sizes among the supported algorithms (SHA-256) */
#define QCE_SHA_MAX_BLOCKSIZE		SHA256_BLOCK_SIZE
#define QCE_SHA_MAX_DIGESTSIZE		SHA256_DIGEST_SIZE

/* per-tfm context: only the (HMAC/CMAC) auth key is kept here */
struct qce_sha_ctx {
	u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
};
|
||||
|
||||
/**
 * struct qce_sha_reqctx - holds private ahash objects per request
 * @buf: used during update, import and export
 * @tmpbuf: buffer for internal use
 * @digest: calculated digest buffer
 * @buflen: length of the buffer
 * @flags: operation flags
 * @src_orig: original request sg list
 * @nbytes_orig: original request number of bytes
 * @src_chained: is source scatterlist chained
 * @src_nents: source number of entries
 * @byte_count: byte count
 * @count: save count in states during update, import and export
 * @first_blk: is it the first block
 * @last_blk: is it the last block
 * @sg: used to chain sg lists
 * @authkey: pointer to auth key in sha ctx
 * @authklen: auth key length
 * @result_sg: scatterlist used for result buffer
 */
struct qce_sha_reqctx {
	u8 buf[QCE_SHA_MAX_BLOCKSIZE];
	u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
	u8 digest[QCE_SHA_MAX_DIGESTSIZE];
	unsigned int buflen;
	unsigned long flags;
	struct scatterlist *src_orig;
	unsigned int nbytes_orig;
	bool src_chained;
	int src_nents;
	__be32 byte_count[2];
	u64 count;
	bool first_blk;
	bool last_blk;
	struct scatterlist sg[2];
	u8 *authkey;
	unsigned int authklen;
	struct scatterlist result_sg;
};
|
||||
|
||||
/* map a crypto_tfm back to the qce_alg_template that registered it */
static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
					     struct ahash_alg, halg);

	return container_of(alg, struct qce_alg_template, alg.ahash);
}
|
||||
|
||||
extern const struct qce_algo_ops ahash_ops;
|
||||
|
||||
#endif /* _SHA_H_ */
|
@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
|
||||
static irqreturn_t cryp_interrupt_handler(int irq, void *param)
|
||||
{
|
||||
struct cryp_ctx *ctx;
|
||||
int i;
|
||||
int count;
|
||||
struct cryp_device_data *device_data;
|
||||
|
||||
if (param == NULL) {
|
||||
@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
|
||||
if (cryp_pending_irq_src(device_data,
|
||||
CRYP_IRQ_SRC_OUTPUT_FIFO)) {
|
||||
if (ctx->outlen / ctx->blocksize > 0) {
|
||||
for (i = 0; i < ctx->blocksize / 4; i++) {
|
||||
*(ctx->outdata) = readl_relaxed(
|
||||
&device_data->base->dout);
|
||||
ctx->outdata += 4;
|
||||
ctx->outlen -= 4;
|
||||
}
|
||||
count = ctx->blocksize / 4;
|
||||
|
||||
readsl(&device_data->base->dout, ctx->outdata, count);
|
||||
ctx->outdata += count;
|
||||
ctx->outlen -= count;
|
||||
|
||||
if (ctx->outlen == 0) {
|
||||
cryp_disable_irq_src(device_data,
|
||||
@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
|
||||
} else if (cryp_pending_irq_src(device_data,
|
||||
CRYP_IRQ_SRC_INPUT_FIFO)) {
|
||||
if (ctx->datalen / ctx->blocksize > 0) {
|
||||
for (i = 0 ; i < ctx->blocksize / 4; i++) {
|
||||
writel_relaxed(ctx->indata,
|
||||
&device_data->base->din);
|
||||
ctx->indata += 4;
|
||||
ctx->datalen -= 4;
|
||||
}
|
||||
count = ctx->blocksize / 4;
|
||||
|
||||
writesl(&device_data->base->din, ctx->indata, count);
|
||||
|
||||
ctx->indata += count;
|
||||
ctx->datalen -= count;
|
||||
|
||||
if (ctx->datalen == 0)
|
||||
cryp_disable_irq_src(device_data,
|
||||
|
@ -75,9 +75,9 @@ static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
|
||||
|
||||
static inline void aead_givcrypt_set_callback(
|
||||
struct aead_givcrypt_request *req, u32 flags,
|
||||
crypto_completion_t complete, void *data)
|
||||
crypto_completion_t compl, void *data)
|
||||
{
|
||||
aead_request_set_callback(&req->areq, flags, complete, data);
|
||||
aead_request_set_callback(&req->areq, flags, compl, data);
|
||||
}
|
||||
|
||||
static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
|
||||
|
@ -410,4 +410,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size)
|
||||
return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
|
||||
}
|
||||
|
||||
static inline void crypto_yield(u32 flags)
|
||||
{
|
||||
if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
#endif /* _CRYPTO_ALGAPI_H */
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user