// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

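/*
 * The DMA iterators walk the request scatterlist one SRAM-sized operation
 * at a time. For intermediate (non-final) requests the length is rounded
 * down to a whole number of hash blocks; the remainder stays in the
 * request cache until the next update.
 */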
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

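/*
 * The ->cache buffer is part of the request object; for DMA operations an
 * extra buffer is allocated from the engine's DMA cache pool so it can be
 * referenced by the TDMA chain, and is freed back to the same pool.
 */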
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}

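/*
 * MD5/SHA padding: a 0x80 byte, zeroes up to 56 mod 64, then the total
 * message length in bits as a 64-bit integer (little endian for MD5, big
 * endian for SHA), so the padded message is a whole number of 64-byte
 * blocks.
 */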
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);

                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}

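/*
 * CPU-driven ("standard") step: copy the operation template, any cached
 * bytes and up to one SRAM payload of source data into the engine SRAM,
 * pick the fragment mode, then kick the accelerator and wait for the
 * completion interrupt.
 */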
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = creq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;
        unsigned int digsize;
        int i;

        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        if (engine->pool)
                memcpy(engine->sram_pool, &creq->op_tmpl,
                       sizeof(creq->op_tmpl));
        else
                memcpy_toio(engine->sram, &creq->op_tmpl,
                            sizeof(creq->op_tmpl));

        if (!sreq->offset) {
                digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
                for (i = 0; i < digsize / 4; i++)
                        writel_relaxed(creq->state[i],
                                       engine->regs + CESA_IVDIG(i));
        }

        if (creq->cache_ptr) {
                if (engine->pool)
                        memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
                               creq->cache, creq->cache_ptr);
                else
                        memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                                    creq->cache, creq->cache_ptr);
        }

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += mv_cesa_sg_copy_to_sram(
                        engine, req->src, creq->src_nents,
                        CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
                        len - creq->cache_ptr, sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                if (engine->pool)
                                        memcpy(creq->cache,
                                               engine->sram_pool +
                                               CESA_SA_DATA_SRAM_OFFSET + len,
                                               new_cache_ptr);
                                else
                                        memcpy_fromio(creq->cache,
                                                      engine->sram +
                                                      CESA_SA_DATA_SRAM_OFFSET +
                                                      len,
                                                      new_cache_ptr);
                        } else {
                                i = mv_cesa_ahash_pad_req(creq, creq->cache);
                                len += i;
                                if (engine->pool)
                                        memcpy(engine->sram_pool + len +
                                               CESA_SA_DATA_SRAM_OFFSET,
                                               creq->cache, i);
                                else
                                        memcpy_toio(engine->sram + len +
                                                    CESA_SA_DATA_SRAM_OFFSET,
                                                    creq->cache, i);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        if (engine->pool)
                memcpy(engine->sram_pool, op, sizeof(*op));
        else
                memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        WARN_ON(readl(engine->regs + CESA_SA_CMD) &
                CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_req *base = &creq->base;

        /* We must explicitly set the digest state. */
        if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
                struct mv_cesa_engine *engine = base->engine;
                int i;

                /* Set the hash state in the IVDIG regs. */
                for (i = 0; i < ARRAY_SIZE(creq->state); i++)
                        writel_relaxed(creq->state[i], engine->regs +
                                       CESA_IVDIG(i));
        }

        mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                return mv_cesa_dma_process(&creq->base, status);

        return mv_cesa_ahash_std_process(ahashreq, status);
}

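/*
 * Retrieve the digest: either it was already copied out by a TDMA "result"
 * descriptor (DMA path with no partial updates), or it is read back from
 * the IVDIG registers and byte-swapped to the algorithm's output
 * endianness.
 */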
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int digsize;
        int i;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
            (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
            CESA_TDMA_RESULT) {
                __le32 *data = NULL;

                /*
                 * Result is already in the correct endianness when the SA is
                 * used
                 */
                data = creq->base.chain.last->op->ctx.hash.hash;
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = le32_to_cpu(data[i]);

                memcpy(ahashreq->result, data, digsize);
        } else {
                for (i = 0; i < digsize / 4; i++)
                        creq->state[i] = readl_relaxed(engine->regs +
                                                       CESA_IVDIG(i));
                if (creq->last_req) {
                        /*
                         * Hardware's MD5 digest is in little endian format, but
                         * SHA in big endian format
                         */
                        if (creq->algo_le) {
                                __le32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_le32(creq->state[i]);
                        } else {
                                __be32 *result = (void *)ahashreq->result;

                                for (i = 0; i < digsize / 4; i++)
                                        result[i] = cpu_to_be32(creq->state[i]);
                        }
                }
        }

        atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .cleanup = mv_cesa_ahash_req_cleanup,
        .complete = mv_cesa_ahash_complete,
};

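/*
 * Reset the request context and configure the operation template as a
 * MAC-only first fragment with zero total/fragment lengths; the
 * per-algorithm init functions then seed creq->state with the standard
 * initial hash values.
 */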
static void mv_cesa_ahash_init(struct ahash_request *req,
                               struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

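/*
 * Updates that still fit below one hash block are not sent to the engine
 * at all: the bytes are accumulated in creq->cache and the request
 * completes immediately.
 */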
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        bool cached = false;

        if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
            !creq->last_req) {
                cached = true;

                if (!req->nbytes)
                        return cached;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return cached;
}

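/*
 * Emit one operation descriptor covering frag_len bytes plus the dummy
 * descriptor that launches it, and demote the template from "first" to
 * "mid" fragment for the descriptors that follow.
 */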
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

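/*
 * Terminate a DMA hash chain. If the engine can finish the hash itself
 * (total length within CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX and data
 * outstanding), emit a final fragment plus a result-copy descriptor;
 * otherwise generate the padding in software and feed it as extra "mid"
 * fragment data, split across SRAM payloads when it does not fit.
 */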
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                      CESA_SA_DESC_CFG_NOT_FRAG :
                                      CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                ret = mv_cesa_dma_add_result_op(chain,
                                                CESA_SA_CFG_SRAM_OFFSET,
                                                CESA_SA_DATA_SRAM_OFFSET,
                                                CESA_TDMA_SRC_IN_SRAM, flags);
                if (ret)
                        return ERR_PTR(-ENOMEM);
                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                    CESA_SA_DATA_SRAM_OFFSET +
                                                    frag_len,
                                                    ahashdreq->padding_dma,
                                                    len, CESA_TDMA_DST_IN_SRAM,
                                                    flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}

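/*
 * Build the whole TDMA chain for a request: map the source scatterlist,
 * inject the cached bytes, add one operation per SRAM payload of new
 * data, then the final operation (padded or engine-finished), and flag
 * the chain ends so requests can be chained or broken at the right
 * descriptor.
 */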
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        bool set_state = false;
        int ret;
        u32 type;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
                set_state = true;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&basereq->chain,
                                                  &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }

        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation. Queue up the final
         * operation, which depends whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        /*
         * If results are copied via DMA, this means that this
         * request can be directly processed by the engine,
         * without partial updates. So we can chain it at the
         * DMA level with other requests.
         */
        type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

        if (op && type != CESA_TDMA_RESULT) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

        if (set_state) {
                /*
                 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
                 * let the step logic know that the IVDIG registers should be
                 * explicitly set before launching a TDMA chain.
                 */
                basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
        }

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG");
                return creq->src_nents;
        }

        *cached = mv_cesa_ahash_cache_req(req);

        if (*cached)
                return 0;

        if (cesa_dev->caps->has_tdma)
                return mv_cesa_ahash_dma_req_init(req);
        else
                return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_engine *engine;
        bool cached = false;
        int ret;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        engine = mv_cesa_select_engine(req->nbytes);
        mv_cesa_ahash_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        creq->len += req->nbytes;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        return mv_cesa_ahash_queue_req(req);
}

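/*
 * Export/import serialize the partial state as (IV words, total length,
 * cached partial block); import re-runs crypto_ahash_init() and switches
 * the template to "mid" fragment once at least one block was consumed.
 */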
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

        mv_cesa_ahash_init(req, &tmpl, true);

        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

        mv_cesa_ahash_init(req, &tmpl, false);

        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_ALLOCATES_MEMORY |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

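/*
 * Hash one HMAC pad block synchronously through the ahash transform
 * attached to @req and export the resulting partial state, which is used
 * as a precomputed inner/outer IV.
 */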
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        DECLARE_CRYPTO_WAIT(result);
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        ret = crypto_wait_req(ret, &result);

        if (ret)
                return ret;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

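/*
 * Standard HMAC key preprocessing: keys longer than a block are first
 * digested, the (possibly shortened) key is zero-padded to the block
 * size, and ipad/opad are derived by XORing with HMAC_IPAD_VALUE and
 * HMAC_OPAD_VALUE.
 */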
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        DECLARE_CRYPTO_WAIT(result);
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           crypto_req_done, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);

                ret = crypto_ahash_digest(req);
                ret = crypto_wait_req(ret, &result);

                /* Set the memory region to 0 to avoid any leak. */
                kfree_sensitive(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

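/*
 * Common transform init for the three HMAC variants: hook up the request
 * ops and reserve room for the per-request hash context.
 */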
static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

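/* hmac(md5), offloaded to the engine with a precomputed key state. */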
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

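/*
 * The inner digest state occupies the start of ctx->iv and the outer state
 * a fixed offset of 8 32-bit words, large enough for the biggest supported
 * state (SHA-256); both are stored big-endian, which appears to be the
 * byte order the engine expects.
 */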
static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = cpu_to_be32(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

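/*
 * cra_priority 300 makes this engine-backed hmac(md5) preferred over
 * lower-priority software implementations of the same algorithm.
 */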
struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

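/* hmac(sha1): same scheme as the MD5 variant above. */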
static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

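/* hmac(sha256): same scheme again, with SHA-256's full 8-word state. */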
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};