Crypto/chcr: Calculate src and dst sg lengths separately for dma map

This patch calculates the src and dst sg lengths separately for
DMA mapping in the case of an AEAD operation.

This fixes a panic that occurs when a zero-length sg is accessed.
Panic:
[  138.173225] kernel BUG at drivers/iommu/intel-iommu.c:1184!

Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Ayush Sawal 2020-06-10 02:54:31 +05:30 committed by David S. Miller
parent 934e36ec5e
commit fb90a1c85d
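
As an aside, here is a minimal userspace sketch of the length split
described above. It assumes a non-zero op_type means decryption, as in
the driver; the helper name aead_sg_lengths() is made up for
illustration and is not part of chcr.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the calculation added in chcr_aead_dma_map()/_unmap():
 * in-place requests (src == dst) use one combined length for the
 * bidirectional mapping, while out-of-place requests map only
 * assoclen + cryptlen on the source and shrink (decrypt) or grow
 * (encrypt) the destination by the authentication tag size.
 */
static void aead_sg_lengths(unsigned int assoclen, unsigned int cryptlen,
			    unsigned int authsize, bool decrypt,
			    bool in_place, int *src_len, int *dst_len)
{
	if (in_place) {
		*src_len = assoclen + cryptlen + (decrypt ? 0 : authsize);
		*dst_len = *src_len;
	} else {
		*src_len = assoclen + cryptlen;
		*dst_len = (int)(assoclen + cryptlen) +
			   (decrypt ? -(int)authsize : (int)authsize);
	}
}

int main(void)
{
	int src_len, dst_len;

	/* out-of-place encrypt: dst needs room for the 16-byte tag */
	aead_sg_lengths(16, 64, 16, false, false, &src_len, &dst_len);
	printf("encrypt: src=%d dst=%d\n", src_len, dst_len);	/* 80 96 */

	/* out-of-place decrypt: dst drops the tag carried in cryptlen */
	aead_sg_lengths(16, 80, 16, true, false, &src_len, &dst_len);
	printf("decrypt: src=%d dst=%d\n", src_len, dst_len);	/* 96 80 */

	return 0;
}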

@@ -2590,11 +2590,22 @@ int chcr_aead_dma_map(struct device *dev,
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	unsigned int authsize = crypto_aead_authsize(tfm);
-	int dst_size;
+	int src_len, dst_len;
 
-	dst_size = req->assoclen + req->cryptlen + (op_type ?
-				0 : authsize);
-	if (!req->cryptlen || !dst_size)
+	/* calculate and handle src and dst sg length separately
+	 * for inplace and out-of place operations
+	 */
+	if (req->src == req->dst) {
+		src_len = req->assoclen + req->cryptlen + (op_type ?
+							0 : authsize);
+		dst_len = src_len;
+	} else {
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = req->assoclen + req->cryptlen + (op_type ?
+						-authsize : authsize);
+	}
+
+	if (!req->cryptlen || !src_len || !dst_len)
 		return 0;
 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
 					DMA_BIDIRECTIONAL);
@@ -2606,20 +2617,23 @@ int chcr_aead_dma_map(struct device *dev,
 	reqctx->b0_dma = 0;
 	if (req->src == req->dst) {
 		error = dma_map_sg(dev, req->src,
-				sg_nents_for_len(req->src, dst_size),
+				sg_nents_for_len(req->src, src_len),
 				DMA_BIDIRECTIONAL);
 		if (!error)
 			goto err;
 	} else {
-		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+		error = dma_map_sg(dev, req->src,
+				   sg_nents_for_len(req->src, src_len),
 				   DMA_TO_DEVICE);
 		if (!error)
 			goto err;
-		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+		error = dma_map_sg(dev, req->dst,
+				   sg_nents_for_len(req->dst, dst_len),
 				   DMA_FROM_DEVICE);
 		if (!error) {
-			dma_unmap_sg(dev, req->src, sg_nents(req->src),
-				     DMA_TO_DEVICE);
+			dma_unmap_sg(dev, req->src,
+				     sg_nents_for_len(req->src, src_len),
+				     DMA_TO_DEVICE);
 			goto err;
 		}
 	}
@@ -2637,24 +2651,37 @@ void chcr_aead_dma_unmap(struct device *dev,
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	unsigned int authsize = crypto_aead_authsize(tfm);
-	int dst_size;
+	int src_len, dst_len;
 
-	dst_size = req->assoclen + req->cryptlen + (op_type ?
-				0 : authsize);
-	if (!req->cryptlen || !dst_size)
+	/* calculate and handle src and dst sg length separately
+	 * for inplace and out-of place operations
+	 */
+	if (req->src == req->dst) {
+		src_len = req->assoclen + req->cryptlen + (op_type ?
+							0 : authsize);
+		dst_len = src_len;
+	} else {
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = req->assoclen + req->cryptlen + (op_type ?
+						-authsize : authsize);
+	}
+
+	if (!req->cryptlen || !src_len || !dst_len)
 		return;
 
 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
 			 DMA_BIDIRECTIONAL);
 	if (req->src == req->dst) {
 		dma_unmap_sg(dev, req->src,
-			     sg_nents_for_len(req->src, dst_size),
+			     sg_nents_for_len(req->src, src_len),
 			     DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(dev, req->src, sg_nents(req->src),
-			     DMA_TO_DEVICE);
-		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
-			     DMA_FROM_DEVICE);
+		dma_unmap_sg(dev, req->src,
+			     sg_nents_for_len(req->src, src_len),
+			     DMA_TO_DEVICE);
+		dma_unmap_sg(dev, req->dst,
+			     sg_nents_for_len(req->dst, dst_len),
+			     DMA_FROM_DEVICE);
 	}
 }
 
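
The diff also moves the out-of-place map/unmap calls from sg_nents() to
sg_nents_for_len(), so entries beyond the computed length (for example a
trailing zero-length sg, which the description above identifies as the
trigger for the BUG) are not passed to dma_map_sg(). A rough userspace
model of that difference, with invented toy_* names and a simplified
sg_entry struct standing in for struct scatterlist:

#include <stdio.h>

/* Simplified stand-in for a scatterlist entry (illustrative only). */
struct sg_entry {
	unsigned int length;
};

/* Counts every entry in the list, as sg_nents() does. */
static int toy_sg_nents(const struct sg_entry *sg, int total)
{
	(void)sg;
	return total;
}

/*
 * Counts only the entries needed to cover 'len' bytes, roughly what
 * sg_nents_for_len() does; returns -1 if the list is too short.
 */
static int toy_sg_nents_for_len(const struct sg_entry *sg, int total,
				unsigned int len)
{
	int i;

	for (i = 0; i < total; i++) {
		if (len <= sg[i].length)
			return i + 1;
		len -= sg[i].length;
	}
	return -1;
}

int main(void)
{
	/* 96 bytes of payload followed by an unused zero-length entry */
	struct sg_entry sg[] = { { 64 }, { 32 }, { 0 } };

	printf("all entries      : %d\n", toy_sg_nents(sg, 3));		/* 3 */
	printf("entries for 96 B : %d\n", toy_sg_nents_for_len(sg, 3, 96));	/* 2 */

	return 0;
}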