tls: rx: async: hold onto the input skb
Async crypto currently benefits from the fact that we decrypt in place.
When we allow input and output to be different skbs we will have to hang
onto the input while we move to the next record. Clone the inputs and
keep them on a list.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c618db2afe
parent 6ececdc513
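For context, here is a minimal sketch (not part of the patch) of the clone-and-hold pattern the commit message describes: the decrypt path clones the input skb onto a hold queue before handing the record to async crypto, and the reader purges that queue once all pending decryptions have completed. The demo_* names are illustrative stand-ins, not identifiers from the patch.

#include <linux/skbuff.h>
#include <net/sock.h>

struct demo_rx_ctx {				/* stand-in for tls_sw_context_rx */
	struct sk_buff_head async_hold;		/* clones of in-flight input skbs */
};

static void demo_rx_ctx_init(struct demo_rx_ctx *ctx)
{
	skb_queue_head_init(&ctx->async_hold);
}

/* Called when a record is submitted for async decryption: keep a clone of
 * the input skb alive so the crypto completion can still read it after the
 * reader has moved on to the next record.
 */
static int demo_hold_input(struct sock *sk, struct sk_buff *skb,
			   struct demo_rx_ctx *ctx)
{
	struct sk_buff *clone;

	clone = skb_clone(skb, sk->sk_allocation);
	if (!clone)
		return -ENOMEM;
	__skb_queue_tail(&ctx->async_hold, clone);
	return 0;
}

/* Called once all pending async decryptions have completed: the held
 * clones are no longer needed, drop them.
 */
static void demo_drop_held_inputs(struct demo_rx_ctx *ctx)
{
	__skb_queue_purge(&ctx->async_hold);
}

In the patch itself this pattern appears as the new tls_strp_msg_hold() helper plus the __skb_queue_purge() of ctx->async_hold in the recvmsg path shown below.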
include/net/tls.h
@@ -123,6 +123,7 @@ struct tls_sw_context_rx {
 	atomic_t decrypt_pending;
 	/* protect crypto_wait with decrypt_pending*/
 	spinlock_t decrypt_compl_lock;
+	struct sk_buff_head async_hold;
 	struct wait_queue_head wq;
 };
 
net/tls/Makefile
@@ -7,7 +7,7 @@ CFLAGS_trace.o := -I$(src)
 
 obj-$(CONFIG_TLS) += tls.o
 
-tls-y := tls_main.o tls_sw.o tls_proc.o trace.o
+tls-y := tls_main.o tls_sw.o tls_proc.o trace.o tls_strp.o
 
 tls-$(CONFIG_TLS_TOE) += tls_toe.o
 tls-$(CONFIG_TLS_DEVICE) += tls_device.o tls_device_fallback.o
net/tls/tls.h
@@ -124,6 +124,9 @@ int tls_sw_fallback_init(struct sock *sk,
 			 struct tls_offload_context_tx *offload_ctx,
 			 struct tls_crypto_info *crypto_info);
 
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+		      struct sk_buff_head *dst);
+
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
 	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;
net/tls/tls_strp.c (new file, 17 lines)
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/skbuff.h>
+
+#include "tls.h"
+
+int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
+		      struct sk_buff_head *dst)
+{
+	struct sk_buff *clone;
+
+	clone = skb_clone(skb, sk->sk_allocation);
+	if (!clone)
+		return -ENOMEM;
+	__skb_queue_tail(dst, clone);
+	return 0;
+}
net/tls/tls_sw.c
@@ -1535,8 +1535,13 @@ fallback_to_reg_recv:
 		goto exit_free_pages;
 
 	darg->skb = tls_strp_msg(ctx);
-	if (darg->async)
-		return 0;
+
+	if (unlikely(darg->async)) {
+		err = tls_strp_msg_hold(sk, skb, &ctx->async_hold);
+		if (err)
+			__skb_queue_tail(&ctx->async_hold, darg->skb);
+		return err;
+	}
 
 	if (prot->tail_size)
 		darg->tail = dctx->tail;
@@ -1998,14 +2003,16 @@ recv_end:
 		reinit_completion(&ctx->async_wait.completion);
 		pending = atomic_read(&ctx->decrypt_pending);
 		spin_unlock_bh(&ctx->decrypt_compl_lock);
-		if (pending) {
+		ret = 0;
+		if (pending)
 			ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-			if (ret) {
-				if (err >= 0 || err == -EINPROGRESS)
-					err = ret;
-				decrypted = 0;
-				goto end;
-			}
+		__skb_queue_purge(&ctx->async_hold);
+
+		if (ret) {
+			if (err >= 0 || err == -EINPROGRESS)
+				err = ret;
+			decrypted = 0;
+			goto end;
 		}
 
 		/* Drain records from the rx_list & copy if required */
@@ -2440,6 +2447,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		crypto_info = &ctx->crypto_recv.info;
 		cctx = &ctx->rx;
 		skb_queue_head_init(&sw_ctx_rx->rx_list);
+		skb_queue_head_init(&sw_ctx_rx->async_hold);
 		aead = &sw_ctx_rx->aead_recv;
 	}
 