Merge branch 'net-tls-redo-the-RX-resync-locking'
Jakub Kicinski says:

====================
net/tls: redo the RX resync locking

Take two of making sure we don't use a NULL netdev pointer for RX resync.
This time using a bit and an open coded wait loop.

v2:
 - fix build warning (DaveM).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2b66552eb2
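The scheme the cover letter describes -- a flag bit taken with test_and_set_bit() on the resync path and an open-coded wait loop in tls_device_down() -- can be sketched outside the kernel with C11 atomics. This is only an illustrative analogue, not the patch itself: every name below is invented for the sketch, seq_cst atomics stand in for the kernel's bitops and smp_mb__before_atomic(), and nanosleep() stands in for usleep_range().

/*
 * Userspace analogue of the locking scheme described above: the resync
 * path takes a flag bit instead of a sleeping lock, and device teardown
 * publishes the NULL pointer first and then waits for the bit to drop.
 */
#define _POSIX_C_SOURCE 200809L
#include <stdatomic.h>
#include <stddef.h>
#include <time.h>

#define SYNC_RUNNING 0x1u                 /* analogue of TLS_RX_SYNC_RUNNING */

struct dev;                               /* stand-in for struct net_device */

struct ctx {
        _Atomic(struct dev *) netdev;     /* cleared by device_down() */
        atomic_uint flags;                /* analogue of tls_ctx->flags */
};

static void driver_resync(struct dev *dev) { (void)dev; /* driver callback stub */ }

/* Reader side: analogue of the new tls_device_resync_rx(). */
static void resync(struct ctx *c)
{
        /* test_and_set_bit(): bail out if a resync is already running. */
        if (atomic_fetch_or(&c->flags, SYNC_RUNNING) & SYNC_RUNNING)
                return;

        struct dev *dev = atomic_load(&c->netdev);
        if (dev)
                driver_resync(dev);

        /* clear_bit_unlock(): let a waiting device_down() proceed. */
        atomic_fetch_and(&c->flags, ~SYNC_RUNNING);
}

/* Writer side: analogue of the tls_device_down() hunk. */
static void device_down(struct ctx *c)
{
        /* Publish the NULL pointer before testing the bit; with seq_cst
         * operations a concurrent resync either sees NULL or is still
         * holding the bit when we test it below. */
        atomic_store(&c->netdev, NULL);

        /* Open-coded wait loop: a resync that read the old pointer still
         * holds the bit, so sleep until it finishes. */
        while (atomic_load(&c->flags) & SYNC_RUNNING) {
                struct timespec ts = { 0, 10 * 1000 };  /* ~10us */
                nanosleep(&ts, NULL);
        }
        /* Safe to drop the device reference now (dev_put() in the patch). */
}

int main(void)
{
        struct ctx c = { NULL, 0 };

        resync(&c);        /* no device: takes and releases the bit, does nothing */
        device_down(&c);   /* nothing running: returns immediately */
        return 0;
}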
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -209,6 +209,10 @@ struct tls_offload_context_tx {
 	(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) +       \
 	 TLS_DRIVER_STATE_SIZE)
 
+enum tls_context_flags {
+	TLS_RX_SYNC_RUNNING = 0,
+};
+
 struct cipher_context {
 	char *iv;
 	char *rec_seq;
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -550,11 +550,23 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 	}
 }
 
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+				 struct sock *sk, u32 seq, u64 rcd_sn)
+{
+	struct net_device *netdev;
+
+	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+		return;
+	netdev = READ_ONCE(tls_ctx->netdev);
+	if (netdev)
+		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx;
-	struct net_device *netdev;
 	u32 is_req_pending;
 	s64 resync_req;
 	u32 req_seq;
@@ -570,12 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 	if (unlikely(is_req_pending) && req_seq == seq &&
 	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
 		seq += TLS_HEADER_SIZE - 1;
-		down_read(&device_offload_lock);
-		netdev = tls_ctx->netdev;
-		if (netdev)
-			netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq,
-							      rcd_sn);
-		up_read(&device_offload_lock);
+		tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
 	}
 }
 
@@ -977,7 +984,10 @@ static int tls_device_down(struct net_device *netdev)
 		if (ctx->rx_conf == TLS_HW)
 			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
 							TLS_OFFLOAD_CTX_DIR_RX);
-		ctx->netdev = NULL;
+		WRITE_ONCE(ctx->netdev, NULL);
+		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+			usleep_range(10, 200);
 		dev_put(netdev);
 		list_del_init(&ctx->list);
 
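In the handle_device_resync() hunk above, the pending resync request is consumed with atomic64_try_cmpxchg() so that only one caller acts on it. Below is a minimal C11 sketch of that claim step; the names and the packing (low bit = pending, high 32 bits = sequence number) are chosen for the sketch and only approximate what the driver side actually posts.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One 64-bit word carries both the requested sequence number and a
 * "request pending" bit, so it can be read and claimed atomically. */
static _Atomic uint64_t resync_req;

/* Producer side: a driver would post this from its RX path. */
static void post_request(uint32_t seq)
{
        atomic_store(&resync_req, ((uint64_t)seq << 32) | 1);
}

/* Consumer side: returns true only for the single caller that claims the
 * request, mirroring the atomic64_try_cmpxchg() in the hunk above. */
static bool claim_request(uint32_t *seq)
{
        uint64_t req = atomic_load(&resync_req);

        if (!(req & 1))
                return false;                 /* nothing pending */
        if (!atomic_compare_exchange_strong(&resync_req, &req, 0))
                return false;                 /* raced with another claimer */
        *seq = (uint32_t)(req >> 32);
        return true;
}

int main(void)
{
        uint32_t seq;

        post_request(1000);
        if (claim_request(&seq))
                printf("claimed resync at seq %u\n", seq);
        if (!claim_request(&seq))
                printf("second claim correctly finds nothing pending\n");
        return 0;
}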