forked from Minki/linux
330bdcfadc
AF_RXRPC has a keepalive message generator that generates a message for a
peer ~20s after the last transmission to that peer to keep firewall ports
open. The implementation is incorrect in the following ways:
(1) It mixes up ktime_t and time64_t types.
(2) It uses ktime_get_real(), the output of which may jump forward or
backward due to adjustments to the time of day.
(3) If the current time jumps forward too much or jumps backwards, the
generator function will crank the base of the time ring round one slot
at a time (ie. a 1s period) until it catches up, spewing out VERSION
packets as it goes.
Fix the problem by:
(1) Only using time64_t. There's no need for sub-second resolution.
(2) Use ktime_get_seconds() rather than ktime_get_real() so that time
isn't perceived to go backwards.
(3) Simplifying rxrpc_peer_keepalive_worker() by splitting it into two
parts:
(a) The "worker" function that manages the buckets and the timer.
(b) The "dispatch" function that takes the pending peers and
potentially transmits a keepalive packet before putting them back
in the ring into the slot appropriate to the revised last-Tx time.
(4) Taking everything that's pending out of the ring and splicing it into
a temporary collector list for processing.
In the case that there's been a significant jump forward, the ring
gets entirely emptied and then the time base can be warped forward
before the peers are processed.
The warping can't happen if the ring isn't empty because the slot a
peer is in is keepalive-time dependent, relative to the base time.
(5) Limit the number of iterations of the bucket array when scanning it.
(6) Set the timer to skip any empty slots as there's no point waking up if
there's nothing to do yet.
This can be triggered by an incoming call from a server after a reboot with
AF_RXRPC and AFS built into the kernel causing a peer record to be set up
before userspace is started. The system clock is then adjusted by
userspace, thereby potentially causing the keepalive generator to have a
meltdown - which leads to a message like:
watchdog: BUG: soft lockup - CPU#0 stuck for 23s! [kworker/0:1:23]
...
Workqueue: krxrpcd rxrpc_peer_keepalive_worker
EIP: lock_acquire+0x69/0x80
...
Call Trace:
? rxrpc_peer_keepalive_worker+0x5e/0x350
? _raw_spin_lock_bh+0x29/0x60
? rxrpc_peer_keepalive_worker+0x5e/0x350
? rxrpc_peer_keepalive_worker+0x5e/0x350
? __lock_acquire+0x3d3/0x870
? process_one_work+0x110/0x340
? process_one_work+0x166/0x340
? process_one_work+0x110/0x340
? worker_thread+0x39/0x3c0
? kthread+0xdb/0x110
? cancel_delayed_work+0x90/0x90
? kthread_stop+0x70/0x70
? ret_from_fork+0x19/0x24
Fixes: ace45bec6d ("rxrpc: Fix firewall route keepalive")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
135 lines
3.8 KiB
C
/* rxrpc network namespace handling.
|
|
*
|
|
* Copyright (C) 2017 Red Hat, Inc. All Rights Reserved.
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public Licence
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the Licence, or (at your option) any later version.
|
|
*/
|
|
|
|
#include <linux/proc_fs.h>
|
|
#include "ar-internal.h"
|
|
|
|
unsigned int rxrpc_net_id;
|
|
|
|
static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
|
|
{
|
|
struct rxrpc_net *rxnet =
|
|
container_of(timer, struct rxrpc_net, client_conn_reap_timer);
|
|
|
|
if (rxnet->live)
|
|
rxrpc_queue_work(&rxnet->client_conn_reaper);
|
|
}
|
|
|
|
static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
|
|
{
|
|
struct rxrpc_net *rxnet =
|
|
container_of(timer, struct rxrpc_net, service_conn_reap_timer);
|
|
|
|
if (rxnet->live)
|
|
rxrpc_queue_work(&rxnet->service_conn_reaper);
|
|
}
|
|
|
|
static void rxrpc_peer_keepalive_timeout(struct timer_list *timer)
|
|
{
|
|
struct rxrpc_net *rxnet =
|
|
container_of(timer, struct rxrpc_net, peer_keepalive_timer);
|
|
|
|
if (rxnet->live)
|
|
rxrpc_queue_work(&rxnet->peer_keepalive_work);
|
|
}
|
|
|
|
/*
 * Initialise a per-network namespace record.
 *
 * Sets up the namespace-private rxrpc state: call and connection lists and
 * their locks, the reaper work items and timers, the peer hash table and
 * keepalive buckets, and the /proc/net/rxrpc directory.  Returns 0 on
 * success or -ENOMEM if the proc directory cannot be created.
 */
static __net_init int rxrpc_init_net(struct net *net)
{
	struct rxrpc_net *rxnet = rxrpc_net(net);
	int ret, i;

	/* Mark live before anything that could fire a timer/work item;
	 * the timeout handlers above check this flag before queueing work.
	 */
	rxnet->live = true;

	/* Pick a random connection epoch; RXRPC_RANDOM_EPOCH is OR'd in —
	 * presumably a flag bit distinguishing randomly-chosen epochs
	 * (see its definition in ar-internal.h).
	 */
	get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
	rxnet->epoch |= RXRPC_RANDOM_EPOCH;

	/* Call tracking.  nr_calls starts at 1 — NOTE(review): looks like a
	 * bias so the count only reaches 0 at namespace teardown; confirm
	 * against rxrpc_destroy_all_calls().
	 */
	INIT_LIST_HEAD(&rxnet->calls);
	rwlock_init(&rxnet->call_lock);
	atomic_set(&rxnet->nr_calls, 1);

	/* Connection tracking and the service-connection reaper. */
	atomic_set(&rxnet->nr_conns, 1);
	INIT_LIST_HEAD(&rxnet->conn_proc_list);
	INIT_LIST_HEAD(&rxnet->service_conns);
	rwlock_init(&rxnet->conn_lock);
	INIT_WORK(&rxnet->service_conn_reaper,
		  rxrpc_service_connection_reaper);
	timer_setup(&rxnet->service_conn_reap_timer,
		    rxrpc_service_conn_reap_timeout, 0);

	/* Client-connection cache and its reaper. */
	rxnet->nr_client_conns = 0;
	rxnet->nr_active_client_conns = 0;
	rxnet->kill_all_client_conns = false;
	spin_lock_init(&rxnet->client_conn_cache_lock);
	spin_lock_init(&rxnet->client_conn_discard_lock);
	INIT_LIST_HEAD(&rxnet->waiting_client_conns);
	INIT_LIST_HEAD(&rxnet->active_client_conns);
	INIT_LIST_HEAD(&rxnet->idle_client_conns);
	INIT_WORK(&rxnet->client_conn_reaper,
		  rxrpc_discard_expired_client_conns);
	timer_setup(&rxnet->client_conn_reap_timer,
		    rxrpc_client_conn_reap_timeout, 0);

	/* Local endpoints. */
	INIT_LIST_HEAD(&rxnet->local_endpoints);
	mutex_init(&rxnet->local_mutex);

	/* Peer records and the firewall-keepalive machinery: a ring of
	 * time buckets plus a list for newly-added peers.  The time base
	 * uses ktime_get_seconds() (monotonic seconds) so wall-clock
	 * adjustments cannot make it appear to jump.
	 */
	hash_init(rxnet->peer_hash);
	spin_lock_init(&rxnet->peer_hash_lock);
	for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
		INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
	INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
	timer_setup(&rxnet->peer_keepalive_timer,
		    rxrpc_peer_keepalive_timeout, 0);
	INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
	rxnet->peer_keepalive_base = ktime_get_seconds();

	/* Create /proc/net/rxrpc/ and its "calls" and "conns" seq files.
	 * Only the directory creation is treated as fatal.
	 */
	ret = -ENOMEM;
	rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
	if (!rxnet->proc_net)
		goto err_proc;

	proc_create_net("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_ops,
			sizeof(struct seq_net_private));
	proc_create_net("conns", 0444, rxnet->proc_net,
			&rxrpc_connection_seq_ops,
			sizeof(struct seq_net_private));
	return 0;

err_proc:
	rxnet->live = false;
	return ret;
}
|
|
|
|
/*
 * Clean up a per-network namespace record.
 *
 * Teardown order matters here: the live flag is cleared first so the
 * timer callbacks refuse to queue further work, then the keepalive timer
 * and worker are stopped synchronously before the peer records they
 * touch are destroyed.
 */
static __net_exit void rxrpc_exit_net(struct net *net)
{
	struct rxrpc_net *rxnet = rxrpc_net(net);

	rxnet->live = false;
	/* Stop the keepalive machinery before freeing the peers it walks. */
	del_timer_sync(&rxnet->peer_keepalive_timer);
	cancel_work_sync(&rxnet->peer_keepalive_work);
	/* Tear down in dependency order: calls, then connections, then the
	 * peers they point at, then the local endpoints.
	 */
	rxrpc_destroy_all_calls(rxnet);
	rxrpc_destroy_all_connections(rxnet);
	rxrpc_destroy_all_peers(rxnet);
	rxrpc_destroy_all_locals(rxnet);
	proc_remove(rxnet->proc_net);
}
|
|
|
|
struct pernet_operations rxrpc_net_ops = {
|
|
.init = rxrpc_init_net,
|
|
.exit = rxrpc_exit_net,
|
|
.id = &rxrpc_net_id,
|
|
.size = sizeof(struct rxrpc_net),
|
|
};
|