// SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

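/*
 * Human-readable connection state names, indexed by connection state.  Each
 * name is padded to eight characters so that the state column in the /proc
 * output stays aligned.
 */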
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT_UNSECURED]		= "ClUnsec ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_ABORTED]			= "Aborted ",
};
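
/*
 * The call list is walked under the RCU read lock: ->start takes it and
 * ->stop drops it, so it is held across the whole dump and calls may come
 * and go underneath us.
 */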

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc_calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	rcu_read_lock();
	return seq_list_start_head_rcu(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next_rcu(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	enum rxrpc_call_state state;
	rxrpc_seq_t acks_hard_ack;
	char lbuff[50], rbuff[50];
	long timeout = 0;

	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " DebugId  TxSeq    TW RxSeq    RW RxSerial CW RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	local = call->local;
	if (local)
		sprintf(lbuff, "%pISpc", &local->srx.transport);
	else
		strcpy(lbuff, "no_local");

	sprintf(rbuff, "%pISpc", &call->dest_srx.transport);

	state = rxrpc_call_state(call);
	if (state != RXRPC_CALL_SERVER_PREALLOC)
		timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real());

	acks_hard_ack = READ_ONCE(call->acks_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
		   lbuff,
		   rbuff,
		   call->dest_srx.srx_service,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   refcount_read(&call->ref),
		   rxrpc_call_states[state],
		   call->abort_code,
		   call->debug_id,
		   acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
		   call->ackr_window, call->ackr_wtop - call->ackr_window,
		   call->rx_serial,
		   call->cong_cwnd,
		   timeout);

	return 0;
}

const struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};
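
/*
 * Note that, unlike the call list above, the connection proc list is
 * guarded by rxnet->conn_lock rather than RCU: ->start read-locks it and
 * ->stop releases it.
 */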

/*
 * generate a list of extant virtual connections in /proc/net/rxrpc_conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
				       loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	const char *state;
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Ref Act State    Key     "
			 " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->local->srx.transport);
	sprintf(rbuff, "%pISpc", &conn->peer->srx.transport);
print:
	state = rxrpc_is_conn_aborted(conn) ?
		rxrpc_call_completions[conn->completion] :
		rxrpc_conn_states[conn->state];
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u %3d"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   refcount_read(&conn->ref),
		   atomic_read(&conn->active),
		   state,
		   key_serial(conn->key),
		   conn->tx_serial,
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};
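
/*
 * The bundle proc list shares rxnet->conn_lock with the connection proc
 * list, so the same locking pattern is used for the dump.
 */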

/*
 * generate a list of extant virtual bundles in /proc/net/rxrpc/bundles
 */
static void *rxrpc_bundle_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->bundle_proc_list, *_pos);
}

static void *rxrpc_bundle_seq_next(struct seq_file *seq, void *v,
				   loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->bundle_proc_list, pos);
}

static void rxrpc_bundle_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_bundle_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_bundle *bundle;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->bundle_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID Ref Act Flg Key      |"
			 " Bundle   Conn_0   Conn_1   Conn_2   Conn_3\n"
			 );
		return 0;
	}

	bundle = list_entry(v, struct rxrpc_bundle, proc_link);

	sprintf(lbuff, "%pISpc", &bundle->local->srx.transport);
	sprintf(rbuff, "%pISpc", &bundle->peer->srx.transport);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %3u %3d"
		   " %c%c%c %08x | %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   bundle->service_id,
		   refcount_read(&bundle->ref),
		   atomic_read(&bundle->active),
		   bundle->try_upgrade ? 'U' : '-',
		   bundle->exclusive ? 'e' : '-',
		   bundle->upgrade ? 'u' : '-',
		   key_serial(bundle->key),
		   bundle->debug_id,
		   bundle->conn_ids[0],
		   bundle->conn_ids[1],
		   bundle->conn_ids[2],
		   bundle->conn_ids[3]);

	return 0;
}

const struct seq_operations rxrpc_bundle_seq_ops = {
	.start  = rxrpc_bundle_seq_start,
	.next   = rxrpc_bundle_seq_next,
	.stop   = rxrpc_bundle_seq_stop,
	.show   = rxrpc_bundle_seq_show,
};

/*
 * generate a list of extant virtual peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use SST   MTU LastUse      RTT      RTO\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %8u %8u\n",
		   lbuff,
		   rbuff,
		   refcount_read(&peer->ref),
		   peer->cong_ssthresh,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->srtt_us >> 3,
		   peer->rto_us);

	return 0;
}
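
/*
 * The peers live in a hash table, so the iterator position is synthesised
 * from two coordinates: the upper bits of *_pos hold the bucket number and
 * the lower bits the index into that bucket's chain.  Position 0 stands
 * for the header line (SEQ_START_TOKEN) and UINT_MAX marks the end of the
 * table.
 */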

static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}

static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	if (*_pos >= UINT_MAX)
		return NULL;

	bucket = *_pos >> shift;

	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
	if (p)
		return p;

	for (;;) {
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;

		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
	}
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};
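
/*
 * In /proc/net/rxrpc/locals, Use is the endpoint's reference count, Act
 * the number of active users and RxQ the length of its packet reception
 * queue.
 */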

/*
 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
 */
static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	char lbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Use Act RxQ\n");
		return 0;
	}

	local = hlist_entry(v, struct rxrpc_local, link);

	sprintf(lbuff, "%pISpc", &local->srx.transport);

	seq_printf(seq,
		   "UDP   %-47.47s %3u %3u %3u\n",
		   lbuff,
		   refcount_read(&local->ref),
		   atomic_read(&local->active_users),
		   local->rx_queue.qlen);

	return 0;
}

static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int n;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos;
	if (n == 0)
		return SEQ_START_TOKEN;

	return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
}

static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	if (*_pos >= UINT_MAX)
		return NULL;

	return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
}

static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

const struct seq_operations rxrpc_local_seq_ops = {
	.start  = rxrpc_local_seq_start,
	.next   = rxrpc_local_seq_next,
	.stop   = rxrpc_local_seq_stop,
	.show   = rxrpc_local_seq_show,
};
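
/*
 * The statistics below are plain atomics that the hot paths update without
 * any extra locking, so reading the file yields an approximate snapshot
 * rather than an exact point-in-time one.
 */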

/*
 * Display stats in /proc/net/rxrpc/stats
 */
int rxrpc_stats_show(struct seq_file *seq, void *v)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq));

	seq_printf(seq,
		   "Data     : send=%u sendf=%u fail=%u\n",
		   atomic_read(&rxnet->stat_tx_data_send),
		   atomic_read(&rxnet->stat_tx_data_send_frag),
		   atomic_read(&rxnet->stat_tx_data_send_fail));
	seq_printf(seq,
		   "Data-Tx  : nr=%u retrans=%u uf=%u cwr=%u\n",
		   atomic_read(&rxnet->stat_tx_data),
		   atomic_read(&rxnet->stat_tx_data_retrans),
		   atomic_read(&rxnet->stat_tx_data_underflow),
		   atomic_read(&rxnet->stat_tx_data_cwnd_reset));
	seq_printf(seq,
		   "Data-Rx  : nr=%u reqack=%u jumbo=%u\n",
		   atomic_read(&rxnet->stat_rx_data),
		   atomic_read(&rxnet->stat_rx_data_reqack),
		   atomic_read(&rxnet->stat_rx_data_jumbo));
	seq_printf(seq,
		   "Ack      : fill=%u send=%u skip=%u\n",
		   atomic_read(&rxnet->stat_tx_ack_fill),
		   atomic_read(&rxnet->stat_tx_ack_send),
		   atomic_read(&rxnet->stat_tx_ack_skip));
	seq_printf(seq,
		   "Ack-Tx   : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_REQUESTED]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DUPLICATE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_NOSPACE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING_RESPONSE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DELAY]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_IDLE]));
	seq_printf(seq,
		   "Ack-Rx   : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_NOSPACE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DELAY]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]));
	seq_printf(seq,
		   "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n",
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_already_on]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]));
	seq_printf(seq,
		   "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n",
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_retrans]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin]));
	seq_printf(seq,
		   "Buffers  : txb=%u rxb=%u\n",
		   atomic_read(&rxrpc_nr_txbuf),
		   atomic_read(&rxrpc_n_rx_skbs));
	seq_printf(seq,
		   "IO-thread: loops=%u\n",
		   atomic_read(&rxnet->stat_io_loop));
	return 0;
}
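
/*
 * Writing to /proc/net/rxrpc/stats resets all of the above counters.  Only
 * an empty write or a lone newline is accepted, e.g.:
 *
 *	echo > /proc/net/rxrpc/stats
 *
 * and anything else is rejected with -EINVAL.
 */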

/*
 * Clear stats if /proc/net/rxrpc/stats is written to.
 */
int rxrpc_stats_clear(struct file *file, char *buf, size_t size)
{
	struct seq_file *m = file->private_data;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(m));

	if (size > 1 || (size == 1 && buf[0] != '\n'))
		return -EINVAL;

	atomic_set(&rxnet->stat_tx_data, 0);
	atomic_set(&rxnet->stat_tx_data_retrans, 0);
	atomic_set(&rxnet->stat_tx_data_underflow, 0);
	atomic_set(&rxnet->stat_tx_data_cwnd_reset, 0);
	atomic_set(&rxnet->stat_tx_data_send, 0);
	atomic_set(&rxnet->stat_tx_data_send_frag, 0);
	atomic_set(&rxnet->stat_tx_data_send_fail, 0);
	atomic_set(&rxnet->stat_rx_data, 0);
	atomic_set(&rxnet->stat_rx_data_reqack, 0);
	atomic_set(&rxnet->stat_rx_data_jumbo, 0);

	atomic_set(&rxnet->stat_tx_ack_fill, 0);
	atomic_set(&rxnet->stat_tx_ack_send, 0);
	atomic_set(&rxnet->stat_tx_ack_skip, 0);
	memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks));
	memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks));

	memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack));

	atomic_set(&rxnet->stat_io_loop, 0);
	return size;
}