net: nexthop: Expose nexthop group stats to user space

Add netlink support for reading NH group stats.

These statistics only cover traffic in the SW datapath. HW nexthop group
statistics will be added in following patches.

Emission of the stats is keyed to a new op flag, NHA_OP_FLAG_DUMP_STATS,
to avoid cluttering the netlink message with stats when the user does not
need them.
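
For illustration only (not part of this commit), a dump request that sets
the new flag could be built from user space roughly as below. This is a
minimal sketch assuming libmnl and uapi headers new enough to define
NHA_OP_FLAGS; the flag value is spelled out locally and reply parsing is
omitted.

/* Hypothetical user-space sketch: request a nexthop dump and ask the
 * kernel to include NHA_GROUP_STATS by setting NHA_OP_FLAG_DUMP_STATS
 * in NHA_OP_FLAGS.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>
#include <linux/nexthop.h>

/* Local copy of NHA_OP_FLAG_DUMP_STATS (BIT(0) in the header change below),
 * so the sketch does not depend on how the installed uapi header spells it. */
#define NH_DUMP_STATS_FLAG (1U << 0)

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nhmsg *nhm;
	unsigned int seq, portid;
	ssize_t ret;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_GETNEXTHOP;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq = seq = time(NULL);

	nhm = mnl_nlmsg_put_extra_header(nlh, sizeof(*nhm));
	nhm->nh_family = AF_UNSPEC;

	/* Ask the kernel to append per-entry group stats to each nexthop. */
	mnl_attr_put_u32(nlh, NHA_OP_FLAGS, NH_DUMP_STATS_FLAG);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
		perror("mnl_socket");
		return EXIT_FAILURE;
	}
	portid = mnl_socket_get_portid(nl);

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
		perror("mnl_socket_sendto");
		return EXIT_FAILURE;
	}

	/* Drain the dump; a real tool would pass a parsing callback here. */
	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, NULL, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}

	mnl_socket_close(nl);
	return EXIT_SUCCESS;
}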

Co-developed-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Ido Schimmel <idosch@nvidia.com>
Date:      2024-03-06 13:49:18 +01:00
Committer: David S. Miller
Commit:    95fedd7685 (parent f4676ea74b)

 2 files changed, 117 insertions(+), 8 deletions(-)

diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h
--- a/include/uapi/linux/nexthop.h
+++ b/include/uapi/linux/nexthop.h

@@ -30,6 +30,8 @@ enum {
 
 #define NEXTHOP_GRP_TYPE_MAX (__NEXTHOP_GRP_TYPE_MAX - 1)
 
+#define NHA_OP_FLAG_DUMP_STATS	BIT(0)
+
 enum {
 	NHA_UNSPEC,
 	NHA_ID,		/* u32; id for nexthop. id == 0 means auto-assign */
@@ -63,6 +65,9 @@ enum {
 	/* u32; operation-specific flags */
 	NHA_OP_FLAGS,
 
+	/* nested; nexthop group stats */
+	NHA_GROUP_STATS,
+
 	__NHA_MAX,
 };
 
@@ -104,4 +109,29 @@ enum {
 
 #define NHA_RES_BUCKET_MAX	(__NHA_RES_BUCKET_MAX - 1)
 
+enum {
+	NHA_GROUP_STATS_UNSPEC,
+
+	/* nested; nexthop group entry stats */
+	NHA_GROUP_STATS_ENTRY,
+
+	__NHA_GROUP_STATS_MAX,
+};
+
+#define NHA_GROUP_STATS_MAX	(__NHA_GROUP_STATS_MAX - 1)
+
+enum {
+	NHA_GROUP_STATS_ENTRY_UNSPEC,
+
+	/* u32; nexthop id of the nexthop group entry */
+	NHA_GROUP_STATS_ENTRY_ID,
+
+	/* uint; number of packets forwarded via the nexthop group entry */
+	NHA_GROUP_STATS_ENTRY_PACKETS,
+
+	__NHA_GROUP_STATS_ENTRY_MAX,
+};
+
+#define NHA_GROUP_STATS_ENTRY_MAX	(__NHA_GROUP_STATS_ENTRY_MAX - 1)
+
 #endif
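
As a reading aid, and again not part of the commit, the nesting defined
above (NHA_GROUP_STATS carrying one NHA_GROUP_STATS_ENTRY nest per group
entry) could be walked in user space along the lines of the sketch below.
dump_group_stats_cb is a hypothetical name; the sketch assumes libmnl and
uapi headers that already contain the attributes added here.

#include <stdint.h>
#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/nexthop.h>

/* Hypothetical mnl_cb_t callback: print the per-entry packet counters
 * carried in NHA_GROUP_STATS of one RTM_NEWNEXTHOP message.
 */
static int dump_group_stats_cb(const struct nlmsghdr *nlh, void *data)
{
	const struct nhmsg *nhm = mnl_nlmsg_get_payload(nlh);
	const struct nlattr *attr;

	mnl_attr_for_each(attr, nlh, sizeof(*nhm)) {
		const struct nlattr *entry;

		if (mnl_attr_get_type(attr) != NHA_GROUP_STATS)
			continue;

		mnl_attr_for_each_nested(entry, attr) {
			const struct nlattr *pos;
			uint64_t packets = 0;
			uint32_t id = 0;

			if (mnl_attr_get_type(entry) != NHA_GROUP_STATS_ENTRY)
				continue;

			mnl_attr_for_each_nested(pos, entry) {
				switch (mnl_attr_get_type(pos)) {
				case NHA_GROUP_STATS_ENTRY_ID:
					id = mnl_attr_get_u32(pos);
					break;
				case NHA_GROUP_STATS_ENTRY_PACKETS:
					/* uint attribute: 32 or 64 bits on the wire */
					if (mnl_attr_get_payload_len(pos) == sizeof(uint32_t))
						packets = mnl_attr_get_u32(pos);
					else
						packets = mnl_attr_get_u64(pos);
					break;
				}
			}
			printf("entry id %u: %llu packets\n",
			       id, (unsigned long long)packets);
		}
	}
	return MNL_CB_OK;
}

A callback of this shape could be passed to mnl_cb_run() when draining the
dump built in the earlier request sketch.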

diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c

@@ -26,6 +26,8 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
 #define NH_DEV_HASHBITS 8
 #define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
 
+#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS)
+
 static const struct nla_policy rtm_nh_policy_new[] = {
 	[NHA_ID] = { .type = NLA_U32 },
 	[NHA_GROUP] = { .type = NLA_BINARY },
@@ -41,7 +43,8 @@ static const struct nla_policy rtm_nh_policy_new[] = {
 
 static const struct nla_policy rtm_nh_policy_get[] = {
 	[NHA_ID] = { .type = NLA_U32 },
-	[NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0),
+	[NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+					 NHA_OP_FLAGS_DUMP_ALL),
 };
 
 static const struct nla_policy rtm_nh_policy_del[] = {
@@ -53,7 +56,8 @@ static const struct nla_policy rtm_nh_policy_dump[] = {
 	[NHA_GROUPS] = { .type = NLA_FLAG },
 	[NHA_MASTER] = { .type = NLA_U32 },
 	[NHA_FDB] = { .type = NLA_FLAG },
-	[NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0),
+	[NHA_OP_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+					 NHA_OP_FLAGS_DUMP_ALL),
 };
 
 static const struct nla_policy rtm_nh_res_policy_new[] = {
@@ -671,8 +675,78 @@ static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
 	u64_stats_update_end(&cpu_stats->syncp);
 }
 
-static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
+static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
+				    u64 *ret_packets)
 {
+	int i;
+
+	*ret_packets = 0;
+
+	for_each_possible_cpu(i) {
+		struct nh_grp_entry_stats *cpu_stats;
+		unsigned int start;
+		u64 packets;
+
+		cpu_stats = per_cpu_ptr(nhge->stats, i);
+		do {
+			start = u64_stats_fetch_begin(&cpu_stats->syncp);
+			packets = u64_stats_read(&cpu_stats->packets);
+		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+		*ret_packets += packets;
+	}
+}
+
+static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
+					struct nh_grp_entry *nhge)
+{
+	struct nlattr *nest;
+	u64 packets;
+
+	nh_grp_entry_stats_read(nhge, &packets);
+
+	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
+	if (!nest)
+		return -EMSGSIZE;
+
+	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
+	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS, packets))
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest);
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh)
+{
+	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
+	struct nlattr *nest;
+	int i;
+
+	nest = nla_nest_start(skb, NHA_GROUP_STATS);
+	if (!nest)
+		return -EMSGSIZE;
+
+	for (i = 0; i < nhg->num_nh; i++)
+		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i]))
+			goto cancel_out;
+
+	nla_nest_end(skb, nest);
+	return 0;
+
+cancel_out:
+	nla_nest_cancel(skb, nest);
+	return -EMSGSIZE;
+}
+
+static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
+			    u32 op_flags)
+{
+	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 	struct nexthop_grp *p;
 	size_t len = nhg->num_nh * sizeof(*p);
 	struct nlattr *nla;
@@ -701,6 +775,10 @@ static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
 	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
 		goto nla_put_failure;
 
+	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
+	    nla_put_nh_group_stats(skb, nh))
+		goto nla_put_failure;
+
 	return 0;
 
 nla_put_failure:
@@ -708,7 +786,8 @@ nla_put_failure:
 }
 
 static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
-			int event, u32 portid, u32 seq, unsigned int nlflags)
+			int event, u32 portid, u32 seq, unsigned int nlflags,
+			u32 op_flags)
 {
 	struct fib6_nh *fib6_nh;
 	struct fib_nh *fib_nh;
@@ -735,7 +814,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
 		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
 			goto nla_put_failure;
 
-		if (nla_put_nh_group(skb, nhg))
+		if (nla_put_nh_group(skb, nh, op_flags))
 			goto nla_put_failure;
 		goto out;
 	}
@@ -866,7 +945,7 @@ static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
 	if (!skb)
 		goto errout;
 
-	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
+	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -3095,7 +3174,7 @@ static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		goto errout_free;
 
 	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
-			   nlh->nlmsg_seq, 0);
+			   nlh->nlmsg_seq, 0, op_flags);
 	if (err < 0) {
 		WARN_ON(err == -EMSGSIZE);
 		goto errout_free;
@@ -3265,7 +3344,7 @@ static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
 
 	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
 			    NETLINK_CB(cb->skb).portid,
-			    cb->nlh->nlmsg_seq, NLM_F_MULTI);
+			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
 }
 
 /* rtnl */