mirror of https://github.com/torvalds/linux.git
commit 2c321f3f70
The main goal of the memory allocation profiling patchset is to provide accounting that is cheap enough to run in production. To achieve that, we inject counters using codetags at the allocation call sites so that every allocation is accounted. This injection keeps accounting efficient because the injected counters are immediately available, as opposed to alternative methods such as using _RET_IP_, which would require a counter lookup and the locking that goes with it, making accounting much more expensive.

This method requires all allocation functions to inject separate counters at their call sites so that their callers can be accounted individually. Counter injection is implemented by allocation hooks, which should wrap all allocation functions. Inlined functions that perform allocations but do not use allocation hooks are directly charged for the allocations they perform. In most cases these functions are just specialized allocation wrappers used from multiple places to allocate objects of a specific type; it would be more useful to do the accounting at their call sites instead.

Instrument these helpers to do accounting at the call site. Simple inlined allocation wrappers are converted directly into macros. More complex allocators, or allocators with documentation, are converted into _noprof versions with allocation hooks added on top. This allows the memory allocation profiling mechanism to charge allocations to the callers of these functions.

Link: https://lkml.kernel.org/r/20240415020731.1152108-1-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Jan Kara <jack@suse.cz> [jbd2]
Cc: Anna Schumaker <anna@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jakub Sitnicki <jakub@cloudflare.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
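
As a hedged sketch of the conversion pattern the message describes (struct foo and foo_alloc() below are hypothetical; kzalloc_noprof() and alloc_hooks() are the real helpers, used by tcx_entry_create() in the file further down):

/* Before: a plain inlined wrapper. The allocation is charged to
 * foo_alloc() itself rather than to its many callers.
 */
static inline struct foo *foo_alloc(gfp_t gfp)
{
        return kzalloc(sizeof(struct foo), gfp);
}

/* After: a _noprof variant plus an alloc_hooks() macro wrapper, so the
 * codetag counter is injected at each call site and the allocation is
 * charged to foo_alloc()'s caller.
 */
static inline struct foo *foo_alloc_noprof(gfp_t gfp)
{
        return kzalloc_noprof(sizeof(struct foo), gfp);
}
#define foo_alloc(...)  alloc_hooks(foo_alloc_noprof(__VA_ARGS__))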

203 lines
4.3 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023 Isovalent */
#ifndef __NET_TCX_H
#define __NET_TCX_H

#include <linux/bpf.h>
#include <linux/bpf_mprog.h>

#include <net/sch_generic.h>

struct mini_Qdisc;
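
/* Per-device, per-direction tcx state: the bpf_mprog bundle of attached
 * programs plus the legacy clsact/ingress mini-Qdisc, freed via RCU.
 */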
struct tcx_entry {
        struct mini_Qdisc __rcu *miniq;
        struct bpf_mprog_bundle bundle;
        bool miniq_active;
        struct rcu_head rcu;
};
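
/* Embeds the generic bpf_link together with the target device and the
 * attach location (ingress or egress).
 */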
struct tcx_link {
        struct bpf_link link;
        struct net_device *dev;
        u32 location;
};
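
/* Mark whether the skb is currently traversing tc ingress or egress;
 * a no-op when CONFIG_NET_XGRESS is disabled.
 */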
static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress)
{
#ifdef CONFIG_NET_XGRESS
        skb->tc_at_ingress = ingress;
#endif
}

#ifdef CONFIG_NET_XGRESS
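/* Map from the embedded bpf_mprog machinery back to the containing
 * tcx objects.
 */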
static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry)
{
        struct bpf_mprog_bundle *bundle = entry->parent;

        return container_of(bundle, struct tcx_entry, bundle);
}

static inline struct tcx_link *tcx_link(const struct bpf_link *link)
{
        return container_of(link, struct tcx_link, link);
}

void tcx_inc(void);
void tcx_dec(void);

static inline void tcx_entry_sync(void)
{
        /* bpf_mprog_entry got a/b swapped, therefore ensure that
         * there are no inflight users on the old one anymore.
         */
        synchronize_rcu();
}

static inline void
tcx_entry_update(struct net_device *dev, struct bpf_mprog_entry *entry,
                 bool ingress)
{
        ASSERT_RTNL();
        if (ingress)
                rcu_assign_pointer(dev->tcx_ingress, entry);
        else
                rcu_assign_pointer(dev->tcx_egress, entry);
}

static inline struct bpf_mprog_entry *
tcx_entry_fetch(struct net_device *dev, bool ingress)
{
        ASSERT_RTNL();
        if (ingress)
                return rcu_dereference_rtnl(dev->tcx_ingress);
        else
                return rcu_dereference_rtnl(dev->tcx_egress);
}
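
/* Allocation goes through the _noprof variant plus the alloc_hooks()
 * wrapper below, so memory allocation profiling charges the tcx_entry
 * to the caller of tcx_entry_create() rather than to this helper.
 */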
static inline struct bpf_mprog_entry *tcx_entry_create_noprof(void)
{
        struct tcx_entry *tcx = kzalloc_noprof(sizeof(*tcx), GFP_KERNEL);

        if (tcx) {
                bpf_mprog_bundle_init(&tcx->bundle);
                return &tcx->bundle.a;
        }
        return NULL;
}
#define tcx_entry_create(...)   alloc_hooks(tcx_entry_create_noprof(__VA_ARGS__))

static inline void tcx_entry_free(struct bpf_mprog_entry *entry)
{
        kfree_rcu(tcx_entry(entry), rcu);
}
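
/* Return the device's entry for the given direction, allocating a new
 * one if none is installed yet; *created tells the caller which case hit.
 */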
static inline struct bpf_mprog_entry *
tcx_entry_fetch_or_create(struct net_device *dev, bool ingress, bool *created)
{
        struct bpf_mprog_entry *entry = tcx_entry_fetch(dev, ingress);

        *created = false;
        if (!entry) {
                entry = tcx_entry_create();
                if (!entry)
                        return NULL;
                *created = true;
        }
        return entry;
}
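
/* Bump/drop the tcx static key together with the per-direction
 * ingress/egress queue static keys.
 */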
static inline void tcx_skeys_inc(bool ingress)
{
        tcx_inc();
        if (ingress)
                net_inc_ingress_queue();
        else
                net_inc_egress_queue();
}

static inline void tcx_skeys_dec(bool ingress)
{
        if (ingress)
                net_dec_ingress_queue();
        else
                net_dec_egress_queue();
        tcx_dec();
}

static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
                                        const bool active)
{
        ASSERT_RTNL();
        tcx_entry(entry)->miniq_active = active;
}

static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
{
        ASSERT_RTNL();
        return bpf_mprog_total(entry) || tcx_entry(entry)->miniq_active;
}
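
/* Translate a BPF program's return code into a tcx action: TCX_PASS
 * also propagates the classid into skb->tc_index; anything unknown is
 * treated as TCX_NEXT.
 */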
static inline enum tcx_action_base tcx_action_code(struct sk_buff *skb,
                                                   int code)
{
        switch (code) {
        case TCX_PASS:
                skb->tc_index = qdisc_skb_cb(skb)->tc_classid;
                fallthrough;
        case TCX_DROP:
        case TCX_REDIRECT:
                return code;
        case TCX_NEXT:
        default:
                return TCX_NEXT;
        }
}
#endif /* CONFIG_NET_XGRESS */

#if defined(CONFIG_NET_XGRESS) && defined(CONFIG_BPF_SYSCALL)
int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
void tcx_uninstall(struct net_device *dev, bool ingress);

int tcx_prog_query(const union bpf_attr *attr,
                   union bpf_attr __user *uattr);
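
/* Tear down both the ingress and egress entries of a device, e.g. on
 * device unregistration.
 */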
static inline void dev_tcx_uninstall(struct net_device *dev)
{
        ASSERT_RTNL();
        tcx_uninstall(dev, true);
        tcx_uninstall(dev, false);
}
#else
static inline int tcx_prog_attach(const union bpf_attr *attr,
                                  struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int tcx_link_attach(const union bpf_attr *attr,
                                  struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int tcx_prog_detach(const union bpf_attr *attr,
                                  struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int tcx_prog_query(const union bpf_attr *attr,
                                 union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline void dev_tcx_uninstall(struct net_device *dev)
{
}
#endif /* CONFIG_NET_XGRESS && CONFIG_BPF_SYSCALL */
#endif /* __NET_TCX_H */