linux/net/rxrpc/skbuff.c
David Howells 9a36a6bc22 rxrpc: trace: Don't use __builtin_return_address for sk_buff tracing
In rxrpc tracing, use enums to generate lists of points of interest rather
than __builtin_return_address() for the sk_buff tracepoint.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
2022-12-01 13:36:39 +00:00
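As a rough sketch of what the commit message describes (not the actual diff; the tracepoint argument lists and the "_old"/"_new" function names below are simplified assumptions for illustration): the old pattern captured the call site with __builtin_return_address(0) and handed that raw pointer to the tracepoint, whereas the new pattern has each caller pass a value from enum rxrpc_skb_trace, so the points of interest form an enumerable list that the trace event can render symbolically.

/* Illustrative sketch only -- simplified, hypothetical before/after shapes,
 * not the exact upstream code.
 */

/* Before: the trace event records the caller's return address. */
void rxrpc_new_skb_old(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&rxrpc_n_rx_skbs);

        trace_rxrpc_skb(skb, refcount_read(&skb->users), n, here);
}

/* After: the trace event records an enum value naming the call site. */
void rxrpc_new_skb_new(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
        int n = atomic_inc_return(&rxrpc_n_rx_skbs);

        trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
}

The file below shows the post-change form: every accounting helper takes an enum rxrpc_skb_trace "why" argument and forwards it to trace_rxrpc_skb().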


// SPDX-License-Identifier: GPL-2.0-or-later
/* Socket buffer accounting
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

#define select_skb_count(skb) (&rxrpc_n_rx_skbs)

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
        int n = atomic_inc_return(select_skb_count(skb));
        trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
        if (skb) {
                int n = atomic_read(select_skb_count(skb));
                trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
        }
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
        int n = atomic_inc_return(select_skb_count(skb));
        trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
        skb_get(skb);
}

/*
 * Note the dropping of a ref on a socket buffer by the core.
 */
void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
        int n = atomic_inc_return(&rxrpc_n_rx_skbs);
        trace_rxrpc_skb(skb, 0, n, why);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
        if (skb) {
                int n = atomic_dec_return(select_skb_count(skb));
                trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
                kfree_skb(skb);
        }
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue((list))) != NULL) {
                int n = atomic_dec_return(select_skb_count(skb));
                trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
                                rxrpc_skb_put_purge);
                kfree_skb(skb);
        }
}