// SPDX-License-Identifier: GPL-2.0-or-later
/* Socket buffer accounting
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
|
|
|
|
|
2016-06-02 19:08:52 +00:00
|
|
|
/* Prefix all pr_*() log output from this module with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2007-04-26 22:48:28 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/net.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/af_rxrpc.h>
|
|
|
|
#include "ar-internal.h"
|
|
|
|
|
2022-10-07 12:52:06 +00:00
|
|
|
/* Pick the accounting counter for an skb.  Currently every skb is counted
 * in the single rxrpc_n_rx_skbs atomic; the skb argument is unused but is
 * kept so that the call sites read naturally and a per-type split can be
 * reintroduced without touching them.
 */
#define select_skb_count(skb) (&rxrpc_n_rx_skbs)
|
2016-09-17 09:49:14 +00:00
|
|
|
|
2016-08-23 14:27:24 +00:00
|
|
|
/*
|
2016-09-17 09:49:14 +00:00
|
|
|
* Note the allocation or reception of a socket buffer.
|
2016-08-23 14:27:24 +00:00
|
|
|
*/
|
2022-10-21 14:31:21 +00:00
|
|
|
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
2016-08-23 14:27:24 +00:00
|
|
|
{
|
2019-08-19 08:25:38 +00:00
|
|
|
int n = atomic_inc_return(select_skb_count(skb));
|
2022-10-21 14:31:21 +00:00
|
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
2016-08-23 14:27:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note the re-emergence of a socket buffer from a queue or buffer.
|
|
|
|
*/
|
2022-10-21 14:31:21 +00:00
|
|
|
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
2016-08-23 14:27:24 +00:00
|
|
|
{
|
|
|
|
if (skb) {
|
2019-08-19 08:25:38 +00:00
|
|
|
int n = atomic_read(select_skb_count(skb));
|
2022-10-21 14:31:21 +00:00
|
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
2016-08-23 14:27:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note the addition of a ref on a socket buffer.
|
|
|
|
*/
|
2022-10-21 14:31:21 +00:00
|
|
|
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
2016-08-23 14:27:24 +00:00
|
|
|
{
|
2019-08-19 08:25:38 +00:00
|
|
|
int n = atomic_inc_return(select_skb_count(skb));
|
2022-10-21 14:31:21 +00:00
|
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
2016-08-23 14:27:24 +00:00
|
|
|
skb_get(skb);
|
|
|
|
}
|
|
|
|
|
2019-08-27 09:13:46 +00:00
|
|
|
/*
|
|
|
|
* Note the dropping of a ref on a socket buffer by the core.
|
|
|
|
*/
|
2022-10-21 14:31:21 +00:00
|
|
|
void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
2019-08-27 09:13:46 +00:00
|
|
|
{
|
|
|
|
int n = atomic_inc_return(&rxrpc_n_rx_skbs);
|
2022-10-21 14:31:21 +00:00
|
|
|
trace_rxrpc_skb(skb, 0, n, why);
|
2019-08-27 09:13:46 +00:00
|
|
|
}
|
|
|
|
|
2016-08-23 14:27:24 +00:00
|
|
|
/*
|
|
|
|
* Note the destruction of a socket buffer.
|
|
|
|
*/
|
2022-10-21 14:31:21 +00:00
|
|
|
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
|
2016-08-23 14:27:24 +00:00
|
|
|
{
|
|
|
|
if (skb) {
|
2022-10-21 14:31:21 +00:00
|
|
|
int n = atomic_dec_return(select_skb_count(skb));
|
|
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
|
2023-02-07 22:11:30 +00:00
|
|
|
consume_skb(skb);
|
2016-08-23 14:27:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear a queue of socket buffers.
|
|
|
|
*/
|
|
|
|
void rxrpc_purge_queue(struct sk_buff_head *list)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
2022-10-21 14:31:21 +00:00
|
|
|
|
2016-08-23 14:27:24 +00:00
|
|
|
while ((skb = skb_dequeue((list))) != NULL) {
|
2019-08-19 08:25:38 +00:00
|
|
|
int n = atomic_dec_return(select_skb_count(skb));
|
2022-10-21 14:31:21 +00:00
|
|
|
trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
|
|
|
|
rxrpc_skb_put_purge);
|
2023-02-07 22:11:30 +00:00
|
|
|
consume_skb(skb);
|
2016-08-23 14:27:24 +00:00
|
|
|
}
|
|
|
|
}
|