commit aebe4426cc

A following patch introduces qevents: points in a qdisc's algorithm where a packet can be processed by user-defined filters. Should this processing lead to a situation where a new packet is to be enqueued on the same port, holding the root lock would lead to deadlocks. To solve the issue, the qevent handler needs to unlock and relock the root lock when necessary. To that end, add the root lock argument to the qdisc enqueue op and propagate it throughout.

Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
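For orientation, here is a minimal sketch of the locking pattern the message describes. The handler name and body are hypothetical and not part of this patch; only the unlock/relock discipline around the root lock, which the new argument makes possible, is the point:

/* Hypothetical qevent handler -- illustrative only, not from this patch.
 * The caller (the qdisc's enqueue path) holds root_lock.  If the
 * user-defined filters can cause a new packet to be enqueued on the
 * same port, taking the root lock again would self-deadlock, so the
 * handler releases it around the filter run and re-acquires it before
 * returning to the qdisc.
 */
static void qevent_run(struct sk_buff *skb, struct Qdisc *sch,
		       spinlock_t *root_lock)
{
	spin_unlock(root_lock);
	/* ... run user-defined filters; may enqueue on this port ... */
	spin_lock(root_lock);
}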
net/sched/sch_blackhole.c · 42 lines · 947 B · C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_blackhole.c	Black hole queue
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * Note: Quantum tunneling is not supported.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
			     struct sk_buff **to_free)
{
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
{
	return NULL;
}

static struct Qdisc_ops blackhole_qdisc_ops __read_mostly = {
	.id		= "blackhole",
	.priv_size	= 0,
	.enqueue	= blackhole_enqueue,
	.dequeue	= blackhole_dequeue,
	.peek		= blackhole_dequeue,
	.owner		= THIS_MODULE,
};

static int __init blackhole_init(void)
{
	return register_qdisc(&blackhole_qdisc_ops);
}
device_initcall(blackhole_init)
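On the caller side, the op is reached through the qdisc_enqueue() helper in include/net/sch_generic.h. A sketch of its shape after this change, paraphrased from the series rather than quoted from this file (check the tree for the exact form):

/* Sketch of the caller-side helper under the new signature -- the root
 * lock is now threaded through to the op alongside skb and to_free.
 */
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				spinlock_t *root_lock,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, root_lock, to_free);
}

For the blackhole qdisc itself the new argument changes nothing operationally: blackhole_enqueue() drops unconditionally and never re-enters the enqueue path, so root_lock is accepted and ignored. The qdisc can be exercised with the standard iproute2 command, e.g. tc qdisc add dev eth0 root blackhole.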