mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 21:33:00 +00:00
ae0be8de9a
Even if the NLA_F_NESTED flag was introduced more than 11 years ago, most netlink based interfaces (including recently added ones) are still not setting it in kernel generated messages. Without the flag, message parsers not aware of attribute semantics (e.g. wireshark dissector or libmnl's mnl_nlmsg_fprintf()) cannot recognize nested attributes and won't display the structure of their contents. Unfortunately we cannot just add the flag everywhere as there may be userspace applications which check nlattr::nla_type directly rather than through a helper masking out the flags. Therefore the patch renames nla_nest_start() to nla_nest_start_noflag() and introduces nla_nest_start() as a wrapper adding NLA_F_NESTED. The calls which add NLA_F_NESTED manually are rewritten to use nla_nest_start(). Except for changes in include/net/netlink.h, the patch was generated using this semantic patch: @@ expression E1, E2; @@ -nla_nest_start(E1, E2) +nla_nest_start_noflag(E1, E2) @@ expression E1, E2; @@ -nla_nest_start_noflag(E1, E2 | NLA_F_NESTED) +nla_nest_start(E1, E2) Signed-off-by: Michal Kubecek <mkubecek@suse.cz> Acked-by: Jiri Pirko <jiri@mellanox.com> Acked-by: David Ahern <dsahern@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
301 lines
7.5 KiB
C
301 lines
7.5 KiB
C
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*
|
|
* Authors: Jamal Hadi Salim 1999
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/types.h>
|
|
#include <linux/list.h>
|
|
#include <linux/skbuff.h>
|
|
#include <linux/rtnetlink.h>
|
|
|
|
#include <net/netlink.h>
|
|
#include <net/pkt_sched.h>
|
|
#include <net/pkt_cls.h>
|
|
|
|
/* Per-qdisc private state for the ingress qdisc (lives in qdisc_priv()). */
struct ingress_sched_data {
	struct tcf_block *block;		/* filter block bound to this qdisc */
	struct tcf_block_ext_info block_info;	/* binder type / chain-head-change hooks */
	struct mini_Qdisc_pair miniqp;		/* lockless mini-qdisc pair for the fast path */
};
|
|
|
|
/* Ingress classes have no attached leaf qdisc; always report none. */
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
|
|
|
|
/* Map a classid to an internal class handle. The +1 keeps the handle
 * nonzero (0 conventionally means "not found"), presumably because any
 * minor number is accepted by this classless qdisc — confirm against
 * tc_ctl_tfilter() callers.
 */
static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}
|
|
|
|
/* Binding a filter to a class is the same lookup as ->find(). */
static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_find(sch, classid);
}
|
|
|
|
/* No per-class refcounting here, so unbind is intentionally a no-op. */
static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}
|
|
|
|
/* No real classes to enumerate; class walk is intentionally a no-op. */
static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
|
|
|
|
/* Return the single filter block of this qdisc, regardless of class. */
static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
					   struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block;
}
|
|
|
|
/* Callback invoked when the head of the filter chain changes: swap the
 * new tp_head into the mini-qdisc pair so the RX/TX fast path sees it.
 * @priv is the &struct mini_Qdisc_pair registered in *_init().
 */
static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
	struct mini_Qdisc_pair *miniqp = priv;

	mini_qdisc_pair_swap(miniqp, tp_head);
}
/* NOTE(review): dropped the stray ';' that followed the closing brace —
 * it formed an empty file-scope declaration (rejected under -Wpedantic).
 */
|
|
|
|
/* Record the user-supplied shared-block index before the block is bound. */
static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	q->block_info.block_index = block_index;
}
|
|
|
|
/* Report the shared-block index for dumping back to userspace. */
static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block_info.block_index;
}
|
|
|
|
/* Qdisc ->init(): set up the ingress mini-qdisc pair and acquire the
 * ingress filter block. Returns 0 or a negative errno from
 * tcf_block_get_ext().
 */
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	/* Enable the static-key-gated ingress hook in the core. */
	net_inc_ingress_queue();

	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);

	/* chain_head_change keeps the mini-qdisc in sync with the
	 * filter chain head; priv points at our miniqp pair.
	 */
	q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
}
|
|
|
|
/* Qdisc ->destroy(): release the filter block and drop the ingress-hook
 * static-key reference taken in ingress_init().
 */
static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	tcf_block_put_ext(q->block, sch, &q->block_info);
	net_dec_ingress_queue();
}
|
|
|
|
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
|
|
{
|
|
struct nlattr *nest;
|
|
|
|
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
|
|
if (nest == NULL)
|
|
goto nla_put_failure;
|
|
|
|
return nla_nest_end(skb, nest);
|
|
|
|
nla_put_failure:
|
|
nla_nest_cancel(skb, nest);
|
|
return -1;
|
|
}
|
|
|
|
/* Class operations for the classless ingress qdisc: mostly stubs plus
 * the filter-block accessor used by the cls API.
 */
static const struct Qdisc_class_ops ingress_class_ops = {
	.leaf		=	ingress_leaf,
	.find		=	ingress_find,
	.walk		=	ingress_walk,
	.tcf_block	=	ingress_tcf_block,
	.bind_tcf	=	ingress_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};
|
|
|
|
/* "ingress" qdisc registration. TCQ_F_CPUSTATS gives per-CPU stats since
 * the ingress path runs without the qdisc lock.
 */
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops			=	&ingress_class_ops,
	.id			=	"ingress",
	.priv_size		=	sizeof(struct ingress_sched_data),
	.static_flags		=	TCQ_F_CPUSTATS,
	.init			=	ingress_init,
	.destroy		=	ingress_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	ingress_ingress_block_set,
	.ingress_block_get	=	ingress_ingress_block_get,
	.owner			=	THIS_MODULE,
};
|
|
|
|
/* Per-qdisc private state for clsact: like ingress, but with a second,
 * independent block and mini-qdisc pair for the egress direction.
 */
struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
	struct tcf_block_ext_info ingress_block_info;
	struct tcf_block_ext_info egress_block_info;
	struct mini_Qdisc_pair miniqp_ingress;
	struct mini_Qdisc_pair miniqp_egress;
};
|
|
|
|
/* Map a classid to a class handle: only the two pseudo-classes
 * (ingress, egress) exist; anything else yields 0 ("not found").
 */
static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
{
	u32 minor = TC_H_MIN(classid);

	if (minor == TC_H_MIN(TC_H_MIN_INGRESS) ||
	    minor == TC_H_MIN(TC_H_MIN_EGRESS))
		return minor;

	return 0;
}
|
|
|
|
/* Binding a filter is the same pseudo-class lookup as clsact_find(). */
static unsigned long clsact_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return clsact_find(sch, classid);
}
|
|
|
|
static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
|
|
struct netlink_ext_ack *extack)
|
|
{
|
|
struct clsact_sched_data *q = qdisc_priv(sch);
|
|
|
|
switch (cl) {
|
|
case TC_H_MIN(TC_H_MIN_INGRESS):
|
|
return q->ingress_block;
|
|
case TC_H_MIN(TC_H_MIN_EGRESS):
|
|
return q->egress_block;
|
|
default:
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
/* Record the shared-block index for the ingress direction. */
static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->ingress_block_info.block_index = block_index;
}
|
|
|
|
/* Record the shared-block index for the egress direction. */
static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->egress_block_info.block_index = block_index;
}
|
|
|
|
/* Report the ingress shared-block index for dumping to userspace. */
static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->ingress_block_info.block_index;
}
|
|
|
|
/* Report the egress shared-block index for dumping to userspace. */
static u32 clsact_egress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->egress_block_info.block_index;
}
|
|
|
|
/* Qdisc ->init(): set up both directions — ingress first, then egress.
 * Returns 0 or a negative errno from tcf_block_get_ext().
 *
 * NOTE(review): if the egress tcf_block_get_ext() fails, the ingress
 * block acquired above is not released here — presumably ->destroy()
 * is still invoked on init failure and cleans it up; confirm against
 * qdisc_create().
 */
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	/* Enable the static-key-gated hooks for both directions. */
	net_inc_ingress_queue();
	net_inc_egress_queue();

	/* Ingress direction. */
	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);

	q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
				extack);
	if (err)
		return err;

	/* Egress direction. */
	mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);

	q->egress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
	q->egress_block_info.chain_head_change = clsact_chain_head_change;
	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}
|
|
|
|
/* Qdisc ->destroy(): release both filter blocks (reverse of init order)
 * and drop the static-key references for both directions.
 */
static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);

	net_dec_ingress_queue();
	net_dec_egress_queue();
}
|
|
|
|
/* Class ops for clsact: shares the ingress stubs, but uses its own
 * find/bind/tcf_block to distinguish the two pseudo-classes.
 */
static const struct Qdisc_class_ops clsact_class_ops = {
	.leaf		=	ingress_leaf,
	.find		=	clsact_find,
	.walk		=	ingress_walk,
	.tcf_block	=	clsact_tcf_block,
	.bind_tcf	=	clsact_bind_filter,
	.unbind_tcf	=	ingress_unbind_filter,
};
|
|
|
|
/* "clsact" qdisc registration; dump is shared with ingress since neither
 * qdisc carries options.
 */
static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops			=	&clsact_class_ops,
	.id			=	"clsact",
	.priv_size		=	sizeof(struct clsact_sched_data),
	.static_flags		=	TCQ_F_CPUSTATS,
	.init			=	clsact_init,
	.destroy		=	clsact_destroy,
	.dump			=	ingress_dump,
	.ingress_block_set	=	clsact_ingress_block_set,
	.egress_block_set	=	clsact_egress_block_set,
	.ingress_block_get	=	clsact_ingress_block_get,
	.egress_block_get	=	clsact_egress_block_get,
	.owner			=	THIS_MODULE,
};
|
|
|
|
/* Module init: register both qdiscs; if the second registration fails,
 * roll back the first so the module loads all-or-nothing.
 */
static int __init ingress_module_init(void)
{
	int ret;

	ret = register_qdisc(&ingress_qdisc_ops);
	if (ret)
		return ret;

	ret = register_qdisc(&clsact_qdisc_ops);
	if (ret)
		unregister_qdisc(&ingress_qdisc_ops);

	return ret;
}
|
|
|
|
/* Module exit: unregister both qdiscs registered in ingress_module_init(). */
static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}
|
|
|
|
module_init(ingress_module_init);
module_exit(ingress_module_exit);

/* The module file is sch_ingress; alias so "tc qdisc add ... clsact"
 * can autoload it too.
 */
MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");
|