// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

static struct tc_action_ops act_skbedit_ops;
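
/* Illustrative note (added comment, not from the original sources): when
 * SKBEDIT_F_TXQ_SKBHASH is set, flows are spread over the queue range
 * [queue_mapping, queue_mapping + mapping_mod - 1]. For example, with
 * queue_mapping 2 and queue_mapping_max 5, mapping_mod is 4, so a packet
 * whose flow hash is H is steered to queue 2 + (H % 4), i.e. one of 2..5,
 * subject to the netdev_cap_txqueue() clamp below.
 */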
static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
			    struct sk_buff *skb)
{
	u16 queue_mapping = params->queue_mapping;

	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
		u32 hash = skb_get_hash(skb);

		queue_mapping += hash % params->mapping_mod;
	}

	return netdev_cap_txqueue(skb->dev, queue_mapping);
}
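
/* Per-packet handler: applies the configured edits (priority, queue
 * mapping, mark, packet type) to the skb. It runs under RCU read-side
 * protection in the softirq path, hence rcu_dereference_bh() below.
 */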
TC_INDIRECT_SCOPE int tcf_skbedit_act(struct sk_buff *skb,
				      const struct tc_action *a,
				      struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	int action;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	params = rcu_dereference_bh(d->params);
	action = READ_ONCE(d->tcf_action);

	if (params->flags & SKBEDIT_F_PRIORITY)
		skb->priority = params->priority;
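	/* Illustrative note (added comment): "inheritdsfield" copies the six
	 * DSCP bits into skb->priority; the ">> 2" below drops the two ECN
	 * bits. E.g. an IPv4 TOS byte of 0xb8 yields priority 0x2e (EF).
	 */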
	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
		int wlen = skb_network_offset(skb);

		switch (skb_protocol(skb, true)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
			break;
		}
	}
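	/* Note (added comment): on egress, netdev_xmit_skip_txqueue() tells
	 * the core not to overwrite the queue chosen here when it later runs
	 * netdev_core_pick_tx().
	 */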
	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > params->queue_mapping) {
#ifdef CONFIG_NET_EGRESS
		netdev_xmit_skip_txqueue(true);
#endif
		skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
	}
	if (params->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~params->mask;
		skb->mark |= params->mark & params->mask;
	}
	if (params->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = params->ptype;
	return action;

err:
	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}
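
/* Usage sketch (added comment; an illustrative iproute2 invocation, not
 * taken from this file):
 *
 *   tc filter add dev eth0 egress matchall \
 *           action skbedit priority 0x6 mark 0x1/0xff queue_mapping 3
 *
 * sets skb->priority, rewrites the masked mark bits, and steers matching
 * packets to TX queue 3 (clamped to the device's real queue count).
 */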

static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
				     u64 packets, u64 drops,
				     u64 lastuse, bool hw)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_t *tm = &d->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
	[TCA_SKBEDIT_QUEUE_MAPPING_MAX]	= { .len = sizeof(u16) },
};
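
/* Control-path setup: parses the netlink attributes above, validates the
 * option combination, then publishes a new tcf_skbedit_params via RCU so
 * the datapath never sees a half-updated configuration.
 */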
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    struct tcf_proto *tp, u32 act_flags,
			    struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);
	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
	struct tcf_skbedit_params *params_new;
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	u16 mapping_mod = 1;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
					  skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		if (is_tcf_skbedit_ingress(act_flags) &&
		    !(act_flags & TCA_ACT_FLAGS_SKIP_SW)) {
			NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw");
			return -EOPNOTSUPP;
		}
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

		if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
			u16 *queue_mapping_max;

			if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
			    !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
				NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping.");
				return -EINVAL;
			}

			queue_mapping_max =
				nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
			if (*queue_mapping_max < *queue_mapping) {
				NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min.");
				return -EINVAL;
			}

			mapping_mod = *queue_mapping_max - *queue_mapping + 1;
			flags |= SKBEDIT_F_TXQ_SKBHASH;
		}
		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
			flags |= SKBEDIT_F_INHERITDSFIELD;
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (!flags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_skbedit_ops, bind, true, act_flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	params_new->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		params_new->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING) {
		params_new->queue_mapping = *queue_mapping;
		params_new->mapping_mod = mapping_mod;
	}
	if (flags & SKBEDIT_F_MARK)
		params_new->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		params_new->ptype = *ptype;
	/* default behaviour is to use all the bits */
	params_new->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		params_new->mask = *mask;

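	/* Note (added comment): the old params are swapped out under
	 * tcf_lock and freed via kfree_rcu(), so concurrent readers in
	 * tcf_skbedit_act() always see either the old or the new set.
	 */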
	spin_lock_bh(&d->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(d->params, params_new,
					 lockdep_is_held(&d->tcf_lock));
	spin_unlock_bh(&d->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
	};
	u64 pure_flags = 0;
	struct tcf_t t;

	spin_lock_bh(&d->tcf_lock);
	params = rcu_dereference_protected(d->params,
					   lockdep_is_held(&d->tcf_lock));
	opt.action = d->tcf_action;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
		goto nla_put_failure;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
		if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX,
				params->queue_mapping + params->mapping_mod - 1))
			goto nla_put_failure;

		pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
	}
	if (pure_flags != 0 &&
	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&d->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_skbedit_cleanup(struct tc_action *a)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;

	params = rcu_dereference_protected(d->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_skbedit))
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
		+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}
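
/* Note (added comment): maps skbedit options onto flow_action entries for
 * hardware offload. With bind=true it fills a flow_action_entry; with
 * bind=false it only reports the flow action id used for the offload.
 */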
static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
					 u32 *index_inc, bool bind,
					 struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else if (is_tcf_skbedit_tx_queue_mapping(act)) {
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side");
			return -EOPNOTSUPP;
		} else if (is_tcf_skbedit_rx_queue_mapping(act)) {
			entry->id = FLOW_ACTION_RX_QUEUE_MAPPING;
			entry->rx_queue = tcf_skbedit_rx_queue_mapping(act);
		} else if (is_tcf_skbedit_inheritdsfield(act)) {
			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
			return -EOPNOTSUPP;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported skbedit option offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_skbedit_mark(act))
			fl_action->id = FLOW_ACTION_MARK;
		else if (is_tcf_skbedit_ptype(act))
			fl_action->id = FLOW_ACTION_PTYPE;
		else if (is_tcf_skbedit_priority(act))
			fl_action->id = FLOW_ACTION_PRIORITY;
		else if (is_tcf_skbedit_rx_queue_mapping(act))
			fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.id		=	TCA_ID_SKBEDIT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit_act,
	.stats_update	=	tcf_skbedit_stats_update,
	.dump		=	tcf_skbedit_dump,
	.init		=	tcf_skbedit_init,
	.cleanup	=	tcf_skbedit_cleanup,
	.get_fill_size	=	tcf_skbedit_get_fill_size,
	.offload_act_setup =	tcf_skbedit_offload_act_setup,
	.size		=	sizeof(struct tcf_skbedit),
};
MODULE_ALIAS_NET_ACT("skbedit");

static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_skbedit_ops.net_id);

	return tc_action_net_init(net, tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_skbedit_ops.net_id);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &act_skbedit_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);