// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>

/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *   framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *   can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *   control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
	[DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED},
};

/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};

static LIST_HEAD(dcb_app_list);
static LIST_HEAD(dcb_rewr_list);
static DEFINE_SPINLOCK(dcb_lock);

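/* Map an APP selector value to the netlink attribute type that carries it:
 * IEEE selectors are sent as DCB_ATTR_IEEE_APP, the non-IEEE PCP selector as
 * DCB_ATTR_DCB_APP, anything else is reported as unspecified.
 */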
static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector)
{
	switch (selector) {
	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
	case IEEE_8021QAZ_APP_SEL_STREAM:
	case IEEE_8021QAZ_APP_SEL_DGRAM:
	case IEEE_8021QAZ_APP_SEL_ANY:
	case IEEE_8021QAZ_APP_SEL_DSCP:
		return DCB_ATTR_IEEE_APP;
	case DCB_APP_SEL_PCP:
		return DCB_ATTR_DCB_APP;
	default:
		return DCB_ATTR_IEEE_APP_UNSPEC;
	}
}

static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type)
{
	switch (type) {
	case DCB_ATTR_IEEE_APP:
	case DCB_ATTR_DCB_APP:
		return true;
	default:
		return false;
	}
}

static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector)
{
	return dcbnl_app_attr_type_get(selector) == type;
}

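/* Allocate a DCB netlink skb and fill in the dcbmsg header; if nlhp is
 * non-NULL the nlmsghdr is passed back through it so the caller can finish
 * and send the message.
 */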
static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	BUG_ON(!nlh);

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}

static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->getstate(netdev));
}

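/* Fill a DCB_ATTR_PFC_CFG nest with the PFC setting of each requested user
 * priority, or of all of them when DCB_PFC_UP_ATTR_ALL is present.
 */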
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
					  tb[DCB_ATTR_PFC_CFG],
					  dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
				u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 perm_addr[MAX_ADDR_LEN];

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return -EOPNOTSUPP;

	memset(perm_addr, 0, sizeof(perm_addr));
	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}

static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
					  tb[DCB_ATTR_CAP], dcbnl_cap_nest,
					  NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
					  tb[DCB_ATTR_NUMTCS],
					  dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		} else
			return -EINVAL;
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
					  tb[DCB_ATTR_NUMTCS],
					  dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getpfcstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
			  netdev->dcbnl_ops->getpfcstate(netdev));
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfcstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}

static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
					  tb[DCB_ATTR_APP], dcbnl_app_nest,
					  NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		struct dcb_app app = {
			.selector = idtype,
			.protocol = id,
		};
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}

static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
					  tb[DCB_ATTR_APP], dcbnl_app_nest,
					  NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}

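/* Shared helper that builds the nested priority-group configuration reply;
 * dir selects the Rx (non-zero) or Tx (zero) getter callbacks.
 */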
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
					  NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
ret = nla_parse_nested_deprecated(param_tb,
|
|
|
|
DCB_TC_ATTR_PARAM_MAX, data,
|
|
|
|
dcbnl_tc_param_nest, NULL);
|
2008-11-21 04:52:10 +00:00
|
|
|
if (ret)
|
|
|
|
goto err_pg;
|
|
|
|
|
2019-04-26 09:13:06 +00:00
|
|
|
param_nest = nla_nest_start_noflag(skb, i);
|
2008-11-21 04:52:10 +00:00
|
|
|
if (!param_nest)
|
|
|
|
goto err_pg;
|
|
|
|
|
|
|
|
pgid = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
prio = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
up_map = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
|
|
|
|
if (dir) {
|
|
|
|
/* Rx */
|
|
|
|
netdev->dcbnl_ops->getpgtccfgrx(netdev,
|
|
|
|
i - DCB_PG_ATTR_TC_0, &prio,
|
|
|
|
&pgid, &tc_pct, &up_map);
|
|
|
|
} else {
|
|
|
|
/* Tx */
|
|
|
|
netdev->dcbnl_ops->getpgtccfgtx(netdev,
|
|
|
|
i - DCB_PG_ATTR_TC_0, &prio,
|
|
|
|
&pgid, &tc_pct, &up_map);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
|
|
|
|
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb,
|
2008-11-21 04:52:10 +00:00
|
|
|
DCB_TC_ATTR_PARAM_PGID, pgid);
|
|
|
|
if (ret)
|
|
|
|
goto err_param;
|
|
|
|
}
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
|
|
|
|
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb,
|
2008-11-21 04:52:10 +00:00
|
|
|
DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
|
|
|
|
if (ret)
|
|
|
|
goto err_param;
|
|
|
|
}
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
|
|
|
|
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb,
|
2008-11-21 04:52:10 +00:00
|
|
|
DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
|
|
|
|
if (ret)
|
|
|
|
goto err_param;
|
|
|
|
}
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
|
|
|
|
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
|
2008-11-21 04:52:10 +00:00
|
|
|
tc_pct);
|
|
|
|
if (ret)
|
|
|
|
goto err_param;
|
|
|
|
}
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_end(skb, param_nest);
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
|
|
|
|
getall = 1;
|
|
|
|
else
|
|
|
|
getall = 0;
|
|
|
|
|
|
|
|
for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
|
|
|
|
if (!getall && !pg_tb[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
|
|
|
|
if (dir) {
|
|
|
|
/* Rx */
|
|
|
|
netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
|
|
|
|
i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
|
|
|
|
} else {
|
|
|
|
/* Tx */
|
|
|
|
netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
|
|
|
|
i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
|
|
|
|
}
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb, i, tc_pct);
|
2008-11-21 04:52:10 +00:00
|
|
|
if (ret)
|
|
|
|
goto err_pg;
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_end(skb, pg_nest);
|
2008-11-21 04:52:10 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_param:
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_cancel(skb, param_nest);
|
2008-11-21 04:52:10 +00:00
|
|
|
err_pg:
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_cancel(skb, pg_nest);
|
2012-06-13 02:54:58 +00:00
|
|
|
|
|
|
|
return -EMSGSIZE;
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
2012-06-13 02:54:55 +00:00
|
|
|
return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
2012-06-13 02:54:55 +00:00
|
|
|
return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
|
|
|
u8 value;
|
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
if (!tb[DCB_ATTR_STATE])
|
2012-06-13 02:54:55 +00:00
|
|
|
return -EINVAL;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
if (!netdev->dcbnl_ops->setstate)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2008-11-21 04:52:10 +00:00
|
|
|
value = nla_get_u8(tb[DCB_ATTR_STATE]);
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
return nla_put_u8(skb, DCB_ATTR_STATE,
|
|
|
|
netdev->dcbnl_ops->setstate(netdev, value));
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
|
|
|
struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
|
|
|
|
int i;
|
2012-06-13 02:54:58 +00:00
|
|
|
int ret;
|
2008-11-21 04:52:10 +00:00
|
|
|
u8 value;
|
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
if (!tb[DCB_ATTR_PFC_CFG])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!netdev->dcbnl_ops->setpfccfg)
|
|
|
|
return -EOPNOTSUPP;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
|
|
|
|
tb[DCB_ATTR_PFC_CFG],
|
|
|
|
dcbnl_pfc_up_nest, NULL);
|
2008-11-21 04:52:10 +00:00
|
|
|
if (ret)
|
2012-06-13 02:54:58 +00:00
|
|
|
return ret;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
|
|
|
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
|
|
|
|
if (data[i] == NULL)
|
|
|
|
continue;
|
|
|
|
value = nla_get_u8(data[i]);
|
|
|
|
netdev->dcbnl_ops->setpfccfg(netdev,
|
|
|
|
data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
2012-06-13 02:54:58 +00:00
|
|
|
int ret;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
if (!tb[DCB_ATTR_SET_ALL])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!netdev->dcbnl_ops->setall)
|
|
|
|
return -EOPNOTSUPP;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
|
|
|
|
netdev->dcbnl_ops->setall(netdev));
|
2012-04-20 09:49:23 +00:00
|
|
|
dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
|
2008-11-21 04:52:10 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb,
|
|
|
|
int dir)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
|
|
|
struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
|
|
|
|
struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
|
2012-06-13 02:54:58 +00:00
|
|
|
int ret;
|
2008-11-21 04:52:10 +00:00
|
|
|
int i;
|
|
|
|
u8 pgid;
|
|
|
|
u8 up_map;
|
|
|
|
u8 prio;
|
|
|
|
u8 tc_pct;
|
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
if (!tb[DCB_ATTR_PG_CFG])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!netdev->dcbnl_ops->setpgtccfgtx ||
|
2008-11-21 04:52:10 +00:00
|
|
|
!netdev->dcbnl_ops->setpgtccfgrx ||
|
|
|
|
!netdev->dcbnl_ops->setpgbwgcfgtx ||
|
|
|
|
!netdev->dcbnl_ops->setpgbwgcfgrx)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EOPNOTSUPP;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
|
|
|
|
tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
|
|
|
|
NULL);
|
2008-11-21 04:52:10 +00:00
|
|
|
if (ret)
|
2012-06-13 02:54:58 +00:00
|
|
|
return ret;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
|
|
|
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
|
|
|
|
if (!pg_tb[i])
|
|
|
|
continue;
|
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nla_parse_nested_deprecated(param_tb,
|
|
|
|
DCB_TC_ATTR_PARAM_MAX,
|
|
|
|
pg_tb[i],
|
|
|
|
dcbnl_tc_param_nest, NULL);
|
2008-11-21 04:52:10 +00:00
|
|
|
if (ret)
|
2012-06-13 02:54:58 +00:00
|
|
|
return ret;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
|
|
|
pgid = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
prio = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
up_map = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
|
|
|
|
prio =
|
|
|
|
nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
|
|
|
|
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_PGID])
|
|
|
|
pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
|
|
|
|
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
|
|
|
|
tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
|
|
|
|
|
|
|
|
if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
|
|
|
|
up_map =
|
|
|
|
nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
|
|
|
|
|
|
|
|
/* dir: Tx = 0, Rx = 1 */
|
|
|
|
if (dir) {
|
|
|
|
/* Rx */
|
|
|
|
netdev->dcbnl_ops->setpgtccfgrx(netdev,
|
|
|
|
i - DCB_PG_ATTR_TC_0,
|
|
|
|
prio, pgid, tc_pct, up_map);
|
|
|
|
} else {
|
|
|
|
/* Tx */
|
|
|
|
netdev->dcbnl_ops->setpgtccfgtx(netdev,
|
|
|
|
i - DCB_PG_ATTR_TC_0,
|
|
|
|
prio, pgid, tc_pct, up_map);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
|
|
|
|
if (!pg_tb[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
tc_pct = nla_get_u8(pg_tb[i]);
|
|
|
|
|
|
|
|
/* dir: Tx = 0, Rx = 1 */
|
|
|
|
if (dir) {
|
|
|
|
/* Rx */
|
|
|
|
netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
|
|
|
|
i - DCB_PG_ATTR_BW_ID_0, tc_pct);
|
|
|
|
} else {
|
|
|
|
/* Tx */
|
|
|
|
netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
|
|
|
|
i - DCB_PG_ATTR_BW_ID_0, tc_pct);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-20 19:56:21 +00:00
|
|
|
return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
2012-06-13 02:54:55 +00:00
|
|
|
return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
2012-06-13 02:54:55 +00:00
|
|
|
return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
|
2008-11-21 04:52:10 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 05:10:23 +00:00
|
|
|
{
|
|
|
|
struct nlattr *bcn_nest;
|
|
|
|
struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
|
|
|
|
u8 value_byte;
|
|
|
|
u32 value_integer;
|
2012-06-13 02:54:58 +00:00
|
|
|
int ret;
|
2008-11-21 05:10:23 +00:00
|
|
|
bool getall = false;
|
|
|
|
int i;
|
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
if (!tb[DCB_ATTR_BCN])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!netdev->dcbnl_ops->getbcnrp ||
|
2008-11-21 05:10:23 +00:00
|
|
|
!netdev->dcbnl_ops->getbcncfg)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EOPNOTSUPP;
|
2008-11-21 05:10:23 +00:00
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
|
|
|
|
tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
|
|
|
|
NULL);
|
2008-11-21 05:10:23 +00:00
|
|
|
if (ret)
|
2012-06-13 02:54:58 +00:00
|
|
|
return ret;
|
2008-11-21 05:10:23 +00:00
|
|
|
|
2019-04-26 09:13:06 +00:00
|
|
|
bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
|
2008-11-21 05:10:23 +00:00
|
|
|
if (!bcn_nest)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2008-11-21 05:10:23 +00:00
|
|
|
|
|
|
|
if (bcn_tb[DCB_BCN_ATTR_ALL])
|
|
|
|
getall = true;
|
|
|
|
|
|
|
|
for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
|
|
|
|
if (!getall && !bcn_tb[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
|
|
|
|
&value_byte);
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb, i, value_byte);
|
2008-11-21 05:10:23 +00:00
|
|
|
if (ret)
|
|
|
|
goto err_bcn;
|
|
|
|
}
|
|
|
|
|
2008-12-22 04:10:29 +00:00
|
|
|
for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
|
2008-11-21 05:10:23 +00:00
|
|
|
if (!getall && !bcn_tb[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
netdev->dcbnl_ops->getbcncfg(netdev, i,
|
|
|
|
&value_integer);
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u32(skb, i, value_integer);
|
2008-11-21 05:10:23 +00:00
|
|
|
if (ret)
|
|
|
|
goto err_bcn;
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_end(skb, bcn_nest);
|
2008-11-21 05:10:23 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_bcn:
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_cancel(skb, bcn_nest);
|
2008-11-21 05:10:23 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2008-11-21 05:10:23 +00:00
|
|
|
{
|
|
|
|
struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
|
|
|
|
int i;
|
2012-06-13 02:54:58 +00:00
|
|
|
int ret;
|
2008-11-21 05:10:23 +00:00
|
|
|
u8 value_byte;
|
|
|
|
u32 value_int;
|
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
if (!tb[DCB_ATTR_BCN])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!netdev->dcbnl_ops->setbcncfg ||
|
2009-11-30 00:55:45 +00:00
|
|
|
!netdev->dcbnl_ops->setbcnrp)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EOPNOTSUPP;
|
2008-11-21 05:10:23 +00:00
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
|
net: dcb: choose correct policy to parse DCB_ATTR_BCN
The dcbnl_bcn_setcfg uses erroneous policy to parse tb[DCB_ATTR_BCN],
which is introduced in commit 859ee3c43812 ("DCB: Add support for DCB
BCN"). Please see the comment in below code
static int dcbnl_bcn_setcfg(...)
{
...
ret = nla_parse_nested_deprecated(..., dcbnl_pfc_up_nest, .. )
// !!! dcbnl_pfc_up_nest for attributes
// DCB_PFC_UP_ATTR_0 to DCB_PFC_UP_ATTR_ALL in enum dcbnl_pfc_up_attrs
...
for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
// !!! DCB_BCN_ATTR_RP_0 to DCB_BCN_ATTR_RP_7 in enum dcbnl_bcn_attrs
...
value_byte = nla_get_u8(data[i]);
...
}
...
for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
// !!! DCB_BCN_ATTR_BCNA_0 to DCB_BCN_ATTR_RI in enum dcbnl_bcn_attrs
...
value_int = nla_get_u32(data[i]);
...
}
...
}
That is, the nla_parse_nested_deprecated uses dcbnl_pfc_up_nest
attributes to parse nlattr defined in dcbnl_pfc_up_attrs. But the
following access code fetch each nlattr as dcbnl_bcn_attrs attributes.
By looking up the associated nla_policy for dcbnl_bcn_attrs. We can find
the beginning part of these two policies are "same".
static const struct nla_policy dcbnl_pfc_up_nest[...] = {
[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
static const struct nla_policy dcbnl_bcn_nest[...] = {
[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
// from here is somewhat different
[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
...
[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};
Therefore, the current code is buggy and this
nla_parse_nested_deprecated could overflow the dcbnl_pfc_up_nest and use
the adjacent nla_policy to parse attributes from DCB_BCN_ATTR_BCNA_0.
Hence use the correct policy dcbnl_bcn_nest to parse the nested
tb[DCB_ATTR_BCN] TLV.
Fixes: 859ee3c43812 ("DCB: Add support for DCB BCN")
Signed-off-by: Lin Ma <linma@zju.edu.cn>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20230801013248.87240-1-linma@zju.edu.cn
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2023-08-01 01:32:48 +00:00
|
|
|
tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
|
2019-04-26 12:07:28 +00:00
|
|
|
NULL);
|
2008-11-21 05:10:23 +00:00
|
|
|
if (ret)
|
2012-06-13 02:54:58 +00:00
|
|
|
return ret;
|
2008-11-21 05:10:23 +00:00
|
|
|
|
|
|
|
for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
|
|
|
|
if (data[i] == NULL)
|
|
|
|
continue;
|
|
|
|
value_byte = nla_get_u8(data[i]);
|
|
|
|
netdev->dcbnl_ops->setbcnrp(netdev,
|
|
|
|
data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
|
|
|
|
}
|
|
|
|
|
2008-12-22 04:10:29 +00:00
|
|
|
for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
|
2008-11-21 05:10:23 +00:00
|
|
|
if (data[i] == NULL)
|
|
|
|
continue;
|
|
|
|
value_int = nla_get_u32(data[i]);
|
|
|
|
netdev->dcbnl_ops->setbcncfg(netdev,
|
|
|
|
i, value_int);
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:58 +00:00
|
|
|
return nla_put_u8(skb, DCB_ATTR_BCN, 0);
|
2008-11-21 05:10:23 +00:00
|
|
|
}
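To make the DCB_ATTR_BCN policy fix above concrete, a small sketch (hypothetical names, not part of this file) of the invariant it restores: the nla_policy array passed to nla_parse_nested_deprecated() must be indexed by the same attribute enum, and sized to the same *_MAX, as the table the caller reads afterwards; a shorter, unrelated policy lets later attributes be validated against whatever happens to follow it in memory.

static const struct nla_policy example_bcn_policy[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = { .type = NLA_U8 },
	/* ... DCB_BCN_ATTR_RP_1 through DCB_BCN_ATTR_RP_7 likewise ... */
	[DCB_BCN_ATTR_BCNA_0] = { .type = NLA_U32 },
	/* ... remaining DCB_BCN_ATTR_* entries up to DCB_BCN_ATTR_RI ... */
};

static int example_parse_bcn(struct nlattr *attr, struct nlattr **data)
{
	/* max type and policy describe the same enum, so each data[i] the
	 * caller reads has been validated with the intended type.
	 */
	return nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX, attr,
					   example_bcn_policy, NULL);
}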
|
|
|
|
|
2011-02-27 05:04:38 +00:00
|
|
|
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
|
|
|
|
int app_nested_type, int app_info_type,
|
|
|
|
int app_entry_type)
|
2011-02-27 05:04:31 +00:00
|
|
|
{
|
|
|
|
struct dcb_peer_app_info info;
|
|
|
|
struct dcb_app *table = NULL;
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
|
|
|
u16 app_count;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* retrieve the peer app configuration from the driver. If the driver
|
|
|
|
* handlers fail, exit without doing anything
|
|
|
|
*/
|
|
|
|
err = ops->peer_getappinfo(netdev, &info, &app_count);
|
|
|
|
if (!err && app_count) {
|
treewide: kmalloc() -> kmalloc_array()
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This
patch replaces cases of:
kmalloc(a * b, gfp)
with:
kmalloc_array(a * b, gfp)
as well as handling cases of:
kmalloc(a * b * c, gfp)
with:
kmalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kmalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kmalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
The tools/ directory was manually excluded, since it has its own
implementation of kmalloc().
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kmalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kmalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kmalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kmalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kmalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kmalloc
+ kmalloc_array
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kmalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kmalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kmalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kmalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kmalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kmalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kmalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kmalloc(sizeof(THING) * C2, ...)
|
kmalloc(sizeof(TYPE) * C2, ...)
|
kmalloc(C1 * C2 * C3, ...)
|
kmalloc(C1 * C2, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * E2
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kmalloc
+ kmalloc_array
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 20:55:00 +00:00
|
|
|
table = kmalloc_array(app_count, sizeof(struct dcb_app),
|
|
|
|
GFP_KERNEL);
|
2011-02-27 05:04:31 +00:00
|
|
|
if (!table)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
err = ops->peer_getapptable(netdev, table);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!err) {
|
|
|
|
u16 i;
|
|
|
|
struct nlattr *app;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* build the message; from here on the only possible failure
|
|
|
|
* is due to the skb size
|
|
|
|
*/
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
|
2019-04-26 09:13:06 +00:00
|
|
|
app = nla_nest_start_noflag(skb, app_nested_type);
|
2011-02-27 05:04:31 +00:00
|
|
|
if (!app)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
2012-04-02 00:03:01 +00:00
|
|
|
if (app_info_type &&
|
|
|
|
nla_put(skb, app_info_type, sizeof(info), &info))
|
|
|
|
goto nla_put_failure;
|
2011-02-27 05:04:31 +00:00
|
|
|
|
2012-04-02 00:03:01 +00:00
|
|
|
for (i = 0; i < app_count; i++) {
|
|
|
|
if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
|
|
|
|
&table[i]))
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
2011-02-27 05:04:31 +00:00
|
|
|
nla_nest_end(skb, app);
|
|
|
|
}
|
|
|
|
err = 0;
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
kfree(table);
|
|
|
|
return err;
|
|
|
|
}
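For the treewide kmalloc() -> kmalloc_array() change quoted above, the shape of the conversion as it applies to the allocation in dcbnl_build_peer_app() (a sketch, not the original code):

static struct dcb_app *example_alloc_app_table(u16 app_count)
{
	/* before the conversion this was:
	 *	kmalloc(app_count * sizeof(struct dcb_app), GFP_KERNEL);
	 * kmalloc_array() computes the same size but returns NULL if the
	 * multiplication would overflow instead of silently wrapping.
	 */
	return kmalloc_array(app_count, sizeof(struct dcb_app), GFP_KERNEL);
}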
|
2010-12-30 09:25:46 +00:00
|
|
|
|
2022-11-14 09:29:50 +00:00
|
|
|
static int dcbnl_getapptrust(struct net_device *netdev, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
|
|
|
enum ieee_attrs_app type;
|
|
|
|
struct nlattr *apptrust;
|
|
|
|
int nselectors, err, i;
|
|
|
|
u8 *selectors;
|
|
|
|
|
|
|
|
selectors = kzalloc(IEEE_8021QAZ_APP_SEL_MAX + 1, GFP_KERNEL);
|
|
|
|
if (!selectors)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
|
|
|
|
if (err) {
|
|
|
|
err = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
|
|
|
|
if (!apptrust) {
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < nselectors; i++) {
|
|
|
|
type = dcbnl_app_attr_type_get(selectors[i]);
|
|
|
|
err = nla_put_u8(skb, type, selectors[i]);
|
|
|
|
if (err) {
|
|
|
|
nla_nest_cancel(skb, apptrust);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nla_nest_end(skb, apptrust);
|
|
|
|
|
|
|
|
out:
|
|
|
|
kfree(selectors);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2023-01-18 21:08:26 +00:00
|
|
|
/* Set or delete APP table or rewrite table entries. The APP struct is validated
|
|
|
|
* and the appropriate callback function is called.
|
|
|
|
*/
|
|
|
|
static int dcbnl_app_table_setdel(struct nlattr *attr,
|
|
|
|
struct net_device *netdev,
|
|
|
|
int (*setdel)(struct net_device *dev,
|
|
|
|
struct dcb_app *app))
|
|
|
|
{
|
|
|
|
struct dcb_app *app_data;
|
|
|
|
enum ieee_attrs_app type;
|
|
|
|
struct nlattr *attr_itr;
|
|
|
|
int rem, err;
|
|
|
|
|
|
|
|
nla_for_each_nested(attr_itr, attr, rem) {
|
|
|
|
type = nla_type(attr_itr);
|
|
|
|
|
|
|
|
if (!dcbnl_app_attr_type_validate(type))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (nla_len(attr_itr) < sizeof(struct dcb_app))
|
|
|
|
return -ERANGE;
|
|
|
|
|
|
|
|
app_data = nla_data(attr_itr);
|
|
|
|
|
|
|
|
if (!dcbnl_app_selector_validate(type, app_data->selector))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
err = setdel(netdev, app_data);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-03-05 18:16:11 +00:00
|
|
|
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
|
2011-06-21 07:34:37 +00:00
|
|
|
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
|
2010-12-30 09:25:46 +00:00
|
|
|
{
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
2023-01-18 21:08:27 +00:00
|
|
|
struct nlattr *ieee, *app, *rewr;
|
2022-11-01 09:48:30 +00:00
|
|
|
struct dcb_app_type *itr;
|
2011-06-21 07:34:31 +00:00
|
|
|
int dcbx;
|
2012-06-13 02:54:58 +00:00
|
|
|
int err;
|
2010-12-30 09:25:46 +00:00
|
|
|
|
2012-04-02 00:03:01 +00:00
|
|
|
if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
|
|
|
|
2019-04-26 09:13:06 +00:00
|
|
|
ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
|
2010-12-30 09:25:46 +00:00
|
|
|
if (!ieee)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2010-12-30 09:25:46 +00:00
|
|
|
|
|
|
|
if (ops->ieee_getets) {
|
|
|
|
struct ieee_ets ets;
|
2013-03-09 05:52:21 +00:00
|
|
|
memset(&ets, 0, sizeof(ets));
|
2010-12-30 09:25:46 +00:00
|
|
|
err = ops->ieee_getets(netdev, &ets);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (!err &&
|
|
|
|
nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2010-12-30 09:25:46 +00:00
|
|
|
}
|
|
|
|
|
2012-04-04 21:33:30 +00:00
|
|
|
if (ops->ieee_getmaxrate) {
|
|
|
|
struct ieee_maxrate maxrate;
|
2013-03-09 05:52:21 +00:00
|
|
|
memset(&maxrate, 0, sizeof(maxrate));
|
2012-04-04 21:33:30 +00:00
|
|
|
err = ops->ieee_getmaxrate(netdev, &maxrate);
|
|
|
|
if (!err) {
|
|
|
|
err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
|
|
|
|
sizeof(maxrate), &maxrate);
|
|
|
|
if (err)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2012-04-04 21:33:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-05 18:16:11 +00:00
|
|
|
if (ops->ieee_getqcn) {
|
|
|
|
struct ieee_qcn qcn;
|
|
|
|
|
|
|
|
memset(&qcn, 0, sizeof(qcn));
|
|
|
|
err = ops->ieee_getqcn(netdev, &qcn);
|
|
|
|
if (!err) {
|
|
|
|
err = nla_put(skb, DCB_ATTR_IEEE_QCN,
|
|
|
|
sizeof(qcn), &qcn);
|
|
|
|
if (err)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ops->ieee_getqcnstats) {
|
|
|
|
struct ieee_qcn_stats qcn_stats;
|
|
|
|
|
|
|
|
memset(&qcn_stats, 0, sizeof(qcn_stats));
|
|
|
|
err = ops->ieee_getqcnstats(netdev, &qcn_stats);
|
|
|
|
if (!err) {
|
|
|
|
err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
|
|
|
|
sizeof(qcn_stats), &qcn_stats);
|
|
|
|
if (err)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-12-30 09:25:46 +00:00
|
|
|
if (ops->ieee_getpfc) {
|
|
|
|
struct ieee_pfc pfc;
|
2013-03-09 05:52:21 +00:00
|
|
|
memset(&pfc, 0, sizeof(pfc));
|
2010-12-30 09:25:46 +00:00
|
|
|
err = ops->ieee_getpfc(netdev, &pfc);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (!err &&
|
|
|
|
nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2010-12-30 09:25:46 +00:00
|
|
|
}
|
|
|
|
|
net/dcb: Add dcbnl buffer attribute
In this patch, we add dcbnl buffer attribute to allow user
change the NIC's buffer configuration such as priority
to buffer mapping and buffer size of individual buffer.
This attribute combined with pfc attribute allows advanced user to
fine tune the qos setting for specific priority queue. For example,
user can give dedicated buffer for one or more priorities or user
can give large buffer to certain priorities.
The dcb buffer configuration will be controlled by lldptool.
lldptool -T -i eth2 -V BUFFER prio 0,2,5,7,1,2,3,6
maps priorities 0,1,2,3,4,5,6,7 to receive buffer 0,2,5,7,1,2,3,6
lldptool -T -i eth2 -V BUFFER size 87296,87296,0,87296,0,0,0,0
sets receive buffer size for buffer 0,1,2,3,4,5,6,7 respectively
After discussion on mailing list with Jakub, Jiri, Ido and John, we agreed to
choose dcbnl over devlink interface since this feature is intended to set
port attributes which are governed by the netdev instance of that port, where
devlink API is more suitable for global ASIC configurations.
We present an use case scenario where dcbnl buffer attribute configured
by advance user helps reduce the latency of messages of different sizes.
Scenarios description:
On ConnectX-5, we run latency sensitive traffic with
small/medium message sizes ranging from 64B to 256KB and bandwidth sensitive
traffic with large messages sizes 512KB and 1MB. We group small, medium,
and large message sizes to their own pfc enables priorities as follow.
Priorities 1 & 2 (64B, 256B and 1KB)
Priorities 3 & 4 (4KB, 8KB, 16KB, 64KB, 128KB and 256KB)
Priorities 5 & 6 (512KB and 1MB)
By default, ConnectX-5 maps all pfc enabled priorities to a single
lossless fixed buffer size of 50% of total available buffer space. The
other 50% is assigned to lossy buffer. Using dcbnl buffer attribute,
we create three equal size lossless buffers. Each buffer has 25% of total
available buffer space. Thus, the lossy buffer size reduces to 25%. Priority
to lossless buffer mappings are set as follow.
Priorities 1 & 2 on lossless buffer #1
Priorities 3 & 4 on lossless buffer #2
Priorities 5 & 6 on lossless buffer #3
We observe improvements in latency for small and medium message sizes
as follows. Please note that the large message sizes bandwidth performance is
reduced but the total bandwidth remains the same.
256B message size (42 % latency reduction)
4K message size (21% latency reduction)
64K message size (16% latency reduction)
CC: Ido Schimmel <idosch@idosch.org>
CC: Jakub Kicinski <jakub.kicinski@netronome.com>
CC: Jiri Pirko <jiri@resnulli.us>
CC: Or Gerlitz <gerlitz.or@gmail.com>
CC: Parav Pandit <parav@mellanox.com>
CC: Aron Silverton <aron.silverton@oracle.com>
Signed-off-by: Huy Nguyen <huyn@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
2018-02-22 17:57:10 +00:00
|
|
|
if (ops->dcbnl_getbuffer) {
|
|
|
|
struct dcbnl_buffer buffer;
|
|
|
|
|
|
|
|
memset(&buffer, 0, sizeof(buffer));
|
|
|
|
err = ops->dcbnl_getbuffer(netdev, &buffer);
|
|
|
|
if (!err &&
|
|
|
|
nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
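A rough sketch of the driver side of the DCB_ATTR_DCB_BUFFER attribute described above. The callback shape matches the ops->dcbnl_getbuffer() call in this function; the struct dcbnl_buffer field names (prio2buffer[], buffer_size[], total_size) are recalled from the dcbnl UAPI header and should be read as an assumption of this sketch, with values taken from the lldptool example in the commit message.

static int example_getbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
{
	int prio;

	/* map all eight 802.1p priorities to receive buffer 0 */
	for (prio = 0; prio < 8; prio++)
		buf->prio2buffer[prio] = 0;

	buf->buffer_size[0] = 87296;	/* bytes for buffer 0 */
	buf->total_size = 87296;
	return 0;
}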
|
|
|
|
|
2019-04-26 09:13:06 +00:00
|
|
|
app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
|
2010-12-30 09:26:31 +00:00
|
|
|
if (!app)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2010-12-30 09:26:31 +00:00
|
|
|
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_lock_bh(&dcb_lock);
|
2010-12-30 09:26:31 +00:00
|
|
|
list_for_each_entry(itr, &dcb_app_list, list) {
|
2011-10-06 08:52:33 +00:00
|
|
|
if (itr->ifindex == netdev->ifindex) {
|
2022-11-01 09:48:29 +00:00
|
|
|
enum ieee_attrs_app type =
|
|
|
|
dcbnl_app_attr_type_get(itr->app.selector);
|
|
|
|
err = nla_put(skb, type, sizeof(itr->app), &itr->app);
|
2011-01-04 21:03:12 +00:00
|
|
|
if (err) {
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-01-04 21:03:12 +00:00
|
|
|
}
|
|
|
|
}
|
2010-12-30 09:26:31 +00:00
|
|
|
}
|
2011-06-21 07:34:31 +00:00
|
|
|
|
|
|
|
if (netdev->dcbnl_ops->getdcbx)
|
|
|
|
dcbx = netdev->dcbnl_ops->getdcbx(netdev);
|
|
|
|
else
|
|
|
|
dcbx = -EOPNOTSUPP;
|
|
|
|
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2010-12-30 09:26:31 +00:00
|
|
|
nla_nest_end(skb, app);
|
|
|
|
|
2023-01-18 21:08:27 +00:00
|
|
|
rewr = nla_nest_start(skb, DCB_ATTR_DCB_REWR_TABLE);
|
|
|
|
if (!rewr)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
spin_lock_bh(&dcb_lock);
|
|
|
|
list_for_each_entry(itr, &dcb_rewr_list, list) {
|
|
|
|
if (itr->ifindex == netdev->ifindex) {
|
|
|
|
enum ieee_attrs_app type =
|
|
|
|
dcbnl_app_attr_type_get(itr->app.selector);
|
|
|
|
err = nla_put(skb, type, sizeof(itr->app), &itr->app);
|
|
|
|
if (err) {
|
|
|
|
spin_unlock_bh(&dcb_lock);
|
|
|
|
nla_nest_cancel(skb, rewr);
|
|
|
|
return -EMSGSIZE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_bh(&dcb_lock);
|
|
|
|
nla_nest_end(skb, rewr);
|
|
|
|
|
2022-11-01 09:48:30 +00:00
|
|
|
if (ops->dcbnl_getapptrust) {
|
2022-11-14 09:29:50 +00:00
|
|
|
err = dcbnl_getapptrust(netdev, skb);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2022-11-01 09:48:30 +00:00
|
|
|
}
|
|
|
|
|
2011-02-27 05:04:31 +00:00
|
|
|
/* get peer info if available */
|
|
|
|
if (ops->ieee_peer_getets) {
|
|
|
|
struct ieee_ets ets;
|
2013-03-09 05:52:21 +00:00
|
|
|
memset(&ets, 0, sizeof(ets));
|
2011-02-27 05:04:31 +00:00
|
|
|
err = ops->ieee_peer_getets(netdev, &ets);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (!err &&
|
|
|
|
nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-02-27 05:04:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ops->ieee_peer_getpfc) {
|
|
|
|
struct ieee_pfc pfc;
|
2013-03-09 05:52:21 +00:00
|
|
|
memset(&pfc, 0, sizeof(pfc));
|
2011-02-27 05:04:31 +00:00
|
|
|
err = ops->ieee_peer_getpfc(netdev, &pfc);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (!err &&
|
|
|
|
nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-02-27 05:04:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ops->peer_getappinfo && ops->peer_getapptable) {
|
2011-02-27 05:04:38 +00:00
|
|
|
err = dcbnl_build_peer_app(netdev, skb,
|
|
|
|
DCB_ATTR_IEEE_PEER_APP,
|
|
|
|
DCB_ATTR_IEEE_APP_UNSPEC,
|
|
|
|
DCB_ATTR_IEEE_APP);
|
2011-02-27 05:04:31 +00:00
|
|
|
if (err)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-02-27 05:04:31 +00:00
|
|
|
}
|
|
|
|
|
2010-12-30 09:25:46 +00:00
|
|
|
nla_nest_end(skb, ieee);
|
2011-06-21 07:34:31 +00:00
|
|
|
if (dcbx >= 0) {
|
|
|
|
err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
|
|
|
|
if (err)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-06-21 07:34:31 +00:00
|
|
|
}
|
2010-12-30 09:25:46 +00:00
|
|
|
|
2011-06-21 07:34:37 +00:00
|
|
|
return 0;
|
2010-12-30 09:25:46 +00:00
|
|
|
}
|
|
|
|
|
2011-07-05 06:16:25 +00:00
|
|
|
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
|
|
|
|
int dir)
|
|
|
|
{
|
|
|
|
u8 pgid, up_map, prio, tc_pct;
|
|
|
|
const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
|
|
|
|
int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
|
2019-04-26 09:13:06 +00:00
|
|
|
struct nlattr *pg = nla_nest_start_noflag(skb, i);
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
if (!pg)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
|
2019-04-26 09:13:06 +00:00
|
|
|
struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
if (!tc_nest)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
pgid = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
prio = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
up_map = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
|
|
|
|
if (!dir)
|
|
|
|
ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
|
|
|
|
&prio, &pgid, &tc_pct, &up_map);
|
|
|
|
else
|
|
|
|
ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
|
|
|
|
&prio, &pgid, &tc_pct, &up_map);
|
|
|
|
|
2012-04-02 00:03:01 +00:00
|
|
|
if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
|
|
|
|
nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
|
|
|
|
nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
|
|
|
|
nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-07-05 06:16:25 +00:00
|
|
|
nla_nest_end(skb, tc_nest);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
|
|
|
|
tc_pct = DCB_ATTR_VALUE_UNDEFINED;
|
|
|
|
|
|
|
|
if (!dir)
|
|
|
|
ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
|
|
|
|
&tc_pct);
|
|
|
|
else
|
|
|
|
ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
|
|
|
|
&tc_pct);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (nla_put_u8(skb, i, tc_pct))
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EMSGSIZE;
|
2011-07-05 06:16:25 +00:00
|
|
|
}
|
|
|
|
nla_nest_end(skb, pg);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
|
|
|
|
{
|
|
|
|
struct nlattr *cee, *app;
|
|
|
|
struct dcb_app_type *itr;
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
|
|
|
int dcbx, i, err = -EMSGSIZE;
|
|
|
|
u8 value;
|
|
|
|
|
2012-04-02 00:03:01 +00:00
|
|
|
if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
|
|
|
|
goto nla_put_failure;
|
2019-04-26 09:13:06 +00:00
|
|
|
cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
|
2011-07-05 06:16:25 +00:00
|
|
|
if (!cee)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
/* local pg */
|
|
|
|
if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
|
|
|
|
err = dcbnl_cee_pg_fill(skb, netdev, 1);
|
|
|
|
if (err)
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
|
|
|
|
err = dcbnl_cee_pg_fill(skb, netdev, 0);
|
|
|
|
if (err)
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* local pfc */
|
|
|
|
if (ops->getpfccfg) {
|
2019-04-26 09:13:06 +00:00
|
|
|
struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
|
|
|
|
DCB_ATTR_CEE_PFC);
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
if (!pfc_nest)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
|
|
|
|
ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (nla_put_u8(skb, i, value))
|
|
|
|
goto nla_put_failure;
|
2011-07-05 06:16:25 +00:00
|
|
|
}
|
|
|
|
nla_nest_end(skb, pfc_nest);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* local app */
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_lock_bh(&dcb_lock);
|
2019-04-26 09:13:06 +00:00
|
|
|
app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
|
2011-07-05 06:16:25 +00:00
|
|
|
if (!app)
|
2011-07-07 21:27:24 +00:00
|
|
|
goto dcb_unlock;
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
list_for_each_entry(itr, &dcb_app_list, list) {
|
2011-10-06 08:52:33 +00:00
|
|
|
if (itr->ifindex == netdev->ifindex) {
|
2019-04-26 09:13:06 +00:00
|
|
|
struct nlattr *app_nest = nla_nest_start_noflag(skb,
|
|
|
|
DCB_ATTR_APP);
|
2011-07-05 06:16:25 +00:00
|
|
|
if (!app_nest)
|
|
|
|
goto dcb_unlock;
|
|
|
|
|
|
|
|
err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
|
|
|
|
itr->app.selector);
|
|
|
|
if (err)
|
|
|
|
goto dcb_unlock;
|
|
|
|
|
|
|
|
err = nla_put_u16(skb, DCB_APP_ATTR_ID,
|
|
|
|
itr->app.protocol);
|
|
|
|
if (err)
|
|
|
|
goto dcb_unlock;
|
|
|
|
|
|
|
|
err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
|
|
|
|
itr->app.priority);
|
|
|
|
if (err)
|
|
|
|
goto dcb_unlock;
|
|
|
|
|
|
|
|
nla_nest_end(skb, app_nest);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nla_nest_end(skb, app);
|
|
|
|
|
|
|
|
if (netdev->dcbnl_ops->getdcbx)
|
|
|
|
dcbx = netdev->dcbnl_ops->getdcbx(netdev);
|
|
|
|
else
|
|
|
|
dcbx = -EOPNOTSUPP;
|
|
|
|
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
/* features flags */
|
|
|
|
if (ops->getfeatcfg) {
|
2019-04-26 09:13:06 +00:00
|
|
|
struct nlattr *feat = nla_nest_start_noflag(skb,
|
|
|
|
DCB_ATTR_CEE_FEAT);
|
2011-07-05 06:16:25 +00:00
|
|
|
if (!feat)
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
|
|
|
|
i++)
|
2012-04-02 00:03:01 +00:00
|
|
|
if (!ops->getfeatcfg(netdev, i, &value) &&
|
|
|
|
nla_put_u8(skb, i, value))
|
|
|
|
goto nla_put_failure;
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
nla_nest_end(skb, feat);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* peer info if available */
|
|
|
|
if (ops->cee_peer_getpg) {
|
|
|
|
struct cee_pg pg;
|
2013-03-09 05:52:21 +00:00
|
|
|
memset(&pg, 0, sizeof(pg));
|
2011-07-05 06:16:25 +00:00
|
|
|
err = ops->cee_peer_getpg(netdev, &pg);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (!err &&
|
|
|
|
nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
|
|
|
|
goto nla_put_failure;
|
2011-07-05 06:16:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ops->cee_peer_getpfc) {
|
|
|
|
struct cee_pfc pfc;
|
2013-03-09 05:52:21 +00:00
|
|
|
memset(&pfc, 0, sizeof(pfc));
|
2011-07-05 06:16:25 +00:00
|
|
|
err = ops->cee_peer_getpfc(netdev, &pfc);
|
2012-04-02 00:03:01 +00:00
|
|
|
if (!err &&
|
|
|
|
nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
|
|
|
|
goto nla_put_failure;
|
2011-07-05 06:16:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ops->peer_getappinfo && ops->peer_getapptable) {
|
|
|
|
err = dcbnl_build_peer_app(netdev, skb,
|
|
|
|
DCB_ATTR_CEE_PEER_APP_TABLE,
|
|
|
|
DCB_ATTR_CEE_PEER_APP_INFO,
|
|
|
|
DCB_ATTR_CEE_PEER_APP);
|
|
|
|
if (err)
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
|
|
|
nla_nest_end(skb, cee);
|
|
|
|
|
|
|
|
/* DCBX state */
|
|
|
|
if (dcbx >= 0) {
|
|
|
|
err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
|
|
|
|
if (err)
|
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
dcb_unlock:
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2011-07-05 06:16:25 +00:00
|
|
|
nla_put_failure:
|
2016-12-03 13:49:08 +00:00
|
|
|
err = -EMSGSIZE;
|
2011-07-05 06:16:25 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dcbnl_notify(struct net_device *dev, int event, int cmd,
|
2012-09-07 20:12:54 +00:00
|
|
|
u32 seq, u32 portid, int dcbx_ver)
|
2011-06-21 07:34:37 +00:00
|
|
|
{
|
|
|
|
struct net *net = dev_net(dev);
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct nlmsghdr *nlh;
|
|
|
|
const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!ops)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2012-09-07 20:12:54 +00:00
|
|
|
skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
|
2011-06-21 07:34:37 +00:00
|
|
|
if (!skb)
|
2021-06-01 14:13:58 +00:00
|
|
|
return -ENOMEM;
|
2011-06-21 07:34:37 +00:00
|
|
|
|
2011-07-05 06:16:25 +00:00
|
|
|
if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
|
|
|
|
err = dcbnl_ieee_fill(skb, dev);
|
|
|
|
else
|
|
|
|
err = dcbnl_cee_fill(skb, dev);
|
|
|
|
|
2011-06-21 07:34:37 +00:00
|
|
|
if (err < 0) {
|
|
|
|
/* Report error to broadcast listeners */
|
2012-06-13 02:54:57 +00:00
|
|
|
nlmsg_free(skb);
|
2011-06-21 07:34:37 +00:00
|
|
|
rtnl_set_sk_err(net, RTNLGRP_DCB, err);
|
|
|
|
} else {
|
|
|
|
/* End nlmsg and notify broadcast listeners */
|
|
|
|
nlmsg_end(skb, nlh);
|
|
|
|
rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
2011-07-05 06:16:25 +00:00
|
|
|
|
|
|
|
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
|
2012-09-07 20:12:54 +00:00
|
|
|
u32 seq, u32 portid)
|
2011-07-05 06:16:25 +00:00
|
|
|
{
|
2012-09-07 20:12:54 +00:00
|
|
|
return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
|
2011-07-05 06:16:25 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcbnl_ieee_notify);
|
|
|
|
|
|
|
|
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
|
2012-09-07 20:12:54 +00:00
|
|
|
u32 seq, u32 portid)
|
2011-07-05 06:16:25 +00:00
|
|
|
{
|
2012-09-07 20:12:54 +00:00
|
|
|
return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
|
2011-07-05 06:16:25 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcbnl_cee_notify);
|
2011-06-21 07:34:37 +00:00
|
|
|
|
2015-03-05 18:16:11 +00:00
|
|
|
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
|
|
|
|
* If any requested operation can not be completed
|
|
|
|
* the entire msg is aborted and an error value is returned.
|
2011-06-21 07:34:37 +00:00
|
|
|
* No attempt is made to reconcile the case where only part of the
|
|
|
|
* cmd can be completed.
|
|
|
|
*/
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2011-06-21 07:34:37 +00:00
|
|
|
{
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
|
|
|
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
|
2020-09-10 12:09:05 +00:00
|
|
|
int prio;
|
2012-06-13 02:54:58 +00:00
|
|
|
int err;
|
2011-06-21 07:34:37 +00:00
|
|
|
|
|
|
|
if (!ops)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -EOPNOTSUPP;
|
2011-06-21 07:34:37 +00:00
|
|
|
|
2011-06-21 07:35:04 +00:00
|
|
|
if (!tb[DCB_ATTR_IEEE])
|
|
|
|
return -EINVAL;
|
|
|
|
|
netlink: make validation more configurable for future strictness
We currently have two levels of strict validation:
1) liberal (default)
- undefined (type >= max) & NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
- garbage at end of message accepted
2) strict (opt-in)
- NLA_UNSPEC attributes accepted
- attribute length >= expected accepted
Split out parsing strictness into four different options:
* TRAILING - check that there's no trailing data after parsing
attributes (in message or nested)
* MAXTYPE - reject attrs > max known type
* UNSPEC - reject attributes with NLA_UNSPEC policy entries
* STRICT_ATTRS - strictly validate attribute size
The default for future things should be *everything*.
The current *_strict() is a combination of TRAILING and MAXTYPE,
and is renamed to _deprecated_strict().
The current regular parsing has none of this, and is renamed to
*_parse_deprecated().
Additionally it allows us to selectively set one of the new flags
even on old policies. Notably, the UNSPEC flag could be useful in
this case, since it can be arranged (by filling in the policy) to
not be an incompatible userspace ABI change, but would then going
forward prevent forgetting attribute entries. Similar can apply
to the POLICY flag.
We end up with the following renames:
* nla_parse -> nla_parse_deprecated
* nla_parse_strict -> nla_parse_deprecated_strict
* nlmsg_parse -> nlmsg_parse_deprecated
* nlmsg_parse_strict -> nlmsg_parse_deprecated_strict
* nla_parse_nested -> nla_parse_nested_deprecated
* nla_validate_nested -> nla_validate_nested_deprecated
Using spatch, of course:
@@
expression TB, MAX, HEAD, LEN, POL, EXT;
@@
-nla_parse(TB, MAX, HEAD, LEN, POL, EXT)
+nla_parse_deprecated(TB, MAX, HEAD, LEN, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression NLH, HDRLEN, TB, MAX, POL, EXT;
@@
-nlmsg_parse_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
+nlmsg_parse_deprecated_strict(NLH, HDRLEN, TB, MAX, POL, EXT)
@@
expression TB, MAX, NLA, POL, EXT;
@@
-nla_parse_nested(TB, MAX, NLA, POL, EXT)
+nla_parse_nested_deprecated(TB, MAX, NLA, POL, EXT)
@@
expression START, MAX, POL, EXT;
@@
-nla_validate_nested(START, MAX, POL, EXT)
+nla_validate_nested_deprecated(START, MAX, POL, EXT)
@@
expression NLH, HDRLEN, MAX, POL, EXT;
@@
-nlmsg_validate(NLH, HDRLEN, MAX, POL, EXT)
+nlmsg_validate_deprecated(NLH, HDRLEN, MAX, POL, EXT)
For this patch, don't actually add the strict, non-renamed versions
yet so that it breaks compile if I get it wrong.
Also, while at it, make nla_validate and nla_parse go down to a
common __nla_validate_parse() function to avoid code duplication.
Ultimately, this allows us to have very strict validation for every
new caller of nla_parse()/nlmsg_parse() etc as re-introduced in the
next patch, while existing things will continue to work as is.
In effect then, this adds fully strict validation for any new command.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-04-26 12:07:28 +00:00
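A condensed illustration of the renamed helper as it is used just below; the single-entry policy and function name here are stand-ins for illustration, not the file's actual dcbnl_ieee_policy.
/* Sketch only: parse a nested IEEE attribute with the old, liberal
 * ("deprecated") validation semantics. The one-entry policy is a placeholder.
 */
static int example_parse_ieee_nest(struct nlattr *nest)
{
	static const struct nla_policy example_policy[DCB_ATTR_IEEE_MAX + 1] = {
		[DCB_ATTR_IEEE_ETS] = { .len = sizeof(struct ieee_ets) },
	};
	struct nlattr *tb[DCB_ATTR_IEEE_MAX + 1];

	return nla_parse_nested_deprecated(tb, DCB_ATTR_IEEE_MAX, nest,
					   example_policy, NULL);
}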
|
|
|
err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
|
|
|
|
tb[DCB_ATTR_IEEE],
|
|
|
|
dcbnl_ieee_policy, NULL);
|
2011-06-21 07:34:37 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
|
|
|
|
struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
|
|
|
|
err = ops->ieee_setets(netdev, ets);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2012-04-04 21:33:30 +00:00
|
|
|
if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
|
|
|
|
struct ieee_maxrate *maxrate =
|
|
|
|
nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
|
|
|
|
err = ops->ieee_setmaxrate(netdev, maxrate);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2015-03-05 18:16:11 +00:00
|
|
|
if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
|
|
|
|
struct ieee_qcn *qcn =
|
|
|
|
nla_data(ieee[DCB_ATTR_IEEE_QCN]);
|
|
|
|
|
|
|
|
err = ops->ieee_setqcn(netdev, qcn);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2011-06-21 07:34:37 +00:00
|
|
|
if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
|
|
|
|
struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
|
|
|
|
err = ops->ieee_setpfc(netdev, pfc);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2018-02-22 17:57:10 +00:00
|
|
|
if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
|
|
|
|
struct dcbnl_buffer *buffer =
|
|
|
|
nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
|
|
|
|
|
2020-09-10 12:09:05 +00:00
|
|
|
for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
|
|
|
|
if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-22 17:57:10 +00:00
|
|
|
err = ops->dcbnl_setbuffer(netdev, buffer);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2023-01-18 21:08:27 +00:00
|
|
|
if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
|
|
|
|
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
|
|
|
|
netdev,
|
|
|
|
ops->dcbnl_setrewr ?: dcb_setrewr);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2011-06-21 07:34:37 +00:00
|
|
|
if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
|
2023-01-18 21:08:26 +00:00
|
|
|
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
|
|
|
|
netdev, ops->ieee_setapp ?:
|
|
|
|
dcb_ieee_setapp);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
2011-06-21 07:34:37 +00:00
|
|
|
}
|
|
|
|
|
2022-11-01 09:48:30 +00:00
|
|
|
if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) {
|
|
|
|
u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0};
|
|
|
|
struct nlattr *attr;
|
|
|
|
int nselectors = 0;
|
|
|
|
int rem;
|
|
|
|
|
|
|
|
if (!ops->dcbnl_setapptrust) {
|
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
nla_for_each_nested(attr, ieee[DCB_ATTR_DCB_APP_TRUST_TABLE],
|
|
|
|
rem) {
|
|
|
|
enum ieee_attrs_app type = nla_type(attr);
|
|
|
|
u8 selector;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!dcbnl_app_attr_type_validate(type) ||
|
|
|
|
nla_len(attr) != 1 ||
|
|
|
|
nselectors >= sizeof(selectors)) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
selector = nla_get_u8(attr);
|
|
|
|
|
|
|
|
if (!dcbnl_app_selector_validate(type, selector)) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Duplicate selector ? */
|
|
|
|
for (i = 0; i < nselectors; i++) {
|
|
|
|
if (selectors[i] == selector) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
selectors[nselectors++] = selector;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = ops->dcbnl_setapptrust(netdev, selectors, nselectors);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2011-06-21 07:34:37 +00:00
|
|
|
err:
|
2012-06-13 02:54:55 +00:00
|
|
|
err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
|
2011-07-05 06:16:25 +00:00
|
|
|
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
|
2011-06-21 07:34:37 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2011-06-21 07:34:37 +00:00
|
|
|
{
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
|
|
|
|
|
|
|
if (!ops)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
return dcbnl_ieee_fill(skb, netdev);
|
2011-06-21 07:34:37 +00:00
|
|
|
}
|
2011-06-21 07:34:48 +00:00
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2011-06-21 07:34:48 +00:00
|
|
|
{
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
|
|
|
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
|
2012-06-13 02:54:58 +00:00
|
|
|
int err;
|
2011-06-21 07:34:48 +00:00
|
|
|
|
|
|
|
if (!ops)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (!tb[DCB_ATTR_IEEE])
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
|
|
|
|
tb[DCB_ATTR_IEEE],
|
|
|
|
dcbnl_ieee_policy, NULL);
|
2011-06-21 07:34:48 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
|
2023-01-18 21:08:26 +00:00
|
|
|
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
|
|
|
|
netdev, ops->ieee_delapp ?:
|
|
|
|
dcb_ieee_delapp);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
2011-06-21 07:34:48 +00:00
|
|
|
}
|
|
|
|
|
2023-01-18 21:08:27 +00:00
|
|
|
if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
|
|
|
|
err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
|
|
|
|
netdev,
|
|
|
|
ops->dcbnl_delrewr ?: dcb_delrewr);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2011-06-21 07:34:48 +00:00
|
|
|
err:
|
2012-06-13 02:54:55 +00:00
|
|
|
err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
|
2011-07-05 06:16:25 +00:00
|
|
|
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
|
2011-06-21 07:34:48 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-12-30 06:26:48 +00:00
|
|
|
/* DCBX configuration */
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2010-12-30 06:26:48 +00:00
|
|
|
{
|
|
|
|
if (!netdev->dcbnl_ops->getdcbx)
|
2011-01-03 08:04:59 +00:00
|
|
|
return -EOPNOTSUPP;
|
2010-12-30 06:26:48 +00:00
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
return nla_put_u8(skb, DCB_ATTR_DCBX,
|
|
|
|
netdev->dcbnl_ops->getdcbx(netdev));
|
2010-12-30 06:26:48 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2010-12-30 06:26:48 +00:00
|
|
|
{
|
|
|
|
u8 value;
|
|
|
|
|
2011-01-03 08:04:59 +00:00
|
|
|
if (!netdev->dcbnl_ops->setdcbx)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (!tb[DCB_ATTR_DCBX])
|
|
|
|
return -EINVAL;
|
2010-12-30 06:26:48 +00:00
|
|
|
|
|
|
|
value = nla_get_u8(tb[DCB_ATTR_DCBX]);
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
return nla_put_u8(skb, DCB_ATTR_DCBX,
|
|
|
|
netdev->dcbnl_ops->setdcbx(netdev, value));
|
2010-12-30 06:26:48 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2010-12-30 06:26:55 +00:00
|
|
|
{
|
|
|
|
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
|
|
|
|
u8 value;
|
2011-01-03 08:04:59 +00:00
|
|
|
int ret, i;
|
2010-12-30 06:26:55 +00:00
|
|
|
int getall = 0;
|
|
|
|
|
2011-01-03 08:04:59 +00:00
|
|
|
if (!netdev->dcbnl_ops->getfeatcfg)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (!tb[DCB_ATTR_FEATCFG])
|
|
|
|
return -EINVAL;
|
2010-12-30 06:26:55 +00:00
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
|
|
|
|
tb[DCB_ATTR_FEATCFG],
|
|
|
|
dcbnl_featcfg_nest, NULL);
|
2011-01-03 08:04:59 +00:00
|
|
|
if (ret)
|
2012-06-13 02:54:55 +00:00
|
|
|
return ret;
|
2010-12-30 06:26:55 +00:00
|
|
|
|
2019-04-26 09:13:06 +00:00
|
|
|
nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
|
2012-06-13 02:54:55 +00:00
|
|
|
if (!nest)
|
|
|
|
return -EMSGSIZE;
|
2010-12-30 06:26:55 +00:00
|
|
|
|
|
|
|
if (data[DCB_FEATCFG_ATTR_ALL])
|
|
|
|
getall = 1;
|
|
|
|
|
|
|
|
for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
|
|
|
|
if (!getall && !data[i])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
|
2011-01-03 08:04:59 +00:00
|
|
|
if (!ret)
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb, i, value);
|
2010-12-30 06:26:55 +00:00
|
|
|
|
2011-01-03 08:04:59 +00:00
|
|
|
if (ret) {
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_cancel(skb, nest);
|
2011-01-03 08:04:59 +00:00
|
|
|
goto nla_put_failure;
|
|
|
|
}
|
2010-12-30 06:26:55 +00:00
|
|
|
}
|
2012-06-13 02:54:55 +00:00
|
|
|
nla_nest_end(skb, nest);
|
2010-12-30 06:26:55 +00:00
|
|
|
|
2011-01-03 08:04:59 +00:00
|
|
|
nla_put_failure:
|
2010-12-30 06:26:55 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2010-12-30 06:26:55 +00:00
|
|
|
{
|
|
|
|
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
|
2011-01-03 08:04:59 +00:00
|
|
|
int ret, i;
|
2010-12-30 06:26:55 +00:00
|
|
|
u8 value;
|
|
|
|
|
2011-01-03 08:04:59 +00:00
|
|
|
if (!netdev->dcbnl_ops->setfeatcfg)
|
|
|
|
return -ENOTSUPP;
|
|
|
|
|
|
|
|
if (!tb[DCB_ATTR_FEATCFG])
|
|
|
|
return -EINVAL;
|
2010-12-30 06:26:55 +00:00
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
|
|
|
|
tb[DCB_ATTR_FEATCFG],
|
|
|
|
dcbnl_featcfg_nest, NULL);
|
2010-12-30 06:26:55 +00:00
|
|
|
|
2011-01-03 08:04:59 +00:00
|
|
|
if (ret)
|
2010-12-30 06:26:55 +00:00
|
|
|
goto err;
|
|
|
|
|
|
|
|
for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
|
|
|
|
if (data[i] == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
value = nla_get_u8(data[i]);
|
|
|
|
|
|
|
|
ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
|
|
|
|
|
|
|
|
if (ret)
|
2011-01-03 08:04:59 +00:00
|
|
|
goto err;
|
2010-12-30 06:26:55 +00:00
|
|
|
}
|
|
|
|
err:
|
2012-06-13 02:54:55 +00:00
|
|
|
ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
|
2011-01-03 08:04:59 +00:00
|
|
|
|
2010-12-30 06:26:55 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-02-27 05:04:38 +00:00
|
|
|
/* Handle CEE DCBX GET commands. */
|
2012-06-13 02:54:55 +00:00
|
|
|
static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
|
|
|
|
u32 seq, struct nlattr **tb, struct sk_buff *skb)
|
2011-02-27 05:04:38 +00:00
|
|
|
{
|
|
|
|
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
|
|
|
|
|
|
|
|
if (!ops)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
2012-06-13 02:54:55 +00:00
|
|
|
return dcbnl_cee_fill(skb, netdev);
|
2011-02-27 05:04:38 +00:00
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:54 +00:00
|
|
|
struct reply_func {
|
|
|
|
/* reply netlink message type */
|
|
|
|
int type;
|
|
|
|
|
|
|
|
/* function to fill message contents */
|
|
|
|
int (*cb)(struct net_device *, struct nlmsghdr *, u32,
|
|
|
|
struct nlattr **, struct sk_buff *);
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
|
2012-06-13 02:54:55 +00:00
|
|
|
[DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
|
|
|
|
[DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
|
|
|
|
[DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
|
|
|
|
[DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
|
|
|
|
[DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
|
|
|
|
[DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
|
|
|
|
[DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
|
|
|
|
[DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
|
|
|
|
[DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
|
|
|
|
[DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
|
|
|
|
[DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
|
|
|
|
[DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
|
|
|
|
[DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
|
|
|
|
[DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
|
|
|
|
[DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
|
|
|
|
[DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
|
|
|
|
[DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
|
|
|
|
[DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
|
|
|
|
[DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
|
|
|
|
[DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
|
|
|
|
[DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
|
|
|
|
[DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
|
|
|
|
[DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
|
|
|
|
[DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
|
|
|
|
[DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
|
|
|
|
[DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
|
|
|
|
[DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
|
2012-06-13 02:54:54 +00:00
|
|
|
};
|
|
|
|
|
2017-04-16 16:48:24 +00:00
|
|
|
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
|
|
|
|
struct netlink_ext_ack *extack)
|
2008-11-21 04:52:10 +00:00
|
|
|
{
|
|
|
|
struct net *net = sock_net(skb->sk);
|
|
|
|
struct net_device *netdev;
|
2012-06-13 02:55:01 +00:00
|
|
|
struct dcbmsg *dcb = nlmsg_data(nlh);
|
2008-11-21 04:52:10 +00:00
|
|
|
struct nlattr *tb[DCB_ATTR_MAX + 1];
|
2020-06-23 02:50:39 +00:00
|
|
|
u32 portid = NETLINK_CB(skb).portid;
|
2008-11-21 04:52:10 +00:00
|
|
|
int ret = -EINVAL;
|
2012-06-13 02:54:54 +00:00
|
|
|
struct sk_buff *reply_skb;
|
2012-06-13 22:34:03 +00:00
|
|
|
struct nlmsghdr *reply_nlh = NULL;
|
2012-06-13 02:54:54 +00:00
|
|
|
const struct reply_func *fn;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2014-04-23 21:29:27 +00:00
|
|
|
if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
|
2012-11-16 03:03:00 +00:00
|
|
|
return -EPERM;
|
|
|
|
|
2019-04-26 12:07:28 +00:00
|
|
|
ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
|
|
|
|
dcbnl_rtnl_policy, extack);
|
2008-11-21 04:52:10 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2012-06-13 02:54:54 +00:00
|
|
|
if (dcb->cmd > DCB_CMD_MAX)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* check if a reply function has been defined for the command */
|
|
|
|
fn = &reply_funcs[dcb->cmd];
|
|
|
|
if (!fn->cb)
|
|
|
|
return -EOPNOTSUPP;
|
2021-01-11 17:07:07 +00:00
|
|
|
if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
|
2020-12-22 21:49:44 +00:00
|
|
|
return -EPERM;
|
2012-06-13 02:54:54 +00:00
|
|
|
|
2008-11-21 04:52:10 +00:00
|
|
|
if (!tb[DCB_ATTR_IFNAME])
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-01-15 02:23:39 +00:00
|
|
|
netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
|
2008-11-21 04:52:10 +00:00
|
|
|
if (!netdev)
|
2012-06-13 02:54:58 +00:00
|
|
|
return -ENODEV;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2014-01-15 02:23:39 +00:00
|
|
|
if (!netdev->dcbnl_ops)
|
|
|
|
return -EOPNOTSUPP;
|
2008-11-21 04:52:10 +00:00
|
|
|
|
2012-09-07 20:12:54 +00:00
|
|
|
reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
|
2012-06-13 02:54:54 +00:00
|
|
|
nlh->nlmsg_flags, &reply_nlh);
|
2014-01-15 02:23:39 +00:00
|
|
|
if (!reply_skb)
|
2021-06-01 14:13:58 +00:00
|
|
|
return -ENOMEM;
|
2012-06-13 02:54:54 +00:00
|
|
|
|
|
|
|
ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
|
|
|
|
if (ret < 0) {
|
|
|
|
nlmsg_free(reply_skb);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
nlmsg_end(reply_skb, reply_nlh);
|
|
|
|
|
2012-12-09 20:48:13 +00:00
|
|
|
ret = rtnl_unicast(reply_skb, net, portid);
|
2008-11-21 04:52:10 +00:00
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-01-18 21:08:27 +00:00
|
|
|
static struct dcb_app_type *dcb_rewr_lookup(const struct dcb_app *app,
|
|
|
|
int ifindex, int proto)
|
|
|
|
{
|
|
|
|
struct dcb_app_type *itr;
|
|
|
|
|
|
|
|
list_for_each_entry(itr, &dcb_rewr_list, list) {
|
|
|
|
if (itr->app.selector == app->selector &&
|
|
|
|
itr->app.priority == app->priority &&
|
|
|
|
itr->ifindex == ifindex &&
|
|
|
|
((proto == -1) || itr->app.protocol == proto))
|
|
|
|
return itr;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-06-13 02:54:59 +00:00
|
|
|
static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
|
|
|
|
int ifindex, int prio)
|
|
|
|
{
|
|
|
|
struct dcb_app_type *itr;
|
|
|
|
|
|
|
|
list_for_each_entry(itr, &dcb_app_list, list) {
|
|
|
|
if (itr->app.selector == app->selector &&
|
|
|
|
itr->app.protocol == app->protocol &&
|
|
|
|
itr->ifindex == ifindex &&
|
2018-07-27 12:26:55 +00:00
|
|
|
((prio == -1) || itr->app.priority == prio))
|
2012-06-13 02:54:59 +00:00
|
|
|
return itr;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2023-01-18 21:08:25 +00:00
|
|
|
static int dcb_app_add(struct list_head *list, const struct dcb_app *app,
|
|
|
|
int ifindex)
|
2012-06-13 02:55:00 +00:00
|
|
|
{
|
|
|
|
struct dcb_app_type *entry;
|
|
|
|
|
|
|
|
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
|
|
|
|
if (!entry)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
memcpy(&entry->app, app, sizeof(*app));
|
|
|
|
entry->ifindex = ifindex;
|
2023-01-18 21:08:25 +00:00
|
|
|
list_add(&entry->list, list);
|
2012-06-13 02:55:00 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-12-30 09:26:31 +00:00
|
|
|
/**
|
|
|
|
* dcb_getapp - retrieve the DCBX application user priority
|
2020-10-28 01:09:13 +00:00
|
|
|
* @dev: network interface
|
|
|
|
* @app: application to get user priority of
|
2010-12-30 09:26:31 +00:00
|
|
|
*
|
|
|
|
* On success returns a non-zero 802.1p user priority bitmap
|
|
|
|
* otherwise returns 0 as the invalid user priority bitmap to
|
|
|
|
* indicate an error.
|
|
|
|
*/
|
|
|
|
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
|
|
|
|
{
|
|
|
|
struct dcb_app_type *itr;
|
|
|
|
u8 prio = 0;
|
|
|
|
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_lock_bh(&dcb_lock);
|
2018-07-27 12:26:55 +00:00
|
|
|
itr = dcb_app_lookup(app, dev->ifindex, -1);
|
|
|
|
if (itr)
|
2012-06-13 02:54:59 +00:00
|
|
|
prio = itr->app.priority;
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2010-12-30 09:26:31 +00:00
|
|
|
|
|
|
|
return prio;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcb_getapp);
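/* Hypothetical caller sketch (function name is illustrative): look up the
 * 802.1p user priority mask that the CEE APP table assigns to FCoE traffic.
 * DCB_APP_IDTYPE_ETHTYPE and ETH_P_FCOE are the standard identifiers from
 * the DCB and <linux/if_ether.h> UAPI headers.
 */
static u8 example_get_fcoe_up(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,
	};

	return dcb_getapp(dev, &app);
}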
|
|
|
|
|
|
|
|
/**
|
2011-06-21 07:34:42 +00:00
|
|
|
* dcb_setapp - add CEE dcb application data to app list
|
2020-10-28 01:09:13 +00:00
|
|
|
* @dev: network interface
|
|
|
|
* @new: application data to add
|
2010-12-30 09:26:31 +00:00
|
|
|
*
|
2011-06-21 07:34:42 +00:00
|
|
|
* Priority 0 is an invalid priority in CEE spec. This routine
|
|
|
|
* removes applications from the app list if the priority is
|
2014-07-29 03:57:07 +00:00
|
|
|
* set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap
|
2010-12-30 09:26:31 +00:00
|
|
|
*/
|
2011-06-21 07:34:58 +00:00
|
|
|
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
|
2010-12-30 09:26:31 +00:00
|
|
|
{
|
|
|
|
struct dcb_app_type *itr;
|
2011-01-31 12:00:59 +00:00
|
|
|
struct dcb_app_type event;
|
2012-06-13 02:55:00 +00:00
|
|
|
int err = 0;
|
2011-01-31 12:00:59 +00:00
|
|
|
|
2011-10-06 08:52:33 +00:00
|
|
|
event.ifindex = dev->ifindex;
|
2011-01-31 12:00:59 +00:00
|
|
|
memcpy(&event.app, new, sizeof(event.app));
|
2011-10-06 08:52:38 +00:00
|
|
|
if (dev->dcbnl_ops->getdcbx)
|
|
|
|
event.dcbx = dev->dcbnl_ops->getdcbx(dev);
|
2010-12-30 09:26:31 +00:00
|
|
|
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_lock_bh(&dcb_lock);
|
2010-12-30 09:26:31 +00:00
|
|
|
/* Search for existing match and replace */
|
2018-07-27 12:26:55 +00:00
|
|
|
itr = dcb_app_lookup(new, dev->ifindex, -1);
|
|
|
|
if (itr) {
|
2012-06-13 02:54:59 +00:00
|
|
|
if (new->priority)
|
|
|
|
itr->app.priority = new->priority;
|
|
|
|
else {
|
|
|
|
list_del(&itr->list);
|
|
|
|
kfree(itr);
|
2010-12-30 09:26:31 +00:00
|
|
|
}
|
2012-06-13 02:54:59 +00:00
|
|
|
goto out;
|
2010-12-30 09:26:31 +00:00
|
|
|
}
|
|
|
|
/* App type does not exist, add new application type */
|
2012-06-13 02:55:00 +00:00
|
|
|
if (new->priority)
|
2023-01-18 21:08:25 +00:00
|
|
|
err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
|
2010-12-30 09:26:31 +00:00
|
|
|
out:
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2012-06-13 02:55:00 +00:00
|
|
|
if (!err)
|
|
|
|
call_dcbevent_notifiers(DCB_APP_EVENT, &event);
|
|
|
|
return err;
|
2010-12-30 09:26:31 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcb_setapp);
|
|
|
|
|
2011-06-21 07:34:53 +00:00
|
|
|
/**
|
|
|
|
* dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
|
2020-10-28 01:09:13 +00:00
|
|
|
* @dev: network interface
|
|
|
|
* @app: where to store the retrieve application data
|
2011-06-21 07:34:53 +00:00
|
|
|
*
|
|
|
|
* Helper routine which on success returns a non-zero 802.1Qaz user
|
|
|
|
* priority bitmap otherwise returns 0 to indicate the dcb_app was
|
|
|
|
* not found in APP list.
|
|
|
|
*/
|
|
|
|
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
|
|
|
|
{
|
|
|
|
struct dcb_app_type *itr;
|
|
|
|
u8 prio = 0;
|
|
|
|
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_lock_bh(&dcb_lock);
|
2018-07-27 12:26:55 +00:00
|
|
|
itr = dcb_app_lookup(app, dev->ifindex, -1);
|
|
|
|
if (itr)
|
2012-06-13 02:54:59 +00:00
|
|
|
prio |= 1 << itr->app.priority;
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2011-06-21 07:34:53 +00:00
|
|
|
|
|
|
|
return prio;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcb_ieee_getapp_mask);
|
|
|
|
|
2023-01-18 21:08:27 +00:00
|
|
|
/* Get protocol value from rewrite entry. */
|
|
|
|
u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app)
|
|
|
|
{
|
|
|
|
struct dcb_app_type *itr;
|
|
|
|
u16 proto = 0;
|
|
|
|
|
|
|
|
spin_lock_bh(&dcb_lock);
|
|
|
|
itr = dcb_rewr_lookup(app, dev->ifindex, -1);
|
|
|
|
if (itr)
|
|
|
|
proto = itr->app.protocol;
|
|
|
|
spin_unlock_bh(&dcb_lock);
|
|
|
|
|
|
|
|
return proto;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcb_getrewr);
|
|
|
|
|
|
|
|
/* Add rewrite entry to the rewrite list. */
|
|
|
|
int dcb_setrewr(struct net_device *dev, struct dcb_app *new)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
spin_lock_bh(&dcb_lock);
|
|
|
|
/* Search for existing match and abort if found. */
|
|
|
|
if (dcb_rewr_lookup(new, dev->ifindex, new->protocol)) {
|
|
|
|
err = -EEXIST;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = dcb_app_add(&dcb_rewr_list, new, dev->ifindex);
|
|
|
|
out:
|
|
|
|
spin_unlock_bh(&dcb_lock);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcb_setrewr);
|
|
|
|
|
|
|
|
/* Delete rewrite entry from the rewrite list. */
|
|
|
|
int dcb_delrewr(struct net_device *dev, struct dcb_app *del)
|
|
|
|
{
|
|
|
|
struct dcb_app_type *itr;
|
|
|
|
int err = -ENOENT;
|
|
|
|
|
|
|
|
spin_lock_bh(&dcb_lock);
|
|
|
|
/* Search for existing match and remove it. */
|
|
|
|
itr = dcb_rewr_lookup(del, dev->ifindex, del->protocol);
|
|
|
|
if (itr) {
|
|
|
|
list_del(&itr->list);
|
|
|
|
kfree(itr);
|
|
|
|
err = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
spin_unlock_bh(&dcb_lock);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcb_delrewr);
|
|
|
|
|
2011-06-21 07:34:42 +00:00
|
|
|
/**
|
|
|
|
* dcb_ieee_setapp - add IEEE dcb application data to app list
|
2020-10-28 01:09:13 +00:00
|
|
|
* @dev: network interface
|
|
|
|
* @new: application data to add
|
2011-06-21 07:34:42 +00:00
|
|
|
*
|
|
|
|
* This adds Application data to the list. Multiple application
|
|
|
|
* entries may exist for the same selector and protocol as long
|
2014-07-29 03:57:07 +00:00
|
|
|
* as the priorities are different. Priority is expected to be a
|
|
|
|
* 3-bit unsigned integer
|
2011-06-21 07:34:42 +00:00
|
|
|
*/
|
|
|
|
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
|
|
|
|
{
|
|
|
|
struct dcb_app_type event;
|
|
|
|
int err = 0;
|
|
|
|
|
2011-10-06 08:52:33 +00:00
|
|
|
event.ifindex = dev->ifindex;
|
2011-06-21 07:34:42 +00:00
|
|
|
memcpy(&event.app, new, sizeof(event.app));
|
2011-10-06 08:52:38 +00:00
|
|
|
if (dev->dcbnl_ops->getdcbx)
|
|
|
|
event.dcbx = dev->dcbnl_ops->getdcbx(dev);
|
2011-06-21 07:34:42 +00:00
|
|
|
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_lock_bh(&dcb_lock);
|
2011-06-21 07:34:42 +00:00
|
|
|
/* Search for existing match and abort if found */
|
2012-06-13 02:54:59 +00:00
|
|
|
if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
|
|
|
|
err = -EEXIST;
|
|
|
|
goto out;
|
2011-06-21 07:34:42 +00:00
|
|
|
}
|
|
|
|
|
2023-01-18 21:08:25 +00:00
|
|
|
err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
|
2011-06-21 07:34:42 +00:00
|
|
|
out:
|
2014-11-15 00:38:31 +00:00
|
|
|
spin_unlock_bh(&dcb_lock);
|
2011-06-21 07:34:42 +00:00
|
|
|
if (!err)
|
|
|
|
call_dcbevent_notifiers(DCB_APP_EVENT, &event);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dcb_ieee_setapp);
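
/*
 * Example (illustrative sketch only): adding a DSCP APP entry that maps
 * DSCP 46 to priority 5. The numbers are hypothetical; only the calling
 * convention matters.
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
 *		.protocol = 46,
 *		.priority = 5,
 *	};
 *	int err = dcb_ieee_setapp(dev, &app);
 *
 * A second call with the same selector/protocol but a different priority
 * adds another entry rather than replacing this one.
 */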

/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 * @dev: network interface
 * @del: application data to delete
 *
 * This removes matching APP data from the APP list.
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, del, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and remove it. */
	itr = dcb_app_lookup(del, dev->ifindex, del->priority);
	if (itr) {
		list_del(&itr->list);
		kfree(itr);
		err = 0;
	}

	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);
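
/*
 * Example (illustrative sketch only): removing the entry installed in the
 * dcb_ieee_setapp() sketch above. The same selector/protocol/priority
 * triple must be passed back, since deletion requires an exact match.
 *
 *	err = dcb_ieee_delapp(dev, &app);
 *
 * -ENOENT indicates no matching entry was installed.
 */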

/* dcb_getrewr_prio_pcp_mask_map - For a given device, find mapping from
 * priorities to the PCP and DEI values assigned to that priority.
 */
void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev,
				   struct dcb_rewr_prio_pcp_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 prio;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_rewr_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == DCB_APP_SEL_PCP &&
		    itr->app.protocol < 16 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
			prio = itr->app.priority;
			p_map->map[prio] |= 1 << itr->app.protocol;
		}
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_getrewr_prio_pcp_mask_map);

/* dcb_getrewr_prio_dscp_mask_map - For a given device, find mapping from
 * priorities to the DSCP values assigned to that priority.
 */
void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev,
				    struct dcb_ieee_app_prio_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 prio;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_rewr_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
		    itr->app.protocol < 64 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
			prio = itr->app.priority;
			p_map->map[prio] |= 1ULL << itr->app.protocol;
		}
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_getrewr_prio_dscp_mask_map);
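
/*
 * Example (illustrative sketch only): walking the priority-to-DSCP rewrite
 * map in a driver. This assumes each map element is a 64-bit DSCP bit mask
 * per priority, as the 1ULL shifts above imply; program_prio_to_dscp() is a
 * hypothetical driver hook, not a kernel API.
 *
 *	struct dcb_ieee_app_prio_map dscp_map;
 *	int prio, dscp;
 *
 *	dcb_getrewr_prio_dscp_mask_map(dev, &dscp_map);
 *	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
 *		u64 dscp_bits = dscp_map.map[prio];
 *
 *		for (dscp = 0; dscp < 64; dscp++)
 *			if (dscp_bits & (1ULL << dscp))
 *				program_prio_to_dscp(dev, prio, dscp);
 *	}
 */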

/*
 * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
 * priorities to the DSCP values assigned to that priority. Initialize p_map
 * such that each map element holds a bit mask of DSCP values configured for
 * that priority by APP entries.
 */
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
					struct dcb_ieee_app_prio_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 prio;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
		    itr->app.protocol < 64 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
			prio = itr->app.priority;
			p_map->map[prio] |= 1ULL << itr->app.protocol;
		}
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
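
/*
 * Example (illustrative sketch only): using the APP-based map to decide
 * whether any DSCP-to-priority mappings are configured at all, e.g. before
 * a driver switches a port from PCP-based to DSCP-based prioritization.
 *
 *	struct dcb_ieee_app_prio_map prio_map;
 *	bool have_dscp = false;
 *	int prio;
 *
 *	dcb_ieee_getapp_prio_dscp_mask_map(dev, &prio_map);
 *	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
 *		if (prio_map.map[prio])
 *			have_dscp = true;
 */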

/*
 * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
 * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
 * such that each map element holds a bit mask of priorities configured for a
 * given DSCP value by APP entries.
 */
void
dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
				   struct dcb_ieee_app_dscp_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
		    itr->app.protocol < 64 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS)
			p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
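
/*
 * Example (illustrative sketch only): resolving the priority for a single
 * DSCP code point; the map is indexed by DSCP value, as the assignment above
 * shows. When several APP entries map the same DSCP value to different
 * priorities, the caller chooses a policy; taking the highest set bit with
 * fls() is one option. DSCP 46 is a hypothetical value.
 *
 *	struct dcb_ieee_app_dscp_map dscp_map;
 *	u8 prio_bits;
 *	int prio = 0;
 *
 *	dcb_ieee_getapp_dscp_prio_mask_map(dev, &dscp_map);
 *	prio_bits = dscp_map.map[46];
 *	if (prio_bits)
 *		prio = fls(prio_bits) - 1;
 */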

/*
 * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
 * type, with valid PID values >= 1536. A special meaning is then assigned to
 * a protocol value of 0: "default priority. For use when priority is not
 * otherwise specified".
 *
 * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
 * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
 * priorities set by these entries.
 */
u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 mask = 0;

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
		    itr->app.protocol == 0 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS)
			mask |= 1 << itr->app.priority;
	}
	spin_unlock_bh(&dcb_lock);

	return mask;
}
EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
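
/*
 * Example (illustrative sketch only): deriving a port-default priority from
 * the returned mask. With several {PRIO, ETHERTYPE, 0} entries installed,
 * more than one bit may be set; picking the highest one is a common driver
 * policy, not something this helper mandates.
 *
 *	u8 default_prio_mask = dcb_ieee_getapp_default_prio_mask(dev);
 *	u8 default_prio = default_prio_mask ? fls(default_prio_mask) - 1 : 0;
 */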

static void dcbnl_flush_dev(struct net_device *dev)
{
	struct dcb_app_type *itr, *tmp;

	spin_lock_bh(&dcb_lock);

	list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
		if (itr->ifindex == dev->ifindex) {
			list_del(&itr->list);
			kfree(itr);
		}
	}

	spin_unlock_bh(&dcb_lock);
}

static int dcbnl_netdevice_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UNREGISTER:
		if (!dev->dcbnl_ops)
			return NOTIFY_DONE;

		dcbnl_flush_dev(dev);

		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block dcbnl_nb __read_mostly = {
	.notifier_call = dcbnl_netdevice_event,
};

static int __init dcbnl_init(void)
{
	int err;

	err = register_netdevice_notifier(&dcbnl_nb);
	if (err)
		return err;

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);

	return 0;
}
device_initcall(dcbnl_init);