// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8 tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 short_length:6,
	   unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 unused:2,
	   short_length:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
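
/* For illustration: on the wire, the SecTAG that follows the two MAC
 * addresses is the 2-octet MACsec EtherType (0x88E5), the TCI/AN octet,
 * the SL octet, the 4-octet packet number, and the optional 8-octet
 * SCI. MACSEC_TAG_LEN covers the 6 octets after the EtherType, so a
 * full SecTAG is 8 octets without the SCI and 16 octets with it.
 */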

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
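
/* pn_same_half() only compares bit 31: two PNs are in the same half of
 * the 32-bit space when their top bits match. For example,
 * pn_same_half(0x00000001, 0x7fffffff) is true, while
 * pn_same_half(0x7fffffff, 0x80000000) is false.
 */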

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
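
/* Both layouts fill the 12 bytes of a GCM_AES_IV_LEN nonce: the
 * standard mode concatenates the 8-byte SCI with the 4-byte PN, while
 * XPN concatenates the 4-byte SSCI with the 8-byte extended PN (each
 * XORed with the per-SA salt, see macsec_fill_iv_xpn() below).
 */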

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: the Generic Receive Offload cells for this device
 * @offload: status of offloading on the MACsec device
 * @insert_tx_tag: when offloading, the device requires an additional
 *	tag to be inserted
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	netdevice_tracker dev_tracker;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
	bool insert_tx_tag;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
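
/* The SA/SC lookup helpers below follow the usual RCU plus refcount
 * pattern: the object is looked up under rcu_read_lock_bh(), and
 * refcount_inc_not_zero() ensures that an object whose last reference
 * is concurrently being dropped is treated as not found instead of
 * being resurrected.
 */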

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++) {
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW ((1 << 30) - 1)
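
/* A 64-bit SCI is the concatenation of the 6-byte MAC address of the
 * transmitting port and a 2-byte port identifier, both in network
 * order. As an illustrative example, a station at 52:54:00:12:34:56
 * using the default end-station port (MACSEC_PORT_ES, 1) would send
 * with SCI 52:54:00:12:34:56/0001.
 */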

static sci_t make_sci(const u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}
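
/* With MACSEC_TAG_LEN = 6, MACSEC_SCI_LEN = 8 and ETH_HLEN = 14, the
 * helpers above work out to sectag_len = 6 or 14, hdr_len = 20 or 28,
 * and extra_len (SecTAG plus the 2-octet EtherType) = 8 or 16, the
 * larger value of each pair applying when the optional SCI is present.
 */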

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}
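
/* The TCI/AN octet built above packs, from MSB to LSB: V (version),
 * ES, SC, SCB, E (encryption) and C (changed text) flag bits, plus the
 * 2-bit association number, which is why encoding_sa can simply be
 * OR-ed into the low bits of tci_an.
 */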

static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}
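
/* Example: 40 octets of secure data yield SL = 40, while any payload
 * of MIN_NON_SHORT_LEN (48) octets or more leaves SL = 0, as the field
 * was zeroed by macsec_fill_sectag().
 */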

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
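
/* For XPN, the 96-bit GCM nonce is (SSCI ^ salt bytes 0-3) followed by
 * (64-bit PN ^ salt bytes 4-11), per IEEE 802.1AEbw. As a sketch with
 * made-up values: SSCI 0x00000001, PN 0x2 and an all-zero salt give
 * the nonce 00 00 00 01 00 00 00 00 00 00 00 02.
 */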

static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}
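
/* pn_t (from net/macsec.h) is a union of the 64-bit full64 value and
 * its 32-bit lower/upper halves, which lets the code above advance the
 * full 64-bit PN for XPN but only the lower half otherwise, leaving
 * the upper word untouched on non-XPN SAs.
 */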

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
		dev_sw_netstats_tx_add(dev, 1, len);
}

static void macsec_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}
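
/* macsec_alloc_req() carves the AEAD request, the IV and the
 * scatterlist array out of a single kmalloc() so that one allocation
 * (and one free via aead_request_free()) covers all of the per-packet
 * crypto state on the fast path.
 */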

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}
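
/* In encrypting mode the associated data covers the MACs plus the
 * SecTAG and the payload is encrypted in place; in integrity-only mode
 * the whole frame is associated data and the cryptlen is 0, so GCM
 * only produces the ICV. macsec_encrypt() below sets up the AEAD
 * request accordingly.
 */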

static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = macsec_send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* Instead of "pn >=" - to support pn overflow in xpn */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	dev_sw_netstats_rx_add(dev, len);
}

static void macsec_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}
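
/* For XPN, only the 32 low bits of the PN are carried in the SecTAG,
 * so macsec_decrypt() reconstructs the full 64-bit PN from the SA's
 * expected next PN. Illustrative example: expecting 0x1fffffff0 (upper
 * word 1) and seeing header PN 0x00000005, the halves differ, the
 * upper word is bumped, and the recovered PN is 0x200000005.
 */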

static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}

static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc;

	for_each_rxsc_rtnl(secy, rx_sc) {
		if (rx_sc->sci == sci)
			return rx_sc;
	}

	return NULL;
}
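
/* Frames without the MACsec EtherType end up here: by default they
 * stay on the uncontrolled port (RX_HANDLER_PASS). An offloaded
 * interface may claim them, since its hardware has already stripped
 * the SecTAG, and a non-strict software SecY additionally receives a
 * clone, counted as InPktsUntagged.
 */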

static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
{
	/* Deliver to the uncontrolled port by default */
	enum rx_handler_result ret = RX_HANDLER_PASS;
	struct ethhdr *hdr = eth_hdr(skb);
	struct metadata_dst *md_dst;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	bool is_macsec_md_dst;

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);
	md_dst = skb_metadata_dst(skb);
	is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
		struct net_device *ndev = macsec->secy.netdev;

		/* If h/w offloading is enabled, HW decodes frames and strips
		 * the SecTAG, so we have to deduce which port to deliver to.
		 */
		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
			const struct macsec_ops *ops;

			ops = macsec_get_ops(macsec, NULL);

			if (ops->rx_uses_md_dst && !is_macsec_md_dst)
				continue;

			if (is_macsec_md_dst) {
				struct macsec_rx_sc *rx_sc;

				/* All drivers that implement MACsec offload
				 * support using skb metadata destinations must
				 * indicate that they do so.
				 */
				DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
				rx_sc = find_rx_sc(&macsec->secy,
						   md_dst->u.macsec_info.sci);
				if (!rx_sc)
					continue;
				/* device indicated macsec offload occurred */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				eth_skb_pkt_type(skb, ndev);
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			/* This datapath is insecure because it is unable to
			 * enforce isolation of broadcast/multicast traffic and
			 * unicast traffic with promiscuous mode on the macsec
			 * netdev. Since the core stack has no mechanism to
			 * check that the hardware did indeed receive MACsec
			 * traffic, it is possible that the response handling
			 * done by the MACsec port was to a plaintext packet.
			 * This violates the MACsec protocol standard.
			 */
			if (ether_addr_equal_64bits(hdr->h_dest,
						    ndev->dev_addr)) {
				/* exact match, divert skb to this port */
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			} else if (is_multicast_ether_addr_64bits(
					   hdr->h_dest)) {
				/* multicast frame, deliver on this port too */
				nskb = skb_clone(skb, GFP_ATOMIC);
				if (!nskb)
					break;

				nskb->dev = ndev;
				eth_skb_pkt_type(nskb, ndev);

				__netif_rx(nskb);
			} else if (ndev->flags & IFF_PROMISC) {
				skb->dev = ndev;
				skb->pkt_type = PACKET_HOST;
				ret = RX_HANDLER_ANOTHER;
				goto out;
			}

			continue;
		}

		/* 10.6 If the management control validateFrames is not
		 * Strict, frames without a SecTAG are received, counted, and
		 * delivered to the Controlled Port
		 */
		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoTag++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			continue;
		}

		/* deliver on this port */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		nskb->dev = ndev;

		if (__netif_rx(nskb) == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUntagged++;
			u64_stats_update_end(&secy_stats->syncp);
		}
	}

out:
	rcu_read_unlock();
	return ret;
}

static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);

		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			if (active_rx_sa)
				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		if (active_rx_sa)
			this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = __netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}
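
/* One "gcm(aes)" transform is allocated per SA key: the SAK becomes
 * the AES key (16 or 32 bytes for GCM-AES-128/256) and the authsize is
 * set to the SecY's ICV length, which truncates the GCM tag when a
 * shorter ICV is configured.
 */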

static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}
|
|
|
|
|
|
|
|
static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

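/* Generic netlink plumbing. All MACSEC_CMD_* handlers below resolve the
 * target interface from MACSEC_ATTR_IFINDEX while holding RTNL, which
 * serializes configuration changes against link changes.
 */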
static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u32)value);
}

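/* The get_*_from_nl() helpers translate netlink attributes into the kernel
 * objects they designate (device, SecY, SC, SA), filling the out parameters
 * only once every lookup has succeeded.
 */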
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

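/* Attribute policies. SA and RX SC parameters arrive nested under
 * MACSEC_ATTR_SA_CONFIG and MACSEC_ATTR_RXSC_CONFIG. MACSEC_SA_ATTR_PN only
 * carries a minimum length here because its exact size (4 or 8 bytes)
 * depends on whether the SecY uses XPN, which is checked per command.
 */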
static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	ret = (*func)(ctx);

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

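/* Unpack the nested SA / RX SC configuration attributes into per-command
 * attribute tables, validating them against the policies above.
 */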
static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX,
					attrs[MACSEC_ATTR_SA_CONFIG],
					macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
					attrs[MACSEC_ATTR_RXSC_CONFIG],
					macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

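/* The handlers below implement the MACSEC_CMD_* add/del/update commands.
 * They are typically driven from userspace via iproute2, along the lines of
 * (illustrative only):
 *
 *	ip macsec add macsec0 rx sci 1234567890abcdef
 *	ip macsec add macsec0 rx sci 1234567890abcdef sa 0 pn 1 on key 01 <hex key>
 *
 * Each handler validates the request, takes RTNL, updates the software SecY
 * state and mirrors the change to the offloading device when one is
 * configured, rolling the software state back if the hardware rejects it.
 */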
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}

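/* Unlike an RX SA, a TX SA requires an initial packet number:
 * MACSEC_SA_ATTR_PN must be present and non-zero.
 */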
static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational;
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_add_txsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
	if (tx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(tx_sa);
		rtnl_unlock();
		return err;
	}

	spin_lock_bh(&tx_sa->lock);
	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
	spin_unlock_bh(&tx_sa->lock);

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
		secy->operational = true;

	if (secy->xpn) {
		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_txsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	secy->operational = was_operational;
	macsec_txsa_put(tx_sa);
	rtnl_unlock();
	return err;
}

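/* Deletion handlers. An SA that is still marked active cannot be removed
 * (-EBUSY); userspace must deactivate it first.
 */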
static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (rx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
	clear_rx_sa(rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	rx_sc = del_rx_sc(secy, sci);
	if (!rx_sc) {
		rtnl_unlock();
		return -ENODEV;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;
		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	free_rx_sc(rx_sc);
	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tx_sa->active) {
		rtnl_unlock();
		return -EBUSY;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
	clear_tx_sa(tx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	rtnl_unlock();
	return ret;
}

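/* Updates may change the packet number and active flag of an existing SA,
 * but never its key material: MACSEC_SA_ATTR_KEY/KEYID/SSCI/SALT are
 * rejected by validate_upd_sa(). Rekeying is done by installing a fresh SA
 * under another AN instead.
 */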
static bool validate_upd_sa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    attrs[MACSEC_SA_ATTR_KEY] ||
	    attrs[MACSEC_SA_ATTR_KEYID] ||
	    attrs[MACSEC_SA_ATTR_SSCI] ||
	    attrs[MACSEC_SA_ATTR_SALT])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	u8 assoc_num;
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_operational, was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
				 &dev, &secy, &tx_sc, &assoc_num);
	if (IS_ERR(tx_sa)) {
		rtnl_unlock();
		return PTR_ERR(tx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&tx_sa->lock);
		prev_pn = tx_sa->next_pn_halves;
		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&tx_sa->lock);
	}

	was_active = tx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	was_operational = secy->operational;
	if (assoc_num == tx_sc->encoding_sa)
		secy->operational = tx_sa->active;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.tx_sa = tx_sa;
		ctx.sa.update_pn = !!prev_pn.full64;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&tx_sa->lock);
		tx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&tx_sa->lock);
	}
	tx_sa->active = was_active;
	secy->operational = was_operational;
	rtnl_unlock();
	return ret;
}

static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	u8 assoc_num;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	bool was_active;
	pn_t prev_pn;
	int ret = 0;

	prev_pn.full64 = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (!validate_upd_sa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
				 &dev, &secy, &rx_sc, &assoc_num);
	if (IS_ERR(rx_sa)) {
		rtnl_unlock();
		return PTR_ERR(rx_sa);
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		int pn_len;

		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
			rtnl_unlock();
			return -EINVAL;
		}

		spin_lock_bh(&rx_sa->lock);
		prev_pn = rx_sa->next_pn_halves;
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	was_active = rx_sa->active;
	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.sa.update_pn = !!prev_pn.full64;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();
	return 0;

cleanup:
	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn_halves = prev_pn;
		spin_unlock_bh(&rx_sa->lock);
	}
	rx_sa->active = was_active;
	rtnl_unlock();
	return ret;
}

static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **attrs = info->attrs;
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	unsigned int prev_n_rx_sc;
	bool was_active;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	was_active = rx_sc->active;
	prev_n_rx_sc = secy->n_rx_sc;
	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

		if (rx_sc->active != new)
			secy->n_rx_sc += new ? 1 : -1;

		rx_sc->active = new;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	secy->n_rx_sc = prev_n_rx_sc;
	rx_sc->active = was_active;
	rtnl_unlock();
	return ret;
}

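/* Switching the offload mode is only allowed while the device is down and
 * carries no SCs or SAs, since configured rules are not migrated between
 * the software and hardware implementations.
 */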
static bool macsec_is_configured(struct macsec_dev *macsec)
{
	struct macsec_secy *secy = &macsec->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	int i;

	if (secy->rx_sc)
		return true;

	for (i = 0; i < MACSEC_NUM_AN; i++)
		if (tx_sc->sa[i])
			return true;

	return false;
}

static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
				const struct macsec_ops *ops)
{
	return macsec->offload == MACSEC_OFFLOAD_PHY &&
		ops->mdo_insert_tx_tag;
}

static void macsec_set_head_tail_room(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int needed_headroom, needed_tailroom;
	const struct macsec_ops *ops;

	ops = macsec_get_ops(macsec, NULL);
	if (ops) {
		needed_headroom = ops->needed_headroom;
		needed_tailroom = ops->needed_tailroom;
	} else {
		needed_headroom = MACSEC_NEEDED_HEADROOM;
		needed_tailroom = MACSEC_NEEDED_TAILROOM;
	}

	dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
	dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
}

static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
	enum macsec_offload prev_offload;
	const struct macsec_ops *ops;
	struct macsec_context ctx;
	struct macsec_dev *macsec;
	int ret = 0;

	macsec = macsec_priv(dev);

	/* Check if the offloading mode is supported by the underlying layers */
	if (offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(offload, macsec))
		return -EOPNOTSUPP;

	/* Check if the net device is busy. */
	if (netif_running(dev))
		return -EBUSY;

	/* Check if the device already has rules configured: we do not support
	 * rules migration.
	 */
	if (macsec_is_configured(macsec))
		return -EBUSY;

	prev_offload = macsec->offload;

	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
			       macsec, &ctx);
	if (!ops)
		return -EOPNOTSUPP;

	macsec->offload = offload;

	ctx.secy = &macsec->secy;
	ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
					    : macsec_offload(ops->mdo_add_secy, &ctx);
	if (ret) {
		macsec->offload = prev_offload;
		return ret;
	}

	macsec_set_head_tail_room(dev);
	macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);

	return ret;
}

static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
	struct nlattr **attrs = info->attrs;
	enum macsec_offload offload;
	struct macsec_dev *macsec;
	struct net_device *dev;
	int ret = 0;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (!attrs[MACSEC_ATTR_OFFLOAD])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
					attrs[MACSEC_ATTR_OFFLOAD],
					macsec_genl_offload_policy, NULL))
		return -EINVAL;

	rtnl_lock();

	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	macsec = macsec_priv(dev);

	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
		ret = -EINVAL;
		goto out;
	}

	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);

	if (macsec->offload != offload)
		ret = macsec_update_offload(dev, offload);
out:
	rtnl_unlock();
	return ret;
}

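/* Statistics. Each get_*_stats() helper either queries the offloading
 * device for its counters or sums the software per-CPU counters; the
 * matching copy_*_stats() helper then dumps the totals as netlink
 * attributes.
 */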
static void get_tx_sa_stats(struct net_device *dev, int an,
			    struct macsec_tx_sa *tx_sa,
			    struct macsec_tx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.tx_sa = tx_sa;
			ctx.stats.tx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_tx_sa_stats *stats =
			per_cpu_ptr(tx_sa->stats, cpu);

		sum->OutPktsProtected += stats->OutPktsProtected;
		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
	}
}

static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
			sum->OutPktsProtected) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			sum->OutPktsEncrypted))
		return -EMSGSIZE;

	return 0;
}

static void get_rx_sa_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc, int an,
			    struct macsec_rx_sa *rx_sa,
			    struct macsec_rx_sa_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.sa.assoc_num = an;
			ctx.sa.rx_sa = rx_sa;
			ctx.stats.rx_sa_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct macsec_rx_sa_stats *stats =
			per_cpu_ptr(rx_sa->stats, cpu);

		sum->InPktsOK += stats->InPktsOK;
		sum->InPktsInvalid += stats->InPktsInvalid;
		sum->InPktsNotValid += stats->InPktsNotValid;
		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
	}
}

static int copy_rx_sa_stats(struct sk_buff *skb,
			    struct macsec_rx_sa_stats *sum)
{
	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
			sum->InPktsInvalid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
			sum->InPktsNotValid) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			sum->InPktsNotUsingSA) ||
	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
			sum->InPktsUnusedSA))
		return -EMSGSIZE;

	return 0;
}

static void get_rx_sc_stats(struct net_device *dev,
			    struct macsec_rx_sc *rx_sc,
			    struct macsec_rx_sc_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.rx_sc_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			ctx.rx_sc = rx_sc;
			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_rx_sc_stats *stats;
		struct macsec_rx_sc_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(rx_sc->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->InOctetsValidated += tmp.InOctetsValidated;
		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
		sum->InPktsUnchecked += tmp.InPktsUnchecked;
		sum->InPktsDelayed += tmp.InPktsDelayed;
		sum->InPktsOK += tmp.InPktsOK;
		sum->InPktsInvalid += tmp.InPktsInvalid;
		sum->InPktsLate += tmp.InPktsLate;
		sum->InPktsNotValid += tmp.InPktsNotValid;
		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
	}
}

static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
			      sum->InOctetsValidated,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
			      sum->InOctetsDecrypted,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
			      sum->InPktsUnchecked,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
			      sum->InPktsDelayed,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
			      sum->InPktsOK,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
			      sum->InPktsInvalid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
			      sum->InPktsLate,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
			      sum->InPktsNotValid,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
			      sum->InPktsNotUsingSA,
			      MACSEC_RXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
			      sum->InPktsUnusedSA,
			      MACSEC_RXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static void get_tx_sc_stats(struct net_device *dev,
|
|
|
|
struct macsec_tx_sc_stats *sum)
|
2016-03-11 17:07:33 +00:00
|
|
|
{
|
2020-03-25 12:52:37 +00:00
|
|
|
struct macsec_dev *macsec = macsec_priv(dev);
|
2016-03-11 17:07:33 +00:00
|
|
|
int cpu;
|
|
|
|
|
2020-03-25 12:52:37 +00:00
|
|
|
/* If h/w offloading is available, propagate to the device */
|
|
|
|
if (macsec_is_offloaded(macsec)) {
|
|
|
|
const struct macsec_ops *ops;
|
|
|
|
struct macsec_context ctx;
|
|
|
|
|
|
|
|
ops = macsec_get_ops(macsec, &ctx);
|
|
|
|
if (ops) {
|
|
|
|
ctx.stats.tx_sc_stats = sum;
|
|
|
|
ctx.secy = &macsec_priv(dev)->secy;
|
|
|
|
macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-03-11 17:07:33 +00:00
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
const struct pcpu_tx_sc_stats *stats;
|
|
|
|
struct macsec_tx_sc_stats tmp;
|
|
|
|
unsigned int start;
|
|
|
|
|
2020-03-25 12:52:37 +00:00
|
|
|
stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
|
2016-03-11 17:07:33 +00:00
|
|
|
do {
|
2022-10-26 13:22:14 +00:00
|
|
|
start = u64_stats_fetch_begin(&stats->syncp);
|
2016-03-11 17:07:33 +00:00
|
|
|
memcpy(&tmp, &stats->stats, sizeof(tmp));
|
2022-10-26 13:22:14 +00:00
|
|
|
} while (u64_stats_fetch_retry(&stats->syncp, start));
|
2016-03-11 17:07:33 +00:00
|
|
|
|
2020-03-25 12:52:37 +00:00
|
|
|
sum->OutPktsProtected += tmp.OutPktsProtected;
|
|
|
|
sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
|
|
|
|
sum->OutOctetsProtected += tmp.OutOctetsProtected;
|
|
|
|
sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
|
2016-03-11 17:07:33 +00:00
|
|
|
}
|
2020-03-25 12:52:37 +00:00
|
|
|
}
static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
			      sum->OutPktsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
			      sum->OutPktsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
			      sum->OutOctetsProtected,
			      MACSEC_TXSC_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
			      sum->OutOctetsEncrypted,
			      MACSEC_TXSC_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	int cpu;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.stats.dev_stats = sum;
			ctx.secy = &macsec_priv(dev)->secy;
			macsec_offload(ops->mdo_get_dev_stats, &ctx);
		}
		return;
	}

	for_each_possible_cpu(cpu) {
		const struct pcpu_secy_stats *stats;
		struct macsec_dev_stats tmp;
		unsigned int start;

		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			memcpy(&tmp, &stats->stats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		sum->OutPktsUntagged += tmp.OutPktsUntagged;
		sum->InPktsUntagged += tmp.InPktsUntagged;
		sum->OutPktsTooLong += tmp.OutPktsTooLong;
		sum->InPktsNoTag += tmp.InPktsNoTag;
		sum->InPktsBadTag += tmp.InPktsBadTag;
		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
		sum->InPktsNoSCI += tmp.InPktsNoSCI;
		sum->InPktsOverrun += tmp.InPktsOverrun;
	}
}

static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
{
	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
			      sum->OutPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
			      sum->InPktsUntagged,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
			      sum->OutPktsTooLong,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
			      sum->InPktsNoTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
			      sum->InPktsBadTag,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
			      sum->InPktsUnknownSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
			      sum->InPktsNoSCI,
			      MACSEC_SECY_STATS_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
			      sum->InPktsOverrun,
			      MACSEC_SECY_STATS_ATTR_PAD))
		return -EMSGSIZE;

	return 0;
}

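/* The netlink API identifies the cipher suite by a 64-bit ID, while the SecY
 * only stores the SAK length plus an XPN flag; nla_put_secy() therefore maps
 * (key_len, xpn) back to one of the four GCM-AES suite IDs. For 128-bit keys
 * without XPN, the legacy MACSEC_DEFAULT_CIPHER_ID is reported rather than
 * MACSEC_CIPHER_ID_GCM_AES_128, matching what older kernels exposed.
 */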
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
							 MACSEC_ATTR_SECY);
	u64 csid;

	if (!secy_nest)
		return 1;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto cancel;
	}

	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
			MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
			      csid, MACSEC_SECY_ATTR_PAD) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
		goto cancel;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
			goto cancel;
	}

	nla_nest_end(skb, secy_nest);
	return 0;

cancel:
	nla_nest_cancel(skb, secy_nest);
	return 1;
}

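/* dump_secy() emits one MACSEC_CMD_GET_TXSC message per device, roughly laid
 * out as follows (a sketch of the nesting, not an exhaustive list):
 *
 *	MACSEC_ATTR_IFINDEX
 *	MACSEC_ATTR_OFFLOAD { MACSEC_OFFLOAD_ATTR_TYPE }
 *	MACSEC_ATTR_SECY { SCI, cipher suite, flags, ... }
 *	MACSEC_ATTR_TXSC_STATS { ... }
 *	MACSEC_ATTR_SECY_STATS { ... }
 *	MACSEC_ATTR_TXSA_LIST { 1 { AN, PN, key id, stats, ... }, 2 { ... } }
 *	MACSEC_ATTR_RXSC_LIST { 1 { SCI, stats, MACSEC_RXSC_ATTR_SA_LIST }, ... }
 *
 * Every nest is cancelled on failure so a partially filled skb is never
 * handed back to the dump machinery.
 */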
static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev,
	  struct sk_buff *skb, struct netlink_callback *cb)
{
	struct macsec_tx_sc_stats tx_sc_stats = {0, };
	struct macsec_tx_sa_stats tx_sa_stats = {0, };
	struct macsec_rx_sc_stats rx_sc_stats = {0, };
	struct macsec_rx_sa_stats rx_sa_stats = {0, };
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_dev_stats dev_stats = {0, };
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	struct nlattr *txsa_list, *rxsc_list;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *attr;
	void *hdr;
	int i, j;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
	if (!hdr)
		return -EMSGSIZE;

	genl_dump_check_consistent(cb, hdr);

	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
	if (!attr)
		goto nla_put_failure;
	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
		goto nla_put_failure;
	nla_nest_end(skb, attr);

	if (nla_put_secy(secy, skb))
		goto nla_put_failure;

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
	if (!attr)
		goto nla_put_failure;

	get_tx_sc_stats(dev, &tx_sc_stats);
	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
	if (!attr)
		goto nla_put_failure;
	get_secy_stats(dev, &dev_stats);
	if (copy_secy_stats(skb, &dev_stats)) {
		nla_nest_cancel(skb, attr);
		goto nla_put_failure;
	}
	nla_nest_end(skb, attr);

	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
	if (!txsa_list)
		goto nla_put_failure;
	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
		struct nlattr *txsa_nest;
		u64 pn;
		int pn_len;

		if (!tx_sa)
			continue;

		txsa_nest = nla_nest_start_noflag(skb, j++);
		if (!txsa_nest) {
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		if (secy->xpn) {
			pn = tx_sa->next_pn;
			pn_len = MACSEC_XPN_PN_LEN;
		} else {
			pn = tx_sa->next_pn_halves.lower;
			pn_len = MACSEC_DEFAULT_PN_LEN;
		}

		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
			nla_nest_cancel(skb, txsa_nest);
			nla_nest_cancel(skb, txsa_list);
			goto nla_put_failure;
		}

		nla_nest_end(skb, txsa_nest);
	}
	nla_nest_end(skb, txsa_list);

	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
	if (!rxsc_list)
		goto nla_put_failure;

	j = 1;
	for_each_rxsc_rtnl(secy, rx_sc) {
		int k;
		struct nlattr *rxsa_list;
		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);

		if (!rxsc_nest) {
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
				MACSEC_RXSC_ATTR_PAD)) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
		if (!attr) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
			nla_nest_cancel(skb, attr);
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}
		nla_nest_end(skb, attr);

		rxsa_list = nla_nest_start_noflag(skb,
						  MACSEC_RXSC_ATTR_SA_LIST);
		if (!rxsa_list) {
			nla_nest_cancel(skb, rxsc_nest);
			nla_nest_cancel(skb, rxsc_list);
			goto nla_put_failure;
		}

		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
			struct nlattr *rxsa_nest;
			u64 pn;
			int pn_len;

			if (!rx_sa)
				continue;

			rxsa_nest = nla_nest_start_noflag(skb, k++);
			if (!rxsa_nest) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}

			attr = nla_nest_start_noflag(skb,
						     MACSEC_SA_ATTR_STATS);
			if (!attr) {
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
				nla_nest_cancel(skb, attr);
				nla_nest_cancel(skb, rxsa_list);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, attr);

			if (secy->xpn) {
				pn = rx_sa->next_pn;
				pn_len = MACSEC_XPN_PN_LEN;
			} else {
				pn = rx_sa->next_pn_halves.lower;
				pn_len = MACSEC_DEFAULT_PN_LEN;
			}

			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
				nla_nest_cancel(skb, rxsa_nest);
				nla_nest_cancel(skb, rxsc_nest);
				nla_nest_cancel(skb, rxsc_list);
				goto nla_put_failure;
			}
			nla_nest_end(skb, rxsa_nest);
		}

		nla_nest_end(skb, rxsa_list);
		nla_nest_end(skb, rxsc_nest);
	}

	nla_nest_end(skb, rxsc_list);

	genlmsg_end(skb, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

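/* macsec_generation is bumped on every link creation/deletion (under RTNL).
 * macsec_dump_txsc() copies it into cb->seq so that, together with
 * genl_dump_check_consistent() in dump_secy(), messages get NLM_F_DUMP_INTR
 * set if the set of devices changed in the middle of a dump.
 */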
static int macsec_generation = 1; /* protected by RTNL */

static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int dev_idx, d;

	dev_idx = cb->args[0];

	d = 0;
	rtnl_lock();

	cb->seq = macsec_generation;

	for_each_netdev(net, dev) {
		struct macsec_secy *secy;

		if (d < dev_idx)
			goto next;

		if (!netif_is_macsec(dev))
			goto next;

		secy = &macsec_priv(dev)->secy;
		if (dump_secy(secy, dev, skb, cb) < 0)
			goto done;
next:
		d++;
	}

done:
	rtnl_unlock();
	cb->args[0] = d;
	return skb->len;
}

static const struct genl_small_ops macsec_genl_ops[] = {
	{
		.cmd = MACSEC_CMD_GET_TXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = macsec_dump_txsc,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSC,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsc,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_TXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_txsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_ADD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_add_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_DEL_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_del_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_RXSA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_rxsa,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = MACSEC_CMD_UPD_OFFLOAD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = macsec_upd_offload,
		.flags = GENL_ADMIN_PERM,
	},
};

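/* All commands that modify state are marked GENL_ADMIN_PERM and thus require
 * CAP_NET_ADMIN; only the GET_TXSC dump is unprivileged. The ops fit in
 * genl_small_ops, which omits the per-op policy and start/done hooks of the
 * full genl_ops.
 */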
static struct genl_family macsec_fam __ro_after_init = {
	.name = MACSEC_GENL_NAME,
	.hdrsize = 0,
	.version = MACSEC_GENL_VERSION,
	.maxattr = MACSEC_ATTR_MAX,
	.policy = macsec_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = macsec_genl_ops,
	.n_small_ops = ARRAY_SIZE(macsec_genl_ops),
	.resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};

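/* Some offloading PHYs prepend a vendor tag to the frame so the hardware can
 * pick the right SecY. macsec_insert_tx_tag() below makes room for that tag:
 * the final length check uses the ops-provided needed_headroom and
 * needed_tailroom, and skb_ensure_writable_head_tail() reallocates the
 * head/tail if the skb is cloned or too small before mdo_insert_tx_tag()
 * writes the tag.
 */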
static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	const struct macsec_ops *ops;
	struct phy_device *phydev;
	struct macsec_context ctx;
	int skb_final_len;
	int err;

	ops = macsec_get_ops(macsec, &ctx);
	skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
		ops->needed_tailroom;
	if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
		err = -EINVAL;
		goto cleanup;
	}

	phydev = macsec->real_dev->phydev;

	err = skb_ensure_writable_head_tail(skb, dev);
	if (unlikely(err < 0))
		goto cleanup;

	err = ops->mdo_insert_tx_tag(phydev, skb);
	if (unlikely(err))
		goto cleanup;

	return skb;
cleanup:
	kfree_skb(skb);
	return ERR_PTR(err);
}

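/* Transmit entry point. Three paths:
 *  - offloaded: attach the SecY's metadata_dst (so the driver can map the skb
 *    back to the right SecY), optionally insert the offload TX tag, and hand
 *    the frame straight to the real device;
 *  - protect_frames disabled: count OutPktsUntagged and send in clear text
 *    (IEEE 802.1AE-2006 10.5);
 *  - otherwise: software encryption via macsec_encrypt(), which may complete
 *    asynchronously (-EINPROGRESS).
 */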
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);
	struct macsec_secy *secy = &macsec->secy;
	struct pcpu_secy_stats *secy_stats;
	int ret, len;

	if (macsec_is_offloaded(netdev_priv(dev))) {
		struct metadata_dst *md_dst = secy->tx_sc.md_dst;

		skb_dst_drop(skb);
		dst_hold(&md_dst->dst);
		skb_dst_set(skb, &md_dst->dst);

		if (macsec->insert_tx_tag) {
			skb = macsec_insert_tx_tag(skb, dev);
			if (IS_ERR(skb)) {
				DEV_STATS_INC(dev, tx_dropped);
				return NETDEV_TX_OK;
			}
		}

		skb->dev = macsec->real_dev;
		return dev_queue_xmit(skb);
	}

	/* 10.5 */
	if (!secy->protect_frames) {
		secy_stats = this_cpu_ptr(macsec->stats);
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsUntagged++;
		u64_stats_update_end(&secy_stats->syncp);
		skb->dev = macsec->real_dev;
		len = skb->len;
		ret = dev_queue_xmit(skb);
		count_tx(dev, ret, len);
		return ret;
	}

	if (!secy->operational) {
		kfree_skb(skb);
		DEV_STATS_INC(dev, tx_dropped);
		return NETDEV_TX_OK;
	}

	len = skb->len;
	skb = macsec_encrypt(skb, dev);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			DEV_STATS_INC(dev, tx_dropped);
		return NETDEV_TX_OK;
	}

	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);

	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	return ret;
}

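/* MACSEC_FEATURES below is the subset of the lower device's feature flags
 * that the macsec device inherits directly; the real device's checksum and
 * segmentation offloads are not passed through, since frames are transformed
 * by encryption before they reach it.
 */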
#define MACSEC_FEATURES \
	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)

static int macsec_dev_init(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = gro_cells_init(&macsec->gro_cells, dev);
	if (err)
		return err;

	dev->features = real_dev->features & MACSEC_FEATURES;
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->lltx = true;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	macsec_set_head_tail_room(dev);

	if (is_zero_ether_addr(dev->dev_addr))
		eth_hw_addr_inherit(dev, real_dev);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	/* Get macsec's reference to real_dev */
	netdev_hold(real_dev, &macsec->dev_tracker, GFP_KERNEL);

	return 0;
}

static void macsec_dev_uninit(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	gro_cells_destroy(&macsec->gro_cells);
}

static netdev_features_t macsec_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	features &= (real_dev->features & MACSEC_FEATURES) |
		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;

	return features;
}

static int macsec_dev_open(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	int err;

	err = dev_uc_add(real_dev, dev->dev_addr);
	if (err < 0)
		return err;

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(real_dev, 1);
		if (err < 0)
			goto del_unicast;
	}

	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(real_dev, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto clear_allmulti;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_dev_open, &ctx);
		if (err)
			goto clear_allmulti;
	}

	if (netif_carrier_ok(real_dev))
		netif_carrier_on(dev);

	return 0;
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
del_unicast:
	dev_uc_del(real_dev, dev->dev_addr);
	netif_carrier_off(dev);
	return err;
}

static int macsec_dev_stop(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	netif_carrier_off(dev);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_dev_stop, &ctx);
		}
	}

	dev_mc_unsync(real_dev, dev);
	dev_uc_unsync(real_dev, dev);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	dev_uc_del(real_dev, dev->dev_addr);

	return 0;
}

static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	if (!(dev->flags & IFF_UP))
		return;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);

	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev,
				    dev->flags & IFF_PROMISC ? 1 : -1);
}

static void macsec_dev_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = macsec_priv(dev)->real_dev;

	dev_mc_sync(real_dev, dev);
	dev_uc_sync(real_dev, dev);
}

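/* Changing the MAC address is done make-before-break: the new unicast address
 * is added on the real device first, the SecY is updated (and offloading
 * devices notified), and only then is the old address removed. On any failure
 * the original address is restored, so the filter on the lower device never
 * drops frames for the address actually in use.
 */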
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct sockaddr *addr = p;
	u8 old_addr[ETH_ALEN];
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		err = dev_uc_add(real_dev, addr->sa_data);
		if (err < 0)
			return err;
	}

	ether_addr_copy(old_addr, dev->dev_addr);
	eth_hw_addr_set(dev, addr->sa_data);

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto restore_old_addr;
		}

		ctx.secy = &macsec->secy;
		err = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (err)
			goto restore_old_addr;
	}

	if (dev->flags & IFF_UP)
		dev_uc_del(real_dev, old_addr);

	return 0;

restore_old_addr:
	if (dev->flags & IFF_UP)
		dev_uc_del(real_dev, addr->sa_data);

	eth_hw_addr_set(dev, old_addr);

	return err;
}

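/* The MTU ceiling accounts for the SecTAG (with SCI) plus the ICV. As a
 * worked example with the defaults: macsec_extra_len(true) is 16 bytes
 * (6-byte SecTAG + 8-byte SCI + 2-byte EtherType) and icv_len is 16, so a
 * 1500-byte lower MTU allows at most a 1468-byte MTU on the macsec device.
 */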
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);

	if (macsec->real_dev->mtu - extra < new_mtu)
		return -ERANGE;

	WRITE_ONCE(dev->mtu, new_mtu);

	return 0;
}

static void macsec_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	if (!dev->tstats)
		return;

	dev_fetch_sw_netstats(s, dev->tstats);

	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
}

static int macsec_get_iflink(const struct net_device *dev)
{
	return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
}

static const struct net_device_ops macsec_netdev_ops = {
	.ndo_init = macsec_dev_init,
	.ndo_uninit = macsec_dev_uninit,
	.ndo_open = macsec_dev_open,
	.ndo_stop = macsec_dev_stop,
	.ndo_fix_features = macsec_fix_features,
	.ndo_change_mtu = macsec_change_mtu,
	.ndo_set_rx_mode = macsec_dev_set_rx_mode,
	.ndo_change_rx_flags = macsec_dev_change_rx_flags,
	.ndo_set_mac_address = macsec_set_mac_address,
	.ndo_start_xmit = macsec_start_xmit,
	.ndo_get_stats64 = macsec_get_stats64,
	.ndo_get_iflink = macsec_get_iflink,
};

static const struct device_type macsec_type = {
	.name = "macsec",
};

static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
};

static void macsec_free_netdev(struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);

	if (macsec->secy.tx_sc.md_dst)
		metadata_dst_free(macsec->secy.tx_sc.md_dst);
	free_percpu(macsec->stats);
	free_percpu(macsec->secy.tx_sc.stats);

	/* Get rid of the macsec's reference to real_dev */
	netdev_put(macsec->real_dev, &macsec->dev_tracker);
}

static void macsec_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->netdev_ops = &macsec_netdev_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = macsec_free_netdev;
	SET_NETDEV_DEVTYPE(dev, &macsec_type);

	eth_zero_addr(dev->broadcast);
}

static int macsec_changelink_common(struct net_device *dev,
				    struct nlattr *data[])
{
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		struct macsec_tx_sa *tx_sa;

		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);

		secy->operational = tx_sa && tx_sa->active;
	}

	if (data[IFLA_MACSEC_ENCRYPT])
		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);

	if (data[IFLA_MACSEC_PROTECT])
		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);

	if (data[IFLA_MACSEC_INC_SCI])
		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

	if (data[IFLA_MACSEC_ES])
		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);

	if (data[IFLA_MACSEC_SCB])
		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);

	if (data[IFLA_MACSEC_REPLAY_PROTECT])
		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);

	if (data[IFLA_MACSEC_VALIDATION])
		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);

	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
		case MACSEC_CIPHER_ID_GCM_AES_128:
		case MACSEC_DEFAULT_CIPHER_ID:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = false;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
			secy->xpn = true;
			break;
		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
			secy->xpn = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (data[IFLA_MACSEC_WINDOW]) {
		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);

		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
		 * for XPN cipher suites */
		if (secy->xpn &&
		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
			return -EINVAL;
	}

	return 0;
}

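/* Changelink works transactionally: a byte-wise copy of the current secy and
 * tx_sc is taken up front, and if either macsec_changelink_common() or the
 * offload propagation fails, the copies are memcpy'd back so the software
 * state never diverges from what the hardware accepted.
 */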
static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	bool macsec_offload_state_change = false;
	enum macsec_offload offload;
	struct macsec_tx_sc tx_sc;
	struct macsec_secy secy;
	int ret;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
	    data[IFLA_MACSEC_ICV_LEN] ||
	    data[IFLA_MACSEC_SCI] ||
	    data[IFLA_MACSEC_PORT])
		return -EINVAL;

	/* Keep a copy of unmodified secy and tx_sc, in case the offload
	 * propagation fails, to revert macsec_changelink_common.
	 */
	memcpy(&secy, &macsec->secy, sizeof(secy));
	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));

	ret = macsec_changelink_common(dev, data);
	if (ret)
		goto cleanup;

	if (data[IFLA_MACSEC_OFFLOAD]) {
		offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
		if (macsec->offload != offload) {
			macsec_offload_state_change = true;
			ret = macsec_update_offload(dev, offload);
			if (ret)
				goto cleanup;
		}
	}

	/* If h/w offloading is available, propagate to the device */
	if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.secy = &macsec->secy;
		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
		if (ret)
			goto cleanup;
	}

	return 0;

cleanup:
	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
	memcpy(&macsec->secy, &secy, sizeof(secy));

	return ret;
}

static void macsec_del_dev(struct macsec_dev *macsec)
{
	int i;

	while (macsec->secy.rx_sc) {
		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);

		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
		free_rx_sc(rx_sc);
	}

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);

		if (sa) {
			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
			clear_tx_sa(sa);
		}
	}
}

static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			macsec_offload(ops->mdo_del_secy, &ctx);
		}
	}

	unregister_netdevice_queue(dev, head);
	list_del_rcu(&macsec->secys);
	macsec_del_dev(macsec);
	netdev_upper_dev_unlink(real_dev, dev);

	macsec_generation++;
}

static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct net_device *real_dev = macsec->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	macsec_common_dellink(dev, head);

	if (list_empty(&rxd->secys)) {
		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);
	}
}

static int register_macsec_dev(struct net_device *real_dev,
			       struct net_device *dev)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

	if (!rxd) {
		int err;

		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
		if (!rxd)
			return -ENOMEM;

		INIT_LIST_HEAD(&rxd->secys);

		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
						 rxd);
		if (err < 0) {
			kfree(rxd);
			return err;
		}
	}

	list_add_tail_rcu(&macsec->secys, &rxd->secys);
	return 0;
}

static bool sci_exists(struct net_device *dev, sci_t sci)
{
	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
	struct macsec_dev *macsec;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (macsec->secy.sci == sci)
			return true;
	}

	return false;
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

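/* The default SCI is derived from the device's MAC address concatenated with
 * the port number, per IEEE 802.1AE; e.g. an illustrative MAC of
 * 52:54:00:12:34:56 with the default port (MACSEC_PORT_ES) yields
 * SCI 52:54:00:12:34:56 / port 1. macsec_add_dev() below applies this when
 * userspace passed no explicit SCI.
 */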
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats)
		return -ENOMEM;

	secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!secy->tx_sc.md_dst)
		/* macsec and secy percpu stats will be freed when unregistering
		 * net_device in macsec_free_netdev()
		 */
		return -ENOMEM;

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.md_dst->u.macsec_info.sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;

2016-03-11 17:07:33 +00:00
|
|
|
static int macsec_newlink(struct net *net, struct net_device *dev,
|
2017-06-25 21:55:59 +00:00
|
|
|
struct nlattr *tb[], struct nlattr *data[],
|
|
|
|
struct netlink_ext_ack *extack)
|
2016-03-11 17:07:33 +00:00
|
|
|
{
|
|
|
|
struct macsec_dev *macsec = macsec_priv(dev);
|
2020-04-23 13:40:47 +00:00
|
|
|
rx_handler_func_t *rx_handler;
|
2022-09-06 05:21:15 +00:00
|
|
|
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
|
2016-03-11 17:07:33 +00:00
|
|
|
struct net_device *real_dev;
|
2020-04-23 13:40:47 +00:00
|
|
|
int err, mtu;
|
2016-03-11 17:07:33 +00:00
|
|
|
sci_t sci;
|
|
|
|
|
|
|
|
if (!tb[IFLA_LINK])
|
|
|
|
return -EINVAL;
|
|
|
|
real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
|
|
|
|
if (!real_dev)
|
|
|
|
return -ENODEV;
|
2020-03-22 17:51:13 +00:00
|
|
|
if (real_dev->type != ARPHRD_ETHER)
|
|
|
|
return -EINVAL;
|
2016-03-11 17:07:33 +00:00
|
|
|
|
|
|
|
dev->priv_flags |= IFF_MACSEC;
|
|
|
|
|
|
|
|
macsec->real_dev = real_dev;
|
|
|
|
|
2020-03-25 13:01:34 +00:00
|
|
|
if (data && data[IFLA_MACSEC_OFFLOAD])
|
|
|
|
macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
|
|
|
|
else
|
|
|
|
/* MACsec offloading is off by default */
|
|
|
|
macsec->offload = MACSEC_OFFLOAD_OFF;
|
|
|
|
|
|
|
|
/* Check if the offloading mode is supported by the underlying layers */
|
|
|
|
if (macsec->offload != MACSEC_OFFLOAD_OFF &&
|
|
|
|
!macsec_check_offload(macsec->offload, macsec))
|
|
|
|
return -EOPNOTSUPP;
|
2020-01-13 22:31:43 +00:00
|
|
|
|
2022-01-30 11:37:52 +00:00
|
|
|
/* send_sci must be set to true when transmit sci explicitly is set */
|
|
|
|
if ((data && data[IFLA_MACSEC_SCI]) &&
|
|
|
|
(data && data[IFLA_MACSEC_INC_SCI])) {
|
|
|
|
u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
|
|
|
|
|
|
|
|
if (!send_sci)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2016-03-11 17:07:33 +00:00
|
|
|
if (data && data[IFLA_MACSEC_ICV_LEN])
|
|
|
|
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
|
2020-04-23 13:40:47 +00:00
|
|
|
mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
|
|
|
|
if (mtu < 0)
|
|
|
|
dev->mtu = 0;
|
|
|
|
else
|
|
|
|
dev->mtu = mtu;
|
2016-03-11 17:07:33 +00:00
|
|
|
|
|
|
|
rx_handler = rtnl_dereference(real_dev->rx_handler);
|
|
|
|
if (rx_handler && rx_handler != macsec_handle_frame)
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
err = register_netdevice(dev);
|
|
|
|
if (err < 0)
|
|
|
|
return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* need to be already registered so that ->init has run and
	 * the MAC addr is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);
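
	/* An SCI must be unique among the SecYs sharing the same lower
	 * device.
	 */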
	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;

			macsec->insert_tx_tag =
				macsec_needs_tx_tag(macsec, ops);
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}
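
/* rtnl_link_ops .validate hook: sanity-check the IFLA_MACSEC_* attributes
 * before they are applied to a device.
 */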
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

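	/* A non-default ICV length is only acceptable if the crypto layer
	 * can instantiate the AEAD with that authsize; probe it with a
	 * throwaway transform.
	 */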
	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}
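
	/* All attributes between IFLA_MACSEC_ENCODING_SA and
	 * IFLA_MACSEC_VALIDATION are booleans; reject anything but 0 or 1.
	 */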
	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

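	/* The ES, SCB and SC/SCI bits of the SecTAG TCI are mutually
	 * exclusive per the TCI encoding in IEEE 802.1AE.
	 */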
	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

struct net_device *macsec_get_real_dev(const struct net_device *dev)
{
	return macsec_priv(dev)->real_dev;
}
EXPORT_SYMBOL_GPL(macsec_get_real_dev);

bool macsec_netdev_is_offloaded(struct net_device *dev)
{
	return macsec_is_offloaded(macsec_priv(dev));
}
EXPORT_SYMBOL_GPL(macsec_netdev_is_offloaded);

static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
		0;
}

static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_tx_sc *tx_sc;
	struct macsec_dev *macsec;
	struct macsec_secy *secy;
	u64 csid;

	macsec = macsec_priv(dev);
	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

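	/* Derive the advertised cipher suite ID from the configured key
	 * length and whether extended packet numbering is in use.
	 */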
	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}
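
/* React to state changes on a lower device that carries MACsec devices:
 * propagate operstate, enforce the reduced MTU, and tear everything down
 * when the lower device goes away.
 */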
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};
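
/* Register in dependency order (netdevice notifier, rtnl link ops,
 * genetlink family) so that lower-device events are watched before the
 * first macsec link can be created; the error path unwinds in reverse.
 */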
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
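	/* Wait for in-flight RCU callbacks (e.g. freeing SAs and RX SCs)
	 * to finish before the module text goes away.
	 */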
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");