Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:

 1) Unbalanced locking in mwifiex_process_country_ie, from Brian Norris.

 2) Fix thermal zone registration in iwlwifi, from Andrei Otcheretianski.

 3) Fix double free_irq in sgi ioc3 eth, from Thomas Bogendoerfer.

 4) Use after free in mptcp, from Florian Westphal.

 5) Use after free in wireguard's root_remove_peer_lists, from Eric Dumazet.

 6) Properly access packets heads in bonding alb code, from Eric Dumazet.

 7) Fix data race in skb_queue_len(), from Qian Cai.

 8) Fix regression in r8169 on some chips, from Heiner Kallweit.

 9) Fix XDP program ref counting in hv_netvsc, from Haiyang Zhang.

10) Certain kinds of set link netlink operations can cause a NULL deref
    in the ipv6 addrconf code. Fix from Eric Dumazet.

11) Don't cancel uninitialized work queue in drop monitor, from Ido
    Schimmel.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
  net: thunderx: use proper interface type for RGMII
  mt76: mt7615: fix max_nss in mt7615_eeprom_parse_hw_cap
  bpf: Improve bucket_log calculation logic
  selftests/bpf: Test freeing sockmap/sockhash with a socket in it
  bpf, sockhash: Synchronize_rcu before free'ing map
  bpf, sockmap: Don't sleep while holding RCU lock on tear-down
  bpftool: Don't crash on missing xlated program instructions
  bpf, sockmap: Check update requirements after locking
  drop_monitor: Do not cancel uninitialized work item
  mlxsw: spectrum_dpipe: Add missing error path
  mlxsw: core: Add validation of hardware device types for MGPIR register
  mlxsw: spectrum_router: Clear offload indication from IPv6 nexthops on abort
  selftests: mlxsw: Add test cases for local table route replacement
  mlxsw: spectrum_router: Prevent incorrect replacement of local table routes
  net: dsa: microchip: enable module autoprobe
  ipv6/addrconf: fix potential NULL deref in inet6_set_link_af()
  dpaa_eth: support all modes with rate adapting PHYs
  net: stmmac: update pci platform data to use phy_interface
  net: stmmac: xgmac: fix missing IFF_MULTICAST checki in dwxgmac2_set_filter
  net: stmmac: fix missing IFF_MULTICAST check in dwmac4_set_filter
  ...
commit 291abfea47
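
Fix 7 in the list above (the skb_queue_len() data race) comes down to the READ_ONCE()/WRITE_ONCE() pairing visible in the skbuff.h hunk further down this diff. A minimal sketch of how the new helper is meant to be consumed — skb_queue_len_lockless() is the function this merge adds, while queue_roughly_full() is a hypothetical caller:

#include <linux/skbuff.h>

/* A lockless reader must tolerate a slightly stale qlen; the READ_ONCE()
 * inside skb_queue_len_lockless() only guarantees the load is not torn
 * or re-fetched by the compiler while a writer updates qlen under the
 * queue lock with WRITE_ONCE().
 */
static bool queue_roughly_full(const struct sk_buff_head *q, __u32 limit)
{
	return skb_queue_len_lockless(q) >= limit;
}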
@@ -1383,26 +1383,31 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 	bool do_tx_balance = true;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
-	struct ipv6hdr *ip6hdr;
 
 	skb_reset_mac_header(skb);
 	eth_data = eth_hdr(skb);
 
 	switch (ntohs(skb->protocol)) {
 	case ETH_P_IP: {
-		const struct iphdr *iph = ip_hdr(skb);
+		const struct iphdr *iph;
 
 		if (is_broadcast_ether_addr(eth_data->h_dest) ||
-		    iph->daddr == ip_bcast ||
-		    iph->protocol == IPPROTO_IGMP) {
+		    !pskb_network_may_pull(skb, sizeof(*iph))) {
+			do_tx_balance = false;
+			break;
+		}
+		iph = ip_hdr(skb);
+		if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
 			do_tx_balance = false;
 			break;
 		}
 		hash_start = (char *)&(iph->daddr);
 		hash_size = sizeof(iph->daddr);
-	}
 		break;
-	case ETH_P_IPV6:
+	}
+	case ETH_P_IPV6: {
+		const struct ipv6hdr *ip6hdr;
+
 		/* IPv6 doesn't really use broadcast mac address, but leave
 		 * that here just in case.
 		 */
@@ -1419,7 +1424,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		/* Additianally, DAD probes should not be tx-balanced as that
+		if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		/* Additionally, DAD probes should not be tx-balanced as that
 		 * will lead to false positives for duplicate addresses and
 		 * prevent address configuration from working.
 		 */
@@ -1429,17 +1438,26 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
-		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
-		hash_size = sizeof(ipv6_hdr(skb)->daddr);
+		hash_start = (char *)&ip6hdr->daddr;
+		hash_size = sizeof(ip6hdr->daddr);
 		break;
-	case ETH_P_IPX:
-		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+	}
+	case ETH_P_IPX: {
+		const struct ipxhdr *ipxhdr;
+
+		if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+			do_tx_balance = false;
+			break;
+		}
+		ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+		if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
 			/* something is wrong with this packet */
 			do_tx_balance = false;
 			break;
 		}
 
-		if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+		if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
 			/* The only protocol worth balancing in
 			 * this family since it has an "ARP" like
 			 * mechanism
@@ -1448,9 +1466,11 @@ netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 			break;
 		}
 
+		eth_data = eth_hdr(skb);
 		hash_start = (char *)eth_data->h_dest;
 		hash_size = ETH_ALEN;
 		break;
+	}
 	case ETH_P_ARP:
 		do_tx_balance = false;
 		if (bond_info->rlb_enabled)
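
The bonding/alb hunk above repeats one pattern per protocol family: validate that the header bytes are present, then (re)fetch the header pointer, then dereference. A small sketch under that reading — demo_get_daddr() is made up; pskb_network_may_pull() and ip_hdr() are the kernel APIs used in the hunk:

#include <linux/ip.h>
#include <linux/skbuff.h>

static bool demo_get_daddr(struct sk_buff *skb, __be32 *daddr)
{
	const struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;	/* runt frame: do not touch the header */

	iph = ip_hdr(skb);	/* (re)fetch only after a successful pull,
				 * since the pull may reallocate the head
				 */
	*daddr = iph->daddr;
	return true;
}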
@@ -693,7 +693,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
 		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
 	}
 
-	b53_enable_vlan(dev, false, ds->vlan_filtering);
+	b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
 
 	b53_for_each_port(dev, i)
 		b53_write16(dev, B53_VLAN_PAGE,
@@ -68,7 +68,9 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
 	/* Force link status for IMP port */
 	reg = core_readl(priv, offset);
-	reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
+	reg |= (MII_SW_OR | LINK_STS);
+	if (priv->type == BCM7278_DEVICE_ID)
+		reg |= GMII_SPEED_UP_2G;
 	core_writel(priv, reg, offset);
 
 	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
@@ -101,6 +101,12 @@ static struct spi_driver ksz9477_spi_driver = {
 
 module_spi_driver(ksz9477_spi_driver);
 
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
 MODULE_LICENSE("GPL");
@@ -2736,6 +2736,9 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
 
 	umac_reset(priv);
 
+	/* Disable the UniMAC RX/TX */
+	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
 	/* We may have been suspended and never received a WOL event that
 	 * would turn off MPD detection, take care of that now
 	 */
@@ -73,7 +73,11 @@ struct sifive_fu540_macb_mgmt {
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN	8
 #define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
-#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
+ * false amba_error in TX path from the DMA assuming there is not enough
+ * space in the SRAM (16KB) even when there is.
+ */
+#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)
 
 #define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
 #define MACB_NETIF_LSO		NETIF_F_TSO
@@ -1791,16 +1795,14 @@ static netdev_features_t macb_features_check(struct sk_buff *skb,
 
 	/* Validate LSO compatibility */
 
-	/* there is only one buffer */
-	if (!skb_is_nonlinear(skb))
+	/* there is only one buffer or protocol is not UDP */
+	if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
 		return features;
 
 	/* length of header */
 	hdrlen = skb_transport_offset(skb);
-	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-		hdrlen += tcp_hdrlen(skb);
 
-	/* For LSO:
+	/* For UFO only:
 	 * When software supplies two or more payload buffers all payload buffers
 	 * apart from the last must be a multiple of 8 bytes in size.
 	 */
@@ -1039,7 +1039,7 @@ static int phy_interface_mode(u8 lmac_type)
 	if (lmac_type == BGX_MODE_QSGMII)
 		return PHY_INTERFACE_MODE_QSGMII;
 	if (lmac_type == BGX_MODE_RGMII)
-		return PHY_INTERFACE_MODE_RGMII;
+		return PHY_INTERFACE_MODE_RGMII_RXID;
 
 	return PHY_INTERFACE_MODE_SGMII;
 }
@@ -3403,6 +3403,13 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
 		   atomic_read(&adap->chcr_stats.fallback));
 	seq_printf(seq, "IPSec PDU: %10u\n",
 		   atomic_read(&adap->chcr_stats.ipsec_cnt));
+	seq_printf(seq, "TLS PDU Tx: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_pdu_tx));
+	seq_printf(seq, "TLS PDU Rx: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_pdu_rx));
+	seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
+		   atomic_read(&adap->chcr_stats.tls_key));
+
 	return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(chcr_stats);
@@ -417,7 +417,10 @@ static void de_rx (struct de_private *de)
 		if (status & DescOwn)
 			break;
 
-		len = ((status >> 16) & 0x7ff) - 4;
+		/* the length is actually a 15 bit value here according
+		 * to Table 4-1 in the DE2104x spec so mask is 0x7fff
+		 */
+		len = ((status >> 16) & 0x7fff) - 4;
 		mapping = de->rx_skb[rx_tail].mapping;
 
 		if (unlikely(drop)) {
@@ -2453,6 +2453,9 @@ static void dpaa_adjust_link(struct net_device *net_dev)
 	mac_dev->adjust_link(mac_dev);
 }
 
+/* The Aquantia PHYs are capable of performing rate adaptation */
+#define PHY_VEND_AQUANTIA	0x03a1b400
+
 static int dpaa_phy_init(struct net_device *net_dev)
 {
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
@@ -2471,9 +2474,14 @@ static int dpaa_phy_init(struct net_device *net_dev)
 		return -ENODEV;
 	}
 
-	/* Remove any features not supported by the controller */
-	ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
-	linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+	/* Unless the PHY is capable of rate adaptation */
+	if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+	    ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+		/* remove any features not supported by the controller */
+		ethtool_convert_legacy_u32_to_link_mode(mask,
+							mac_dev->if_support);
+		linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+	}
 
 	phy_support_asym_pause(phy_dev);
 
@@ -791,7 +791,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 	struct i40e_ring *ring;
 
 	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
-		return -ENETDOWN;
+		return -EAGAIN;
 
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return -ENETDOWN;
@@ -401,6 +401,8 @@ struct mvneta_pcpu_stats {
 	struct	u64_stats_sync syncp;
 	u64	rx_packets;
 	u64	rx_bytes;
+	u64	rx_dropped;
+	u64	rx_errors;
 	u64	tx_packets;
 	u64	tx_bytes;
 };
@@ -738,6 +740,8 @@ mvneta_get_stats64(struct net_device *dev,
 		struct mvneta_pcpu_stats *cpu_stats;
 		u64 rx_packets;
 		u64 rx_bytes;
+		u64 rx_dropped;
+		u64 rx_errors;
 		u64 tx_packets;
 		u64 tx_bytes;
 
@@ -746,19 +750,20 @@ mvneta_get_stats64(struct net_device *dev,
 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 			rx_packets = cpu_stats->rx_packets;
 			rx_bytes = cpu_stats->rx_bytes;
+			rx_dropped = cpu_stats->rx_dropped;
+			rx_errors = cpu_stats->rx_errors;
 			tx_packets = cpu_stats->tx_packets;
 			tx_bytes = cpu_stats->tx_bytes;
 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
 		stats->rx_packets += rx_packets;
 		stats->rx_bytes   += rx_bytes;
+		stats->rx_dropped += rx_dropped;
+		stats->rx_errors  += rx_errors;
 		stats->tx_packets += tx_packets;
 		stats->tx_bytes   += tx_bytes;
 	}
 
-	stats->rx_errors	= dev->stats.rx_errors;
-	stats->rx_dropped	= dev->stats.rx_dropped;
-
 	stats->tx_dropped	= dev->stats.tx_dropped;
 }
 
@@ -1736,8 +1741,14 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
 static void mvneta_rx_error(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc)
 {
+	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	u32 status = rx_desc->status;
 
+	/* update per-cpu counter */
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_errors++;
+	u64_stats_update_end(&stats->syncp);
+
 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
 	case MVNETA_RXD_ERR_CRC:
 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
@@ -2179,11 +2190,15 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
 
 	rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
 	if (unlikely(!rxq->skb)) {
-		netdev_err(dev,
-			   "Can't allocate skb on queue %d\n",
-			   rxq->id);
-		dev->stats.rx_dropped++;
+		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
 		rxq->skb_alloc_err++;
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_dropped++;
+		u64_stats_update_end(&stats->syncp);
+
 		return -ENOMEM;
 	}
 	page_pool_release_page(rxq->page_pool, page);
@@ -2270,7 +2285,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
 		/* Check errors only for FIRST descriptor */
 		if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
 			mvneta_rx_error(pp, rx_desc);
-			dev->stats.rx_errors++;
 			/* leave the descriptor untouched */
 			continue;
 		}
@@ -2372,7 +2386,6 @@ err_drop_frame_ret_pool:
 		mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
 				      rx_desc->buf_phys_addr);
 err_drop_frame:
-		dev->stats.rx_errors++;
 		mvneta_rx_error(pp, rx_desc);
 		/* leave the descriptor untouched */
 		continue;
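
The mvneta hunks above move error/drop accounting from dev->stats into per-CPU counters guarded by u64_stats_sync. A self-contained sketch of that pattern with hypothetical names (the API calls match the ones used in the hunks):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_errors;
};

/* writer side: cheap, per-CPU, no shared-cacheline bouncing */
static void demo_count_rx_error(struct demo_pcpu_stats __percpu *pcpu)
{
	struct demo_pcpu_stats *stats = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_errors++;
	u64_stats_update_end(&stats->syncp);
}

/* reader side: retry until a consistent 64-bit snapshot is seen */
static u64 demo_read_rx_errors(struct demo_pcpu_stats *stats)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		val = stats->rx_errors;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	return val;
}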
@@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
 
 static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
 {
-	if (!MLX5_CAP_GEN(mdev, tls))
+	if (!MLX5_CAP_GEN(mdev, tls_tx))
 		return false;
 
 	if (!MLX5_CAP_GEN(mdev, log_max_dek))
@@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
 	int datalen;
 	u32 skb_seq;
 
-	if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
 		skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
 		goto out;
 	}
@@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
-			netdev_WARN_ONCE(cq->channel->netdev,
-					 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
-			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-				queue_work(cq->channel->priv->wq, &sq->recover_work);
-			break;
-		}
 		do {
 			struct mlx5e_sq_wqe_info *wi;
 			u16 ci;
@@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
 			wi = &sq->db.ico_wqe[ci];
 
+			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+				netdev_WARN_ONCE(cq->channel->netdev,
+						 "Bad OP in ICOSQ CQE: 0x%x\n",
+						 get_cqe_opcode(cqe));
+				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+					queue_work(cq->channel->priv->wq, &sq->recover_work);
+				break;
+			}
+
 			if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
 				sqcc += MLX5E_UMR_WQEBBS;
 				wi->umr.rq->mpwqe.umr_completed++;
@@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
 	i = 0;
 	do {
+		struct mlx5e_tx_wqe_info *wi;
 		u16 wqe_counter;
 		bool last_wqe;
+		u16 ci;
 
 		mlx5_cqwq_pop(&cq->wq);
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
-			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
-					      &sq->state)) {
-				struct mlx5e_tx_wqe_info *wi;
-				u16 ci;
-
-				ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-				wi = &sq->db.wqe_info[ci];
-				mlx5e_dump_error_cqe(sq,
						     (struct mlx5_err_cqe *)cqe);
-				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-				queue_work(cq->channel->priv->wq,
-					   &sq->recover_work);
-			}
-			stats->cqe_err++;
-		}
-
 		do {
-			struct mlx5e_tx_wqe_info *wi;
 			struct sk_buff *skb;
-			u16 ci;
 			int j;
 
 			last_wqe = (sqcc == wqe_counter);
@@ -516,6 +499,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 			napi_consume_skb(skb, napi_budget);
 		} while (!last_wqe);
 
+		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
+			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+					      &sq->state)) {
+				mlx5e_dump_error_cqe(sq,
+						     (struct mlx5_err_cqe *)cqe);
+				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+				queue_work(cq->channel->priv->wq,
+					   &sq->recover_work);
+			}
+			stats->cqe_err++;
+		}
+
 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	stats->cqes += i;
@@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
 	mutex_lock(&fpga_xfrm->lock);
 	if (!--fpga_xfrm->num_rules) {
 		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
+		kfree(fpga_xfrm->sa_ctx);
 		fpga_xfrm->sa_ctx = NULL;
 	}
 	mutex_unlock(&fpga_xfrm->lock);
@@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
 	if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
 		return 0;
 
-	if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
 		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
 		return -EOPNOTSUPP;
 	}
@@ -1582,16 +1582,16 @@ struct match_list_head {
 	struct match_list first;
 };
 
-static void free_match_list(struct match_list_head *head)
+static void free_match_list(struct match_list_head *head, bool ft_locked)
 {
 	if (!list_empty(&head->list)) {
 		struct match_list *iter, *match_tmp;
 
 		list_del(&head->first.list);
-		tree_put_node(&head->first.g->node, false);
+		tree_put_node(&head->first.g->node, ft_locked);
 		list_for_each_entry_safe(iter, match_tmp, &head->list,
 					 list) {
-			tree_put_node(&iter->g->node, false);
+			tree_put_node(&iter->g->node, ft_locked);
 			list_del(&iter->list);
 			kfree(iter);
 		}
@@ -1600,7 +1600,8 @@ static void free_match_list(struct match_list_head *head, bool ft_locked)
 
 static int build_match_list(struct match_list_head *match_head,
 			    struct mlx5_flow_table *ft,
-			    const struct mlx5_flow_spec *spec)
+			    const struct mlx5_flow_spec *spec,
+			    bool ft_locked)
 {
 	struct rhlist_head *tmp, *list;
 	struct mlx5_flow_group *g;
@@ -1625,7 +1626,7 @@ static int build_match_list(struct match_list_head *match_head,
 
 		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
 		if (!curr_match) {
-			free_match_list(match_head);
+			free_match_list(match_head, ft_locked);
 			err = -ENOMEM;
 			goto out;
 		}
@@ -1805,7 +1806,7 @@ search_again_locked:
 	version = atomic_read(&ft->node.version);
 
 	/* Collect all fgs which has a matching match_criteria */
-	err = build_match_list(&match_head, ft, spec);
+	err = build_match_list(&match_head, ft, spec, take_write);
 	if (err) {
 		if (take_write)
 			up_write_ref_node(&ft->node, false);
@@ -1819,7 +1820,7 @@ search_again_locked:
 
 	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
 				      dest_num, version);
-	free_match_list(&match_head);
+	free_match_list(&match_head, take_write);
 	if (!IS_ERR(rule) ||
 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
 		if (take_write)
@@ -242,7 +242,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 			return err;
 	}
 
-	if (MLX5_CAP_GEN(dev, tls)) {
+	if (MLX5_CAP_GEN(dev, tls_tx)) {
 		err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
 		if (err)
 			return err;
|
||||
|
||||
static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
|
||||
{
|
||||
enum mlxsw_reg_mgpir_device_type device_type;
|
||||
int index, max_index, sensor_index;
|
||||
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
|
||||
char mtmp_pl[MLXSW_REG_MTMP_LEN];
|
||||
@ -584,8 +585,9 @@ static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
|
||||
if (!gbox_num)
|
||||
mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
|
||||
if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
|
||||
!gbox_num)
|
||||
return 0;
|
||||
|
||||
index = mlxsw_hwmon->module_sensor_max;
|
||||
|
@@ -895,8 +895,10 @@ static int
 mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
 			     struct mlxsw_thermal *thermal)
 {
+	enum mlxsw_reg_mgpir_device_type device_type;
 	struct mlxsw_thermal_module *gearbox_tz;
 	char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+	u8 gbox_num;
 	int i;
 	int err;
 
@@ -908,11 +910,13 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
 	if (err)
 		return err;
 
-	mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+	mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
 			       NULL);
-	if (!thermal->tz_gearbox_num)
+	if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+	    !gbox_num)
 		return 0;
 
+	thermal->tz_gearbox_num = gbox_num;
 	thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
 					  sizeof(*thermal->tz_gearbox_arr),
 					  GFP_KERNEL);
@@ -215,7 +215,7 @@ mlxsw_sp_dpipe_table_erif_entries_dump(void *priv, bool counters_enabled,
 start_again:
 	err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
 	if (err)
-		return err;
+		goto err_ctx_prepare;
 	j = 0;
 	for (; i < rif_count; i++) {
 		struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
@@ -247,6 +247,7 @@ start_again:
 	return 0;
err_entry_append:
err_entry_get:
err_ctx_prepare:
 	rtnl_unlock();
 	devlink_dpipe_entry_clear(&entry);
 	return err;
@@ -4844,6 +4844,23 @@ mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
 	fib_node->fib_entry = NULL;
 }
 
+static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+	struct mlxsw_sp_fib4_entry *fib4_replaced;
+
+	if (!fib_node->fib_entry)
+		return true;
+
+	fib4_replaced = container_of(fib_node->fib_entry,
+				     struct mlxsw_sp_fib4_entry, common);
+	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
+	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
+		return false;
+
+	return true;
+}
+
 static int
 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
 			     const struct fib_entry_notifier_info *fen_info)
@@ -4872,6 +4889,12 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
 		goto err_fib4_entry_create;
 	}
 
+	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
+		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+		return 0;
+	}
+
 	replaced = fib_node->fib_entry;
 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
 	if (err) {
@@ -4908,7 +4931,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
 		return;
 
 	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
-	if (WARN_ON(!fib4_entry))
+	if (!fib4_entry)
 		return;
 	fib_node = fib4_entry->common.fib_node;
 
@@ -4970,6 +4993,9 @@ static void mlxsw_sp_rt6_release(struct fib6_info *rt)
 
 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
 {
+	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+
+	fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
 	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
 	kfree(mlxsw_sp_rt6);
 }
@@ -5408,6 +5434,27 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
 	return NULL;
 }
 
+static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+	struct mlxsw_sp_fib6_entry *fib6_replaced;
+	struct fib6_info *rt, *rt_replaced;
+
+	if (!fib_node->fib_entry)
+		return true;
+
+	fib6_replaced = container_of(fib_node->fib_entry,
+				     struct mlxsw_sp_fib6_entry,
+				     common);
+	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
+	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
+	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
+		return false;
+
+	return true;
+}
+
 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
 					struct fib6_info **rt_arr,
 					unsigned int nrt6)
@@ -5442,6 +5489,12 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
 		goto err_fib6_entry_create;
 	}
 
+	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
+		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+		return 0;
+	}
+
 	replaced = fib_node->fib_entry;
 	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
 	if (err)
@@ -44,8 +44,8 @@
 /* Add/subtract the Adjustment_Value when making a Drift adjustment */
 #define QED_DRIFT_CNTR_DIRECTION_SHIFT		31
 #define QED_TIMESTAMP_MASK			BIT(16)
-/* Param mask for Hardware to detect/timestamp the unicast PTP packets */
-#define QED_PTP_UCAST_PARAM_MASK		0xF
+/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
+#define QED_PTP_UCAST_PARAM_MASK		0x70F
 
 static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
 {
@@ -2477,15 +2477,18 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_12:
 	case RTL_GIGA_MAC_VER_17:
+		pcie_set_readrq(tp->pci_dev, 512);
 		r8168b_1_hw_jumbo_enable(tp);
 		break;
 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+		pcie_set_readrq(tp->pci_dev, 512);
 		r8168c_hw_jumbo_enable(tp);
 		break;
 	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
 		r8168dp_hw_jumbo_enable(tp);
 		break;
 	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
+		pcie_set_readrq(tp->pci_dev, 512);
 		r8168e_hw_jumbo_enable(tp);
 		break;
 	default:
@@ -2515,6 +2518,9 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
 		break;
 	}
 	rtl_lock_config_regs(tp);
+
+	if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+		pcie_set_readrq(tp->pci_dev, 4096);
 }
 
 static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
@@ -823,7 +823,6 @@ static int ioc3_close(struct net_device *dev)
 	netif_stop_queue(dev);
 
 	ioc3_stop(ip);
-	free_irq(dev->irq, dev);
 
 	ioc3_free_rx_bufs(ip);
 	ioc3_clean_tx_ring(ip);
@@ -413,6 +413,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
 		dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
 		if (dll_lock & SDC4_STATUS_DLL_LOCK)
 			break;
+		retry--;
 	} while (retry > 0);
 	if (!retry)
 		dev_err(&ethqos->pdev->dev,
@@ -420,7 +420,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
 		value |= GMAC_PACKET_FILTER_PM;
 		/* Set all the bits of the HASH tab */
 		memset(mc_filter, 0xff, sizeof(mc_filter));
-	} else if (!netdev_mc_empty(dev)) {
+	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
 		struct netdev_hw_addr *ha;
 
 		/* Hash filter for multicast */
@@ -736,11 +736,14 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 				    __le16 perfect_match, bool is_double)
 {
 	void __iomem *ioaddr = hw->pcsr;
+	u32 value;
 
 	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
 
+	value = readl(ioaddr + GMAC_VLAN_TAG);
+
 	if (hash) {
-		u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
+		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
 		if (is_double) {
 			value |= GMAC_VLAN_EDVLP;
 			value |= GMAC_VLAN_ESVL;
@@ -759,8 +762,6 @@ static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 
 		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
 	} else {
-		u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
 		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
 		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
 		value &= ~GMAC_VLAN_DOVLTC;
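
Both dwmac4 VLAN-hash branches above (and the dwxgmac2 ones that follow) apply the same correction: read-modify-write the register instead of rebuilding its value from zero, which was silently clearing fields owned by other code paths. The idiom, reduced to a sketch (demo_rmw_set() is hypothetical):

#include <linux/io.h>

static void demo_rmw_set(void __iomem *reg, u32 set_bits)
{
	u32 value = readl(reg);	/* preserve bits we do not own */

	value |= set_bits;
	writel(value, reg);
}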
@@ -458,7 +458,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
 
 		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
 			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
-	} else if (!netdev_mc_empty(dev)) {
+	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
 		struct netdev_hw_addr *ha;
 
 		value |= XGMAC_FILTER_HMC;
@@ -569,7 +569,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 
 		writel(value, ioaddr + XGMAC_PACKET_FILTER);
 
-		value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+		value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
 		if (is_double) {
 			value |= XGMAC_VLAN_EDVLP;
 			value |= XGMAC_VLAN_ESVL;
@@ -584,7 +586,9 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
 
 		writel(value, ioaddr + XGMAC_PACKET_FILTER);
 
-		value = XGMAC_VLAN_ETV;
+		value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+		value |= XGMAC_VLAN_ETV;
 		if (is_double) {
 			value |= XGMAC_VLAN_EDVLP;
 			value |= XGMAC_VLAN_ESVL;
@@ -95,7 +95,7 @@ static int stmmac_default_data(struct pci_dev *pdev,
 
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_GMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_GMII;
 
 	plat->dma_cfg->pbl = 32;
 	plat->dma_cfg->pblx8 = true;
@@ -217,7 +217,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_SGMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
 
 	return ehl_common_data(pdev, plat);
 }
 
@@ -230,7 +231,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_RGMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
 
 	return ehl_common_data(pdev, plat);
 }
 
@@ -258,7 +260,7 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
 {
 	plat->bus_id = 1;
 	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_SGMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
 	return tgl_common_data(pdev, plat);
 }
 
@@ -358,7 +360,7 @@ static int quark_default_data(struct pci_dev *pdev,
 
 	plat->bus_id = pci_dev_id(pdev);
 	plat->phy_addr = ret;
-	plat->interface = PHY_INTERFACE_MODE_RMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_RMII;
 
 	plat->dma_cfg->pbl = 16;
 	plat->dma_cfg->pblx8 = true;
@@ -415,7 +417,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
 
 	plat->bus_id = 1;
 	plat->phy_addr = -1;
-	plat->interface = PHY_INTERFACE_MODE_GMII;
+	plat->phy_interface = PHY_INTERFACE_MODE_GMII;
 
 	plat->dma_cfg->pbl = 32;
 	plat->dma_cfg->pblx8 = true;
@@ -120,7 +120,7 @@ int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	}
 
 	if (prog)
-		bpf_prog_add(prog, nvdev->num_chn);
+		bpf_prog_add(prog, nvdev->num_chn - 1);
 
 	for (i = 0; i < nvdev->num_chn; i++)
 		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
@@ -136,6 +136,7 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 {
 	struct netdev_bpf xdp;
 	bpf_op_t ndo_bpf;
+	int ret;
 
 	ASSERT_RTNL();
 
@@ -148,10 +149,18 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
 
 	memset(&xdp, 0, sizeof(xdp));
 
+	if (prog)
+		bpf_prog_inc(prog);
+
 	xdp.command = XDP_SETUP_PROG;
 	xdp.prog = prog;
 
-	return ndo_bpf(vf_netdev, &xdp);
+	ret = ndo_bpf(vf_netdev, &xdp);
+
+	if (ret && prog)
+		bpf_prog_put(prog);
+
+	return ret;
 }
 
 static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
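
The hv_netvsc hunks above encode one refcounting rule: the caller of an XDP attach already holds one reference on the program, so a driver that stores the program in N per-channel slots needs only N - 1 extra references, and any error path that took a reference must drop it again. A sketch with a hypothetical driver (bpf_prog_add()/bpf_prog_put() are the real helpers):

#include <linux/bpf.h>

static void demo_install_xdp(struct bpf_prog *prog, struct bpf_prog **slot,
			     int num_chn)
{
	int i;

	if (prog)			/* caller's reference covers slot 0 */
		bpf_prog_add(prog, num_chn - 1);

	for (i = 0; i < num_chn; i++)
		slot[i] = prog;
}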
@@ -1059,9 +1059,12 @@ static int netvsc_attach(struct net_device *ndev,
 
 	prog = dev_info->bprog;
 	if (prog) {
+		bpf_prog_inc(prog);
 		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
-		if (ret)
+		if (ret) {
+			bpf_prog_put(prog);
 			goto err1;
+		}
 	}
 
 	/* In any case device is now ready */
@@ -934,9 +934,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
 int nsim_dev_init(void)
 {
 	nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
-	if (IS_ERR(nsim_dev_ddir))
-		return PTR_ERR(nsim_dev_ddir);
-	return 0;
+	return PTR_ERR_OR_ZERO(nsim_dev_ddir);
 }
 
 void nsim_dev_exit(void)
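
The netdevsim change above is the stock PTR_ERR_OR_ZERO() cleanup; the helper folds the usual two-branch tail into one expression. A hypothetical equivalent:

#include <linux/err.h>

static int demo_check(void *handle)
{
	/* same as: if (IS_ERR(handle)) return PTR_ERR(handle); return 0; */
	return PTR_ERR_OR_ZERO(handle);
}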
@@ -263,6 +263,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
 	} else {
 		node = kzalloc(sizeof(*node), GFP_KERNEL);
 		if (unlikely(!node)) {
+			list_del(&newnode->peer_list);
 			kfree(newnode);
 			return -ENOMEM;
 		}
@@ -569,10 +569,8 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
 							 private_key);
 		list_for_each_entry_safe(peer, temp, &wg->peer_list,
 					 peer_list) {
-			if (wg_noise_precompute_static_static(peer))
-				wg_noise_expire_current_peer_keypairs(peer);
-			else
-				wg_peer_remove(peer);
+			BUG_ON(!wg_noise_precompute_static_static(peer));
+			wg_noise_expire_current_peer_keypairs(peer);
 		}
 		wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
 		up_write(&wg->static_identity.lock);
|
||||
/* Must hold peer->handshake.static_identity->lock */
|
||||
bool wg_noise_precompute_static_static(struct wg_peer *peer)
|
||||
{
|
||||
bool ret = true;
|
||||
bool ret;
|
||||
|
||||
down_write(&peer->handshake.lock);
|
||||
if (peer->handshake.static_identity->has_identity)
|
||||
if (peer->handshake.static_identity->has_identity) {
|
||||
ret = curve25519(
|
||||
peer->handshake.precomputed_static_static,
|
||||
peer->handshake.static_identity->static_private,
|
||||
peer->handshake.remote_static);
|
||||
else
|
||||
} else {
|
||||
u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
|
||||
|
||||
ret = curve25519(empty, empty, peer->handshake.remote_static);
|
||||
memset(peer->handshake.precomputed_static_static, 0,
|
||||
NOISE_PUBLIC_KEY_LEN);
|
||||
}
|
||||
up_write(&peer->handshake.lock);
|
||||
return ret;
|
||||
}
|
||||
|
@@ -1897,27 +1897,55 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
 		ieee80211_resume_disconnect(vif);
 }
 
-static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
-				   struct ieee80211_vif *vif)
+static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
 {
-	u32 base = mvm->trans->dbg.lmac_error_event_table[0];
 	struct error_table_start {
 		/* cf. struct iwl_error_event_table */
 		u32 valid;
-		u32 error_id;
+		__le32 err_id;
 	} err_info;
 
-	iwl_trans_read_mem_bytes(mvm->trans, base,
-				 &err_info, sizeof(err_info));
+	if (!base)
+		return false;
 
-	if (err_info.valid &&
-	    err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-		struct cfg80211_wowlan_wakeup wakeup = {
-			.rfkill_release = true,
-		};
-		ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+	iwl_trans_read_mem_bytes(trans, base,
+				 &err_info, sizeof(err_info));
+	if (err_info.valid && err_id)
+		*err_id = le32_to_cpu(err_info.err_id);
+
+	return !!err_info.valid;
+}
+
+static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif)
+{
+	u32 err_id;
+
+	/* check for lmac1 error */
+	if (iwl_mvm_rt_status(mvm->trans,
+			      mvm->trans->dbg.lmac_error_event_table[0],
+			      &err_id)) {
+		if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+			struct cfg80211_wowlan_wakeup wakeup = {
+				.rfkill_release = true,
+			};
+			ieee80211_report_wowlan_wakeup(vif, &wakeup,
+						       GFP_KERNEL);
+		}
+		return true;
 	}
 
-	return err_info.valid;
+	/* check if we have lmac2 set and check for error */
+	if (iwl_mvm_rt_status(mvm->trans,
+			      mvm->trans->dbg.lmac_error_event_table[1], NULL))
+		return true;
+
+	/* check for umac error */
+	if (iwl_mvm_rt_status(mvm->trans,
+			      mvm->trans->dbg.umac_error_event_table, NULL))
+		return true;
+
+	return false;
 }
 
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
@@ -8,6 +8,7 @@
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * Copyright (C) 2018 Intel Corporation
  * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * Copyright (C) 2018 Intel Corporation
  * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -528,6 +530,8 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
 	if (req != mvm->ftm_initiator.req)
 		return;
 
+	iwl_mvm_ftm_reset(mvm);
+
 	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
 						 LOCATION_GROUP, 0),
				 0, sizeof(cmd), &cmd))
@@ -641,7 +645,6 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	lockdep_assert_held(&mvm->mutex);
 
 	if (!mvm->ftm_initiator.req) {
-		IWL_ERR(mvm, "Got FTM response but have no request?\n");
 		return;
 	}
 
@@ -5,10 +5,9 @@
 *
 * GPL LICENSE SUMMARY
 *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
 *
 * BSD LICENSE
 *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -2037,7 +2035,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
 	rcu_read_lock();
 
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
-	if (IS_ERR(sta)) {
+	if (IS_ERR_OR_NULL(sta)) {
 		rcu_read_unlock();
 		WARN(1, "Can't find STA to configure HE\n");
 		return;
@@ -3293,7 +3291,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
 	if (fw_has_capa(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
 		iwl_mvm_schedule_session_protection(mvm, vif, 900,
-						    min_duration);
+						    min_duration, false);
 	else
 		iwl_mvm_protect_session(mvm, vif, duration,
 					min_duration, 500, false);
@@ -3320,6 +3320,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
 	igtk_cmd.sta_id = cpu_to_le32(sta_id);
 
 	if (remove_key) {
+		/* This is a valid situation for IGTK */
+		if (sta_id == IWL_MVM_INVALID_STA)
+			return 0;
+
 		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
 	} else {
 		struct ieee80211_key_seq seq;
@@ -3574,9 +3578,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
 		      keyconf->keyidx, sta_id);
 
-	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
+	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
/* Protect the session to hear the TDLS setup response on the channel */
|
||||
iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
|
||||
mutex_lock(&mvm->mutex);
|
||||
if (fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
|
||||
iwl_mvm_schedule_session_protection(mvm, vif, duration,
|
||||
duration, true);
|
||||
else
|
||||
iwl_mvm_protect_session(mvm, vif, duration,
|
||||
duration, 100, true);
|
||||
mutex_unlock(&mvm->mutex);
|
||||
}
|
||||
|
||||
|
@@ -1056,13 +1056,42 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
 	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
 
+static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
+				       struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_mvm_session_prot_notif *resp;
+	int resp_len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
+		    pkt->hdr.group_id != MAC_CONF_GROUP))
+		return true;
+
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+		IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
+		return true;
+	}
+
+	resp = (void *)pkt->data;
+
+	if (!resp->status)
+		IWL_ERR(mvm,
+			"TIME_EVENT_NOTIFICATION received but not executed\n");
+
+	return true;
+}
+
 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *vif,
-					 u32 duration, u32 min_duration)
+					 u32 duration, u32 min_duration,
+					 bool wait_for_notif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+	const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
+					 MAC_CONF_GROUP, 0) };
+	struct iwl_notification_wait wait_notif;
 	struct iwl_mvm_session_prot_cmd cmd = {
 		.id_and_color =
 			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
@@ -1071,7 +1100,6 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
 		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
 	};
-	int ret;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1092,14 +1120,35 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
 	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
 		     le32_to_cpu(cmd.duration_tu));
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
-						   MAC_CONF_GROUP, 0),
-				   0, sizeof(cmd), &cmd);
-	if (ret) {
+	if (!wait_for_notif) {
+		if (iwl_mvm_send_cmd_pdu(mvm,
+					 iwl_cmd_id(SESSION_PROTECTION_CMD,
+						    MAC_CONF_GROUP, 0),
+					 0, sizeof(cmd), &cmd)) {
+			IWL_ERR(mvm,
+				"Couldn't send the SESSION_PROTECTION_CMD\n");
+			spin_lock_bh(&mvm->time_event_lock);
+			iwl_mvm_te_clear_data(mvm, te_data);
+			spin_unlock_bh(&mvm->time_event_lock);
+		}
+
+		return;
+	}
+
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
+				   notif, ARRAY_SIZE(notif),
+				   iwl_mvm_session_prot_notif, NULL);
+
+	if (iwl_mvm_send_cmd_pdu(mvm,
+				 iwl_cmd_id(SESSION_PROTECTION_CMD,
+					    MAC_CONF_GROUP, 0),
+				 0, sizeof(cmd), &cmd)) {
 		IWL_ERR(mvm,
-			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
-		spin_lock_bh(&mvm->time_event_lock);
-		iwl_mvm_te_clear_data(mvm, te_data);
-		spin_unlock_bh(&mvm->time_event_lock);
+			"Couldn't send the SESSION_PROTECTION_CMD\n");
+		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
+	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
+					 TU_TO_JIFFIES(100))) {
+		IWL_ERR(mvm,
+			"Failed to protect session until session protection\n");
 	}
 }
@@ -250,10 +250,12 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
 * @mvm: the mvm component
 * @vif: the virtual interface for which the protection issued
 * @duration: the duration of the protection
+ * @wait_for_notif: if true, will block until the start of the protection
 */
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
-					 u32 duration, u32 min_duration);
+					 u32 duration, u32 min_duration,
+					 bool wait_for_notif);
 
/**
 * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
@@ -8,7 +8,7 @@
 * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
 * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -234,7 +234,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
 		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
 	};
 	struct iwl_ext_dts_measurement_cmd extcmd = {
-		.control_mode = cpu_to_le32(DTS_AUTOMATIC),
+		.control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
 	};
 	u32 cmdid;
 
@@ -734,7 +734,8 @@ static struct thermal_zone_device_ops tzone_ops = {
 static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
 {
 	int i;
-	char name[] = "iwlwifi";
+	char name[16];
+	static atomic_t counter = ATOMIC_INIT(0);
 
 	if (!iwl_mvm_is_tt_in_fw(mvm)) {
 		mvm->tz_device.tzone = NULL;
@@ -744,6 +745,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
 
 	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
 
+	sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
 	mvm->tz_device.tzone = thermal_zone_device_register(name,
 							IWL_MAX_DTS_TRIPS,
 							IWL_WRITABLE_TRIPS_MSK,
@@ -1785,6 +1785,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
 		rates_max = rates_eid[1];
 		if (rates_max > MAX_RATES) {
 			lbs_deb_join("invalid rates");
+			rcu_read_unlock();
+			ret = -EINVAL;
 			goto out;
 		}
 		rates = cmd.bss.rates;
@@ -2884,6 +2884,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
 			vs_param_set->header.len =
 				cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
 				& 0x00FF) + 2);
+			if (le16_to_cpu(vs_param_set->header.len) >
+			    MWIFIEX_MAX_VSIE_LEN) {
+				mwifiex_dbg(priv->adapter, ERROR,
+					    "Invalid param length!\n");
+				break;
+			}
+
 			memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
 			       le16_to_cpu(vs_param_set->header.len));
 			*buffer += le16_to_cpu(vs_param_set->header.len) +
@@ -232,6 +232,7 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 
 	if (country_ie_len >
 	    (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+		rcu_read_unlock();
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "11D: country_ie_len overflow!, deauth AP\n");
 		return -EINVAL;
@@ -970,6 +970,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
 				 "WMM Parameter Set Count: %d\n",
 				 wmm_param_ie->qos_info_bitmap & mask);
 
+			if (wmm_param_ie->vend_hdr.len + 2 >
+			    sizeof(struct ieee_types_wmm_parameter))
+				break;
+
 			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
 			       wmm_ie, wmm_param_ie,
 			       wmm_param_ie->vend_hdr.len + 2);
@@ -92,8 +92,9 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
 
 static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
 {
-	u8 val, *eeprom = dev->mt76.eeprom.data;
+	u8 *eeprom = dev->mt76.eeprom.data;
+	u8 tx_mask, rx_mask, max_nss;
+	u32 val;
 
 	val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
 			eeprom[MT_EE_WIFI_CONF]);
@@ -281,27 +281,26 @@ static void rtw_wow_rx_dma_start(struct rtw_dev *rtwdev)
 	rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
 }
 
-static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
+static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
 {
-	bool ret;
-
 	/* wait 100ms for wow firmware to finish work */
 	msleep(100);
 
 	if (wow_enable) {
-		if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
-			ret = 0;
+		if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
+			goto wow_fail;
 	} else {
-		if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 &&
-		    rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0)
-			ret = 0;
+		if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
+		    rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
+			goto wow_fail;
 	}
 
-	if (ret)
-		rtw_err(rtwdev, "failed to check wow status %s\n",
-			wow_enable ? "enabled" : "disabled");
+	return 0;
 
-	return ret;
+wow_fail:
+	rtw_err(rtwdev, "failed to check wow status %s\n",
+		wow_enable ? "enabled" : "disabled");
+	return -EBUSY;
 }
 
 static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
@@ -728,7 +728,7 @@ struct bpf_struct_ops {
 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
-void bpf_struct_ops_init(struct btf *btf);
+void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
 bool bpf_struct_ops_get(const void *kdata);
 void bpf_struct_ops_put(const void *kdata);
 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
@@ -752,7 +752,10 @@ static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
 {
 	return NULL;
 }
-static inline void bpf_struct_ops_init(struct btf *btf) { }
+static inline void bpf_struct_ops_init(struct btf *btf,
+				       struct bpf_verifier_log *log)
+{
+}
 static inline bool bpf_try_module_get(const void *data, struct module *owner)
 {
 	return try_module_get(owner);
@@ -1448,14 +1448,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
 	u8         reserved_at_440[0x20];
 
-	u8         tls[0x1];
-	u8         reserved_at_461[0x2];
+	u8         reserved_at_460[0x3];
 	u8         log_max_uctx[0x5];
 	u8         reserved_at_468[0x3];
 	u8         log_max_umem[0x5];
 	u8         max_num_eqs[0x10];
 
-	u8         reserved_at_480[0x3];
+	u8         reserved_at_480[0x1];
+	u8         tls_tx[0x1];
+	u8         reserved_at_482[0x1];
 	u8         log_max_l2_table[0x5];
 	u8         reserved_at_488[0x8];
 	u8         log_uar_page_sz[0x10];
@@ -19,7 +19,7 @@
 #ifndef __B53_H
 #define __B53_H
 
-#include <linux/kernel.h>
+#include <linux/types.h>
 #include <linux/platform_data/dsa.h>
 
 struct b53_platform_data {
@@ -19,7 +19,7 @@
 #ifndef __MICROCHIP_KSZ_H
 #define __MICROCHIP_KSZ_H
 
-#include <linux/kernel.h>
+#include <linux/types.h>
 
 struct ksz_platform_data {
 	u32 chip_id;
@@ -1821,6 +1821,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 	return list_->qlen;
 }

+/**
+ *	skb_queue_len_lockless	- get queue length
+ *	@list_: list to measure
+ *
+ *	Return the length of an &sk_buff queue.
+ *	This variant can be used in lockless contexts.
+ */
+static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+{
+	return READ_ONCE(list_->qlen);
+}
+
 /**
  *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
  *	@list: queue to initialize
@@ -2026,7 +2038,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
 	struct sk_buff *next, *prev;

-	list->qlen--;
+	WRITE_ONCE(list->qlen, list->qlen - 1);
 	next = skb->next;
 	prev = skb->prev;
 	skb->next = skb->prev = NULL;
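The data-race fix pairs a WRITE_ONCE() on the locked writer side with READ_ONCE() on the lockless reader, so qlen is always stored and loaded in a single access and tools like KCSAN stop flagging the race. A userspace miniature of that pairing (the macros here are simplified stand-ins for the kernel's, not the real definitions):

#include <pthread.h>
#include <stdio.h>

/* simplified userspace stand-ins for the kernel macros */
#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static unsigned int qlen;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlink_one(void)		/* writer: holds the queue lock */
{
	pthread_mutex_lock(&lock);
	WRITE_ONCE(qlen, qlen - 1);	/* one plain store, never torn */
	pthread_mutex_unlock(&lock);
}

static unsigned int queue_len_lockless(void)	/* reader: no lock held */
{
	return READ_ONCE(qlen);		/* one plain load, well-defined */
}

int main(void)
{
	qlen = 2;
	unlink_one();
	printf("%u\n", queue_len_lockless());	/* prints 1 */
	return 0;
}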
@@ -47,11 +47,6 @@ struct ipxhdr {
 /* From af_ipx.c */
 extern int sysctl_ipx_pprop_broadcasting;

-static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
-{
-	return (struct ipxhdr *)skb_transport_header(skb);
-}
-
 struct ipx_interface {
 	/* IPX address */
 	__be32 if_netnum;
@@ -96,12 +96,11 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = {

 static const struct btf_type *module_type;

-void bpf_struct_ops_init(struct btf *btf)
+void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
 {
 	s32 type_id, value_id, module_id;
 	const struct btf_member *member;
 	struct bpf_struct_ops *st_ops;
-	struct bpf_verifier_log log = {};
 	const struct btf_type *t;
 	char value_name[128];
 	const char *mname;
@@ -172,7 +171,7 @@ void bpf_struct_ops_init(struct btf *btf)
 					   member->type,
 					   NULL);
 		if (func_proto &&
-		    btf_distill_func_proto(&log, btf,
+		    btf_distill_func_proto(log, btf,
 					   func_proto, mname,
 					   &st_ops->func_models[j])) {
 			pr_warn("Error in parsing func ptr %s in struct %s\n",
@@ -3643,7 +3643,7 @@ struct btf *btf_parse_vmlinux(void)
 		goto errout;
 	}

-	bpf_struct_ops_init(btf);
+	bpf_struct_ops_init(btf, log);

 	btf_verifier_env_free(env);
 	refcount_set(&btf->refcnt, 1);
@@ -3931,6 +3931,7 @@ again:

 	if (btf_type_is_ptr(mtype)) {
 		const struct btf_type *stype;
+		u32 id;

 		if (msize != size || off != moff) {
 			bpf_log(log,
@@ -3939,12 +3940,9 @@ again:
 			return -EACCES;
 		}

-		stype = btf_type_by_id(btf_vmlinux, mtype->type);
-		/* skip modifiers */
-		while (btf_type_is_modifier(stype))
-			stype = btf_type_by_id(btf_vmlinux, stype->type);
+		stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
 		if (btf_type_is_struct(stype)) {
-			*next_btf_id = mtype->type;
+			*next_btf_id = id;
 			return PTR_TO_BTF_ID;
 		}
 	}
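The helper collapses the old open-coded loop and, importantly, returns the resolved type id through &id, so *next_btf_id now names the struct itself rather than a const/typedef wrapper around it. A toy rendering of the resolution loop over a fake type table (the KIND_* values and table layout are illustrative, not the real BTF encoding):

#include <stdio.h>

enum kind { KIND_STRUCT, KIND_CONST, KIND_TYPEDEF };	/* toy kinds */

struct ty {
	enum kind kind;
	unsigned int next;	/* referenced type id, for modifiers */
};

/* id 0: unused; 1: a struct; 2: const -> 1; 3: typedef -> 2 */
static const struct ty table[] = {
	{ KIND_STRUCT, 0 },
	{ KIND_STRUCT, 0 },
	{ KIND_CONST, 1 },
	{ KIND_TYPEDEF, 2 },
};

static const struct ty *skip_modifiers(unsigned int id, unsigned int *res_id)
{
	while (table[id].kind != KIND_STRUCT)	/* peel const/typedef/... */
		id = table[id].next;
	*res_id = id;				/* resolved id, like &id above */
	return &table[id];
}

int main(void)
{
	unsigned int id;

	skip_modifiers(3, &id);
	printf("resolved to id %u\n", id);	/* prints 1 */
	return 0;
}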
@@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 	bpf_map_init_from_attr(&smap->map, attr);

+	nbuckets = roundup_pow_of_two(num_possible_cpus());
 	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
-	smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
-	nbuckets = 1U << smap->bucket_log;
+	nbuckets = max_t(u32, 2, nbuckets);
+	smap->bucket_log = ilog2(nbuckets);
 	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

 	ret = bpf_map_charge_init(&smap->map.memory, cost);
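The rewrite computes the bucket count first and clamps it to at least 2 before taking the log, which both avoids select_bucket()'s single-bucket undefined behavior and sidesteps the oversized constant expression the old one-liner produced. The arithmetic in isolation, with the helpers reimplemented for illustration:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)	/* smallest 2^k >= n */
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)		/* floor(log2(n)) */
{
	unsigned int l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	for (unsigned int cpus = 1; cpus <= 8; cpus++) {
		unsigned int nbuckets = roundup_pow_of_two(cpus);

		if (nbuckets < 2)	/* max_t(u32, 2, nbuckets) */
			nbuckets = 2;
		printf("cpus=%u nbuckets=%u bucket_log=%u\n",
		       cpus, nbuckets, ilog2(nbuckets));
	}
	return 0;	/* one possible CPU now yields 2 buckets, log 1 */
}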
@@ -3986,6 +3986,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
 		goto out_unlock;
 	}

+	/* return 0 if there is no further data to read */
+	if (start_offset >= region->size) {
+		err = 0;
+		goto out_unlock;
+	}
+
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			  &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
 			  DEVLINK_CMD_REGION_READ);
@@ -1000,8 +1000,10 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
 {
 	int cpu;

-	if (!monitor_hw)
+	if (!monitor_hw) {
 		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
+		return;
+	}

 	monitor_hw = false;
@@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
 	int i;

 	synchronize_rcu();
-	rcu_read_lock();
 	raw_spin_lock_bh(&stab->lock);
 	for (i = 0; i < stab->map.max_entries; i++) {
 		struct sock **psk = &stab->sks[i];
@@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
 		sk = xchg(psk, NULL);
 		if (sk) {
+			lock_sock(sk);
+			rcu_read_lock();
 			sock_map_unref(sk, psk);
+			rcu_read_unlock();
+			release_sock(sk);
 		}
 	}
 	raw_spin_unlock_bh(&stab->lock);
-	rcu_read_unlock();

 	/* wait for psock readers accessing its map link */
 	synchronize_rcu();

 	bpf_map_area_free(stab->sks);
@@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!sock_map_sk_is_suitable(sk) ||
-	    sk->sk_state != TCP_ESTABLISHED) {
+	if (!sock_map_sk_is_suitable(sk)) {
 		ret = -EOPNOTSUPP;
 		goto out;
 	}

 	sock_map_sk_acquire(sk);
-	ret = sock_map_update_common(map, idx, sk, flags);
+	if (sk->sk_state != TCP_ESTABLISHED)
+		ret = -EOPNOTSUPP;
+	else
+		ret = sock_map_update_common(map, idx, sk, flags);
 	sock_map_sk_release(sk);
 out:
 	fput(sock->file);
@@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!sock_map_sk_is_suitable(sk) ||
-	    sk->sk_state != TCP_ESTABLISHED) {
+	if (!sock_map_sk_is_suitable(sk)) {
 		ret = -EOPNOTSUPP;
 		goto out;
 	}

 	sock_map_sk_acquire(sk);
-	ret = sock_hash_update_common(map, key, sk, flags);
+	if (sk->sk_state != TCP_ESTABLISHED)
+		ret = -EOPNOTSUPP;
+	else
+		ret = sock_hash_update_common(map, key, sk, flags);
 	sock_map_sk_release(sk);
 out:
 	fput(sock->file);
@@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
 	int i;

-	rcu_read_lock();
+	synchronize_rcu();
 	for (i = 0; i < htab->buckets_num; i++) {
 		bucket = sock_hash_select_bucket(htab, i);
 		raw_spin_lock_bh(&bucket->lock);
 		hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
 			hlist_del_rcu(&elem->node);
+			lock_sock(elem->sk);
+			rcu_read_lock();
 			sock_map_unref(elem->sk, elem);
+			rcu_read_unlock();
+			release_sock(elem->sk);
 		}
 		raw_spin_unlock_bh(&bucket->lock);
 	}
-	rcu_read_unlock();

 	/* wait for psock readers accessing its map link */
 	synchronize_rcu();

 	bpf_map_area_free(htab->buckets);
 	kfree(htab);
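The tear-down fix obeys two ordering rules: synchronize_rcu(), which sleeps, runs before and after the critical section rather than inside it, and the sleeping lock_sock() is taken before entering the RCU read-side section, never under it. A userspace analogy with pthreads, where the mutex stands in for the sock lock and the rwlock's read side for the non-sleeping RCU reader (an analogy only, not the kernel primitives):

#include <pthread.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER; /* may block */
static pthread_rwlock_t rcu = PTHREAD_RWLOCK_INITIALIZER;     /* "read side" */

static void unref(void) { /* the actual tear-down work */ }

static void teardown_one(void)
{
	pthread_mutex_lock(&sock_lock);	/* blocking lock taken FIRST ... */
	pthread_rwlock_rdlock(&rcu);	/* ... then the non-sleeping section */
	unref();
	pthread_rwlock_unlock(&rcu);
	pthread_mutex_unlock(&sock_lock);
}

int main(void)
{
	/* "synchronize" before: no updates left in flight */
	pthread_rwlock_wrlock(&rcu); pthread_rwlock_unlock(&rcu);
	teardown_one();
	/* "synchronize" after: wait out remaining readers, then free */
	pthread_rwlock_wrlock(&rcu); pthread_rwlock_unlock(&rcu);
	return 0;
}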
@@ -5718,6 +5718,9 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
 	struct nlattr *tb[IFLA_INET6_MAX + 1];
 	int err;

+	if (!idev)
+		return -EAFNOSUPPORT;
+
 	if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
 		BUG();
@@ -24,57 +24,12 @@

 #define MPTCP_SAME_STATE TCP_MAX_STATES

-static void __mptcp_close(struct sock *sk, long timeout);
-
-static const struct proto_ops *tcp_proto_ops(struct sock *sk)
-{
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-	if (sk->sk_family == AF_INET6)
-		return &inet6_stream_ops;
+struct mptcp6_sock {
+	struct mptcp_sock msk;
+	struct ipv6_pinfo np;
+};
 #endif
-	return &inet_stream_ops;
-}
-
-/* MP_CAPABLE handshake failed, convert msk to plain tcp, replacing
- * socket->sk and stream ops and destroying msk
- * return the msk socket, as we can't access msk anymore after this function
- * completes
- * Called with msk lock held, releases such lock before returning
- */
-static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk,
-					      struct sock *ssk)
-{
-	struct mptcp_subflow_context *subflow;
-	struct socket *sock;
-	struct sock *sk;
-
-	sk = (struct sock *)msk;
-	sock = sk->sk_socket;
-	subflow = mptcp_subflow_ctx(ssk);
-
-	/* detach the msk socket */
-	list_del_init(&subflow->node);
-	sock_orphan(sk);
-	sock->sk = NULL;
-
-	/* socket is now TCP */
-	lock_sock(ssk);
-	sock_graft(ssk, sock);
-	if (subflow->conn) {
-		/* We can't release the ULP data on a live socket,
-		 * restore the tcp callback
-		 */
-		mptcp_subflow_tcp_fallback(ssk, subflow);
-		sock_put(subflow->conn);
-		subflow->conn = NULL;
-	}
-	release_sock(ssk);
-	sock->ops = tcp_proto_ops(ssk);
-
-	/* destroy the left-over msk sock */
-	__mptcp_close(sk, 0);
-	return sock;
-}
-
 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
  * completed yet or has failed, return the subflow socket.
@@ -93,10 +48,6 @@ static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
 	return msk->first && !sk_is_mptcp(msk->first);
 }

-/* if the mp_capable handshake has failed, it fallbacks msk to plain TCP,
- * releases the socket lock and returns a reference to the now TCP socket.
- * Otherwise returns NULL
- */
 static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 {
 	sock_owned_by_me((const struct sock *)msk);
@@ -105,15 +56,11 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 		return NULL;

 	if (msk->subflow) {
-		/* the first subflow is an active connection, discart the
-		 * paired socket
-		 */
-		msk->subflow->sk = NULL;
-		sock_release(msk->subflow);
-		msk->subflow = NULL;
+		release_sock((struct sock *)msk);
+		return msk->subflow;
 	}

-	return __mptcp_fallback_to_tcp(msk, msk->first);
+	return NULL;
 }

 static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
@@ -640,12 +587,14 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how)
 }

-/* Called with msk lock held, releases such lock before returning */
-static void __mptcp_close(struct sock *sk, long timeout)
+static void mptcp_close(struct sock *sk, long timeout)
 {
 	struct mptcp_subflow_context *subflow, *tmp;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	LIST_HEAD(conn_list);

+	lock_sock(sk);
+
 	mptcp_token_destroy(msk->token);
 	inet_sk_state_store(sk, TCP_CLOSE);

@@ -662,12 +611,6 @@ static void __mptcp_close(struct sock *sk, long timeout)
 	sk_common_release(sk);
 }

-static void mptcp_close(struct sock *sk, long timeout)
-{
-	lock_sock(sk);
-	__mptcp_close(sk, timeout);
-}
-
 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 {
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -691,6 +634,30 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
 }

+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
+{
+	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
+
+	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+}
+#endif
+
+struct sock *mptcp_sk_clone_lock(const struct sock *sk)
+{
+	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
+
+	if (!nsk)
+		return NULL;
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+	if (nsk->sk_family == AF_INET6)
+		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
+#endif
+
+	return nsk;
+}
+
 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
 				 bool kern)
 {
@@ -721,7 +688,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
 	lock_sock(sk);

 	local_bh_disable();
-	new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
+	new_mptcp_sock = mptcp_sk_clone_lock(sk);
 	if (!new_mptcp_sock) {
 		*err = -ENOBUFS;
 		local_bh_enable();
@@ -1270,8 +1237,7 @@ int mptcp_proto_v6_init(void)
 	strcpy(mptcp_v6_prot.name, "MPTCPv6");
 	mptcp_v6_prot.slab = NULL;
 	mptcp_v6_prot.destroy = mptcp_v6_destroy;
-	mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
-				 sizeof(struct ipv6_pinfo);
+	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

 	err = proto_register(&mptcp_v6_prot, 1);
 	if (err)
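The mptcp6_sock trick works because sk_clone_lock() copies obj_size bytes of the parent socket, and keeping ipv6_pinfo as the trailing member lets mptcp_inet6_sk() recover it with pure pointer arithmetic; pinet6 must then be re-pointed at the clone's own copy rather than the parent's. The offset computation in miniature, with toy struct names standing in for the kernel ones:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sock6 { int dummy; };		/* stands in for mptcp_sock */
struct np { int flags; };		/* stands in for ipv6_pinfo  */

struct sock6_full {			/* stands in for mptcp6_sock */
	struct sock6 sk;
	struct np np;			/* must stay the LAST member */
};

static struct np *inet6_area(struct sock6 *sk)
{
	size_t off = sizeof(struct sock6_full) - sizeof(struct np);

	return (struct np *)((char *)sk + off);	/* same math as mptcp_inet6_sk */
}

int main(void)
{
	struct sock6_full *orig = calloc(1, sizeof(*orig));
	struct sock6_full *clone = malloc(sizeof(*clone));

	orig->np.flags = 42;
	memcpy(clone, orig, sizeof(*orig));	/* what sk_clone_lock() does */
	printf("%d\n", inet6_area(&clone->sk)->flags);	/* 42, clone's copy */
	free(orig);
	free(clone);
	return 0;
}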
@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 }

 /*
- * Final call destruction under RCU.
+ * Final call destruction - but must be done in process context.
  */
-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+static void rxrpc_destroy_call(struct work_struct *work)
 {
-	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
 	struct rxrpc_net *rxnet = call->rxnet;

 	rxrpc_put_connection(call->conn);
@@ -578,6 +578,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
 	wake_up_var(&rxnet->nr_calls);
 }

+/*
+ * Final call destruction under RCU.
+ */
+static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+{
+	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+
+	if (in_softirq()) {
+		INIT_WORK(&call->processor, rxrpc_destroy_call);
+		if (!rxrpc_queue_work(&call->processor))
+			BUG();
+	} else {
+		rxrpc_destroy_call(&call->processor);
+	}
+}
+
 /*
  * clean up a call
 */
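The RCU callback can fire in softirq context, where the final cleanup (which takes sleeping locks) is forbidden, so it re-queues itself onto a workqueue and only runs inline when already in process context. The same "defer if atomic" shape in userspace, with a flag and a thread standing in for in_softirq() and the workqueue (stand-ins only, not kernel API):

#include <pthread.h>
#include <stdio.h>

static int in_atomic_context;	/* stand-in for in_softirq() */

static void destroy_call(void)	/* must run where sleeping is allowed */
{
	printf("destroyed in process context\n");
}

static void *worker(void *arg)
{
	(void)arg;
	destroy_call();		/* workqueue-style deferred execution */
	return NULL;
}

static void rcu_destroy(void)	/* final callback, maybe in "softirq" */
{
	if (in_atomic_context) {
		pthread_t t;	/* INIT_WORK + queue_work analogue */

		pthread_create(&t, NULL, worker, NULL);
		pthread_join(t, NULL);
	} else {
		destroy_call();	/* safe to run inline */
	}
}

int main(void)
{
	in_atomic_context = 1;
	rcu_destroy();
	return 0;
}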
@@ -171,8 +171,6 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,

 	_enter("%d,%x", conn->debug_id, call->cid);

-	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-
 	if (rcu_access_pointer(chan->call) == call) {
 		/* Save the result of the call so that we can repeat it if necessary
 		 * through the channel, whilst disposing of the actual call record.
@@ -225,6 +223,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 	__rxrpc_disconnect_call(conn, call);
 	spin_unlock(&conn->channel_lock);

+	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
 	conn->idle_timestamp = jiffies;
 }
@@ -365,7 +365,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,

 	err = tcindex_filter_result_init(&new_filter_result, net);
 	if (err < 0)
-		goto errout1;
+		goto errout_alloc;
 	if (old_r)
 		cr = r->res;

@@ -484,7 +484,6 @@ errout_alloc:
 		tcindex_free_perfect_hash(cp);
 	else if (balloc == 2)
 		kfree(cp->h);
-errout1:
 	tcf_exts_destroy(&new_filter_result.exts);
 errout:
 	kfree(cp);
@@ -349,9 +349,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

-		kfree_skb(skb);
 		len_dropped += qdisc_pkt_len(skb);
 		num_dropped += 1;
+		rtnl_kfree_skbs(skb, skb);
 	}
 	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);
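The reordering matters because qdisc_pkt_len(skb) dereferences the skb, so it must run before the skb is handed to the free routine; the original freed first and then touched freed memory. The same bug shape in plain C:

#include <stdio.h>
#include <stdlib.h>

struct pkt { unsigned int len; };

int main(void)
{
	unsigned int len_dropped = 0, num_dropped = 0;
	struct pkt *p = malloc(sizeof(*p));

	p->len = 1500;

	/* buggy order:  free(p); len_dropped += p->len;   <- use after free */
	len_dropped += p->len;	/* read everything you need first ... */
	num_dropped += 1;
	free(p);		/* ... and only then release the packet */

	printf("dropped %u pkts, %u bytes\n", num_dropped, len_dropped);
	return 0;
}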
@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(taprio_list_lock);

 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+#define TAPRIO_FLAGS_INVALID U32_MAX

 struct sched_entry {
 	struct list_head list;
@@ -766,6 +767,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
+	[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
 };

 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
@@ -1367,6 +1369,33 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
 	return 0;
 }

+/* The semantics of the 'flags' argument in relation to 'change()'
+ * requests, are interpreted following two rules (which are applied in
+ * this order): (1) an omitted 'flags' argument is interpreted as
+ * zero; (2) the 'flags' of a "running" taprio instance cannot be
+ * changed.
+ */
+static int taprio_new_flags(const struct nlattr *attr, u32 old,
+			    struct netlink_ext_ack *extack)
+{
+	u32 new = 0;
+
+	if (attr)
+		new = nla_get_u32(attr);
+
+	if (old != TAPRIO_FLAGS_INVALID && old != new) {
+		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
+		return -EOPNOTSUPP;
+	}
+
+	if (!taprio_flags_valid(new)) {
+		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
+		return -EINVAL;
+	}
+
+	return new;
+}
+
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 			 struct netlink_ext_ack *extack)
 {
@@ -1375,7 +1404,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	struct taprio_sched *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
 	struct tc_mqprio_qopt *mqprio = NULL;
-	u32 taprio_flags = 0;
 	unsigned long flags;
 	ktime_t start;
 	int i, err;
@@ -1388,21 +1416,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
 		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

-	if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
-		taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);
-
-		if (q->flags != 0 && q->flags != taprio_flags) {
-			NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
-			return -EOPNOTSUPP;
-		} else if (!taprio_flags_valid(taprio_flags)) {
-			NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
-			return -EINVAL;
-		}
-
-		q->flags = taprio_flags;
-	}
+	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
+			       q->flags, extack);
+	if (err < 0)
+		return err;
+
+	q->flags = err;

-	err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
+	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
 	if (err < 0)
 		return err;

@@ -1444,7 +1465,20 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,

 	taprio_set_picos_per_byte(dev, q);

-	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+	if (mqprio) {
+		netdev_set_num_tc(dev, mqprio->num_tc);
+		for (i = 0; i < mqprio->num_tc; i++)
+			netdev_set_tc_queue(dev, i,
+					    mqprio->count[i],
+					    mqprio->offset[i]);
+
+		/* Always use supplied priority mappings */
+		for (i = 0; i <= TC_BITMASK; i++)
+			netdev_set_prio_tc_map(dev, i,
+					       mqprio->prio_tc_map[i]);
+	}
+
+	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
 		err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
 	else
 		err = taprio_disable_offload(dev, q, extack);
@@ -1464,27 +1498,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
 	}

-	if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
-	    !FULL_OFFLOAD_IS_ENABLED(taprio_flags) &&
+	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
+	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
 	    !hrtimer_active(&q->advance_timer)) {
 		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
 		q->advance_timer.function = advance_sched;
 	}

-	if (mqprio) {
-		netdev_set_num_tc(dev, mqprio->num_tc);
-		for (i = 0; i < mqprio->num_tc; i++)
-			netdev_set_tc_queue(dev, i,
-					    mqprio->count[i],
-					    mqprio->offset[i]);
-
-		/* Always use supplied priority mappings */
-		for (i = 0; i <= TC_BITMASK; i++)
-			netdev_set_prio_tc_map(dev, i,
-					       mqprio->prio_tc_map[i]);
-	}
-
-	if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) {
+	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
 		q->dequeue = taprio_dequeue_offload;
 		q->peek = taprio_peek_offload;
 	} else {
@@ -1501,9 +1522,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 		goto unlock;
 	}

-	if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
-		setup_txtime(q, new_admin, start);
+	setup_txtime(q, new_admin, start);

+	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
 		if (!oper) {
 			rcu_assign_pointer(q->oper_sched, new_admin);
 			err = 0;
@@ -1528,7 +1549,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,

 		spin_unlock_irqrestore(&q->current_entry_lock, flags);

-		if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
 			taprio_offload_config_changed(q);
 	}

@@ -1567,7 +1588,7 @@ static void taprio_destroy(struct Qdisc *sch)
 	}
 	q->qdiscs = NULL;

-	netdev_set_num_tc(dev, 0);
+	netdev_reset_tc(dev);

 	if (q->oper_sched)
 		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
@@ -1597,6 +1618,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
 	 * and get the valid one on taprio_change().
 	 */
 	q->clockid = -1;
+	q->flags = TAPRIO_FLAGS_INVALID;

 	spin_lock(&taprio_list_lock);
 	list_add(&q->taprio_list, &taprio_list);
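taprio_new_flags() centralizes the contract spelled out in its comment: an absent attribute means 0, and a "running" instance (old != TAPRIO_FLAGS_INVALID) may not change its flags; returning either the value or a negative errno is what lets the caller write q->flags = err. A standalone rendering of that contract with a toy validity mask (same control flow, invented constants):

#include <stdio.h>
#include <stdint.h>

#define FLAGS_INVALID  UINT32_MAX	/* "never configured" sentinel */
#define KNOWN_FLAGS    0x3u		/* toy stand-in for taprio's mask */

static int new_flags(const uint32_t *attr, uint32_t old)
{
	uint32_t new = attr ? *attr : 0;	/* omitted attribute -> 0 */

	if (old != FLAGS_INVALID && old != new)
		return -95;		/* -EOPNOTSUPP: running, can't change */
	if (new & ~KNOWN_FLAGS)
		return -22;		/* -EINVAL: unknown flag bits */
	return (int)new;		/* valid value doubles as return code */
}

int main(void)
{
	uint32_t want = 0x1;
	int err = new_flags(&want, FLAGS_INVALID);	/* first configure: ok */

	if (err < 0)
		return 1;
	printf("flags=%d\n", err);
	printf("change attempt -> %d\n", new_flags(NULL, (uint32_t)err));
	return 0;
}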
@@ -189,11 +189,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 }

-static inline int unix_recvq_full(struct sock const *sk)
+static inline int unix_recvq_full(const struct sock *sk)
 {
 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }

+static inline int unix_recvq_full_lockless(const struct sock *sk)
+{
+	return skb_queue_len_lockless(&sk->sk_receive_queue) >
+		READ_ONCE(sk->sk_max_ack_backlog);
+}
+
 struct sock *unix_peer_get(struct sock *s)
 {
 	struct sock *peer;
@@ -1758,7 +1764,8 @@ restart_locked:
 	 *      - unix_peer(sk) == sk by time of get but disconnected before lock
 	 */
 	if (other != sk &&
-	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+	    unlikely(unix_peer(other) != sk &&
+	    unix_recvq_full_lockless(other))) {
 		if (timeo) {
 			timeo = unix_wait_for_peer(other, timeo);
@@ -83,7 +83,6 @@ static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
 static u32 opt_umem_flags;
 static int opt_unaligned_chunks;
 static int opt_mmap_flags;
-static u32 opt_xdp_bind_flags;
 static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
 static int opt_timeout = 1000;
 static bool opt_need_wakeup = true;
@@ -789,7 +788,8 @@ static void kick_tx(struct xsk_socket_info *xsk)
 	int ret;

 	ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
-	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
+	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
+	    errno == EBUSY || errno == ENETDOWN)
 		return;
 	exit_with_error(errno);
 }
@@ -580,7 +580,7 @@ probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
 	res = bpf_probe_large_insn_limit(ifindex);
 	print_bool_feature("have_large_insn_limit",
 			   "Large program size limit",
-			   "HAVE_LARGE_INSN_LIMIT",
+			   "LARGE_INSN_LIMIT",
 			   res, define_prefix);
 }
@@ -536,7 +536,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
 		buf = (unsigned char *)(info->jited_prog_insns);
 		member_len = info->jited_prog_len;
 	} else {	/* DUMP_XLATED */
-		if (info->xlated_prog_len == 0) {
+		if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
 			p_err("error retrieving insn dump: kernel.kptr_restrict set?");
 			return -1;
 		}
@@ -41,7 +41,7 @@ clean:

 $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
 	$(call msg,BINARY,$@)
-	$(Q)$(CC) $(CFLAGS) -lelf -lz $^ -o $@
+	$(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@

 $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
 			$(OUTPUT)/runqslower.bpf.o
@@ -75,7 +75,7 @@ $(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL)
 	fi
 	$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@

-$(BPFOBJ): | $(OUTPUT)
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
 	$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
 		    OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c | 74 (new file)
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+
+#include "test_progs.h"
+
+static int connected_socket_v4(void)
+{
+	struct sockaddr_in addr = {
+		.sin_family = AF_INET,
+		.sin_port = htons(80),
+		.sin_addr = { inet_addr("127.0.0.1") },
+	};
+	socklen_t len = sizeof(addr);
+	int s, repair, err;
+
+	s = socket(AF_INET, SOCK_STREAM, 0);
+	if (CHECK_FAIL(s == -1))
+		goto error;
+
+	repair = TCP_REPAIR_ON;
+	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
+	if (CHECK_FAIL(err))
+		goto error;
+
+	err = connect(s, (struct sockaddr *)&addr, len);
+	if (CHECK_FAIL(err))
+		goto error;
+
+	repair = TCP_REPAIR_OFF_NO_WP;
+	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
+	if (CHECK_FAIL(err))
+		goto error;
+
+	return s;
+error:
+	perror(__func__);
+	close(s);
+	return -1;
+}
+
+/* Create a map, populate it with one socket, and free the map. */
+static void test_sockmap_create_update_free(enum bpf_map_type map_type)
+{
+	const int zero = 0;
+	int s, map, err;
+
+	s = connected_socket_v4();
+	if (CHECK_FAIL(s == -1))
+		return;
+
+	map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
+	if (CHECK_FAIL(map == -1)) {
+		perror("bpf_create_map");
+		goto out;
+	}
+
+	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
+	if (CHECK_FAIL(err)) {
+		perror("bpf_map_update");
+		goto out;
+	}
+
+out:
+	close(map);
+	close(s);
+}
+
+void test_sockmap_basic(void)
+{
+	if (test__start_subtest("sockmap create_update_free"))
+		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
+	if (test__start_subtest("sockhash create_update_free"))
+		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
+}
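The TCP_REPAIR dance in connected_socket_v4() is what lets this test run without a server: with repair mode on, connect() moves the socket straight to ESTABLISHED without putting any packets on the wire, and TCP_REPAIR_OFF_NO_WP turns repair off without sending window probes, leaving the socket in exactly the state that sockmap/sockhash updates check for.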
@@ -46,7 +46,7 @@ void test_trampoline_count(void)
 	const char *fentry_name = "fentry/__set_task_comm";
 	const char *fexit_name = "fexit/__set_task_comm";
 	const char *object = "test_trampoline_count.o";
-	struct inst inst[MAX_TRAMP_PROGS] = { 0 };
+	struct inst inst[MAX_TRAMP_PROGS] = {};
 	int err, i = 0, duration = 0;
 	struct bpf_object *obj;
 	struct bpf_link *link;
@@ -14,6 +14,7 @@ ALL_TESTS="
 	ipv4_plen
 	ipv4_replay
 	ipv4_flush
+	ipv4_local_replace
 	ipv6_add
 	ipv6_metric
 	ipv6_append_single
@@ -26,6 +27,7 @@ ALL_TESTS="
 	ipv6_delete_multipath
 	ipv6_replay_single
 	ipv6_replay_multipath
+	ipv6_local_replace
 "
 NUM_NETIFS=0
 source $lib_dir/lib.sh
@@ -89,6 +91,43 @@ ipv4_flush()
 	fib_ipv4_flush_test "testns1"
 }

+ipv4_local_replace()
+{
+	local ns="testns1"
+
+	RET=0
+
+	ip -n $ns link add name dummy1 type dummy
+	ip -n $ns link set dev dummy1 up
+
+	ip -n $ns route add table local 192.0.2.1/32 dev dummy1
+	fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false
+	check_err $? "Local table route not in hardware when should"
+
+	ip -n $ns route add table main 192.0.2.1/32 dev dummy1
+	fib4_trap_check $ns "table main 192.0.2.1/32 dev dummy1" true
+	check_err $? "Main table route in hardware when should not"
+
+	fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false
+	check_err $? "Local table route was replaced when should not"
+
+	# Test that local routes can replace routes in main table.
+	ip -n $ns route add table main 192.0.2.2/32 dev dummy1
+	fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" false
+	check_err $? "Main table route not in hardware when should"
+
+	ip -n $ns route add table local 192.0.2.2/32 dev dummy1
+	fib4_trap_check $ns "table local 192.0.2.2/32 dev dummy1" false
+	check_err $? "Local table route did not replace route in main table when should"
+
+	fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" true
+	check_err $? "Main table route was not replaced when should"
+
+	log_test "IPv4 local table route replacement"
+
+	ip -n $ns link del dev dummy1
+}
+
 ipv6_add()
 {
 	fib_ipv6_add_test "testns1"
@@ -149,6 +188,43 @@ ipv6_replay_multipath()
 	fib_ipv6_replay_multipath_test "testns1" "$DEVLINK_DEV"
 }

+ipv6_local_replace()
+{
+	local ns="testns1"
+
+	RET=0
+
+	ip -n $ns link add name dummy1 type dummy
+	ip -n $ns link set dev dummy1 up
+
+	ip -n $ns route add table local 2001:db8:1::1/128 dev dummy1
+	fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false
+	check_err $? "Local table route not in hardware when should"
+
+	ip -n $ns route add table main 2001:db8:1::1/128 dev dummy1
+	fib6_trap_check $ns "table main 2001:db8:1::1/128 dev dummy1" true
+	check_err $? "Main table route in hardware when should not"
+
+	fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false
+	check_err $? "Local table route was replaced when should not"
+
+	# Test that local routes can replace routes in main table.
+	ip -n $ns route add table main 2001:db8:1::2/128 dev dummy1
+	fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" false
+	check_err $? "Main table route not in hardware when should"
+
+	ip -n $ns route add table local 2001:db8:1::2/128 dev dummy1
+	fib6_trap_check $ns "table local 2001:db8:1::2/128 dev dummy1" false
+	check_err $? "Local route route did not replace route in main table when should"
+
+	fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" true
+	check_err $? "Main table route was not replaced when should"
+
+	log_test "IPv6 local table route replacement"
+
+	ip -n $ns link del dev dummy1
+}
+
 setup_prepare()
 {
 	ip netns add testns1
@@ -634,6 +634,14 @@ static void check_getpeername_connect(int fd)
 		cfg_host, a, cfg_port, b);
 }

+static void maybe_close(int fd)
+{
+	unsigned int r = rand();
+
+	if (r & 1)
+		close(fd);
+}
+
 int main_loop_s(int listensock)
 {
 	struct sockaddr_storage ss;
@@ -657,6 +665,7 @@ int main_loop_s(int listensock)
 	salen = sizeof(ss);
 	remotesock = accept(listensock, (struct sockaddr *)&ss, &salen);
 	if (remotesock >= 0) {
+		maybe_close(listensock);
 		check_sockaddr(pf, &ss, salen);
 		check_getpeername(remotesock, &ss, salen);
@@ -38,9 +38,8 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; }
 ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; }
 ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; }
 sleep() { read -t "$1" -N 1 || true; }
-waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; }
-waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; }
-waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; }
+waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; }
+waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; }
 waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; }

 cleanup() {
@@ -119,22 +118,22 @@ tests() {

 	# TCP over IPv4
 	n2 iperf3 -s -1 -B 192.168.241.2 &
-	waitiperf $netns2
+	waitiperf $netns2 $!
 	n1 iperf3 -Z -t 3 -c 192.168.241.2

 	# TCP over IPv6
 	n1 iperf3 -s -1 -B fd00::1 &
-	waitiperf $netns1
+	waitiperf $netns1 $!
 	n2 iperf3 -Z -t 3 -c fd00::1

 	# UDP over IPv4
 	n1 iperf3 -s -1 -B 192.168.241.1 &
-	waitiperf $netns1
+	waitiperf $netns1 $!
 	n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1

 	# UDP over IPv6
 	n2 iperf3 -s -1 -B fd00::2 &
-	waitiperf $netns2
+	waitiperf $netns2 $!
 	n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2
 }

@@ -207,7 +206,7 @@ n1 ping -W 1 -c 1 192.168.241.2
 n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24
 exec 4< <(n1 ncat -l -u -p 1111)
 ncat_pid=$!
-waitncatudp $netns1
+waitncatudp $netns1 $ncat_pid
 n2 ncat -u 192.168.241.1 1111 <<<"X"
 read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]]
 kill $ncat_pid
@@ -216,7 +215,7 @@ n1 wg set wg0 peer "$more_specific_key" allowed-ips 192.168.241.2/32
 n2 wg set wg0 listen-port 9997
 exec 4< <(n1 ncat -l -u -p 1111)
 ncat_pid=$!
-waitncatudp $netns1
+waitncatudp $netns1 $ncat_pid
 n2 ncat -u 192.168.241.1 1111 <<<"X"
 ! read -r -N 1 -t 1 out <&4 || false
 kill $ncat_pid
@@ -516,6 +515,12 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0,10.0.0.0/8,100.0.0.0/10,172.16.
 n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0
 n0 wg set wg0 peer "$pub2" remove
+low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= )
+n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer }
+[[ -z $(n0 wg show wg0 peers) ]]
+n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer }
+[[ -z $(n0 wg show wg0 peers) ]]
 ip0 link del wg0

 declare -A objects
@@ -1,5 +1,4 @@
 CONFIG_LOCALVERSION="-debug"
-CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_POINTER=y
 CONFIG_STACK_VALIDATION=y