Merge tag 'net-5.10-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Networking fixes for 5.10-rc5, including fixes from the WiFi
  (mac80211), can and bpf trees (including the strncpy_from_user fix).

  Current release - regressions:

   - mac80211: fix memory leak of filtered powersave frames

   - mac80211: free sta in sta_info_insert_finish() on errors to avoid
     sleeping in atomic context

   - netlabel: fix an uninitialized variable warning added in -rc4

  Previous release - regressions:

   - vsock: forward all packets to the host when no H2G is registered,
     un-breaking AWS Nitro Enclaves

   - net: Exempt multicast addresses from five-second neighbor lifetime
     requirement, decreasing the chances neighbor tables fill up

   - net/tls: fix corrupted data in recvmsg

   - qed: fix ILT configuration of SRC block

   - can: m_can: process interrupt only when not runtime suspended

  Previous release - always broken:

   - page_frag: Recover from memory pressure by not recycling pages
     allocated from the reserves

   - strncpy_from_user: Mask out bytes after NUL terminator

   - ip_tunnels: Set tunnel option flag only when tunnel metadata is
     present, always setting it confuses Open vSwitch

   - bpf, sockmap:
      - Fix partial copy_page_to_iter so progress can still be made
      - Fix socket memory accounting and obeying SO_RCVBUF

   - net: Have netpoll bring up the DSA management interface

   - net: bridge: add missing counters to ndo_get_stats64 callback

   - tcp: bbr: only postpone PROBE_RTT if the RTT is < current min_rtt

   - enetc: Work around an MDIO register access HW bug

   - net/ncsi: move netlink family registration to a subsystem init,
     instead of tying it to driver probe

   - net: ftgmac100: unregister NC-SI when removing driver to avoid
     crash

   - lan743x:
      - prevent interrupt storm on open
      - fix freeing skbs in the wrong context

   - net/mlx5e: Fix socket refcount leak on kTLS RX resync

   - net: dsa: mv88e6xxx: Avoid VLAN database corruption on 6097

   - fix 21 unset return codes and other mistakes on error paths,
     mostly detected by the Hulk Robot"

* tag 'net-5.10-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (115 commits)
  fail_function: Remove a redundant mutex unlock
  selftest/bpf: Test bpf_probe_read_user_str() strips trailing bytes after NUL
  lib/strncpy_from_user.c: Mask out bytes after NUL terminator.
  net/smc: fix direct access to ib_gid_addr->ndev in smc_ib_determine_gid()
  net/smc: fix matching of existing link groups
  ipv6: Remove dependency of ipv6_frag_thdr_truncated on ipv6 module
  libbpf: Fix VERSIONED_SYM_COUNT number parsing
  net/mlx4_core: Fix init_hca fields offset
  atm: nicstar: Unmap DMA on send error
  page_frag: Recover from memory pressure
  net: dsa: mv88e6xxx: Wait for EEPROM done after HW reset
  mlxsw: core: Use variable timeout for EMAD retries
  mlxsw: Fix firmware flashing
  net: Have netpoll bring-up DSA management interface
  atl1e: fix error return code in atl1e_probe()
  atl1c: fix error return code in atl1c_probe()
  ah6: fix error return code in ah6_input()
  net: usb: qmi_wwan: Set DTR quirk for MR400
  can: m_can: process interrupt only when not runtime suspended
  can: flexcan: flexcan_chip_start(): fix erroneous flexcan_transceiver_enable() during bus-off recovery
  ...
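As background on the strncpy_from_user entry above: the bug was that source bytes located after the NUL terminator could end up in the destination, leaking uninitialized data. A minimal sketch of the idea (a hypothetical helper, not the kernel's actual word-at-a-time implementation in lib/strncpy_from_user.c):

/* Hypothetical illustration only: zero every destination byte after
 * the NUL terminator so stale source bytes cannot leak past the end
 * of the copied string.
 */
static void mask_bytes_after_nul(char *dst, size_t count)
{
	size_t i = 0;

	while (i < count && dst[i] != '\0')
		i++;			/* find the terminator */
	for (; i < count; i++)
		dst[i] = '\0';		/* zero out the tail */
}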
commit 4d02da974e

diff --git a/MAINTAINERS b/MAINTAINERS
@@ -3233,10 +3233,10 @@ F: drivers/iio/accel/bma400*
BPF (Safe dynamic programs and tools)
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
+M: Andrii Nakryiko <andrii@kernel.org>
R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
-R: Andrii Nakryiko <andrii@kernel.org>
R: John Fastabend <john.fastabend@gmail.com>
R: KP Singh <kpsingh@chromium.org>
L: netdev@vger.kernel.org
@@ -4700,7 +4700,7 @@ T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/dvb-frontends/cxd2820r*

CXGB3 ETHERNET DRIVER (CXGB3)
-M: Vishal Kulkarni <vishal@chelsio.com>
+M: Raju Rangoju <rajur@chelsio.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -4732,7 +4732,7 @@ W: http://www.chelsio.com
F: drivers/net/ethernet/chelsio/inline_crypto/

CXGB4 ETHERNET DRIVER (CXGB4)
-M: Vishal Kulkarni <vishal@chelsio.com>
+M: Raju Rangoju <rajur@chelsio.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -4754,7 +4754,7 @@ F: drivers/infiniband/hw/cxgb4/
F: include/uapi/rdma/cxgb4-abi.h

CXGB4VF ETHERNET DRIVER (CXGB4VF)
-M: Vishal Kulkarni <vishal@gmail.com>
+M: Raju Rangoju <rajur@chelsio.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.chelsio.com
@@ -9833,13 +9833,6 @@ S: Maintained
F: arch/mips/lantiq
F: drivers/soc/lantiq

-LAPB module
-L: linux-x25@vger.kernel.org
-S: Orphan
-F: Documentation/networking/lapb-module.rst
-F: include/*/lapb.h
-F: net/lapb/
-
LASI 53c700 driver for PARISC
M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
L: linux-scsi@vger.kernel.org
@@ -18991,12 +18984,18 @@ L: linux-kernel@vger.kernel.org
S: Maintained
N: axp[128]

-X.25 NETWORK LAYER
-M: Andrew Hendry <andrew.hendry@gmail.com>
+X.25 STACK
+M: Martin Schiller <ms@dev.tdt.de>
L: linux-x25@vger.kernel.org
-S: Odd Fixes
+S: Maintained
+F: Documentation/networking/lapb-module.rst
F: Documentation/networking/x25*
+F: drivers/net/wan/hdlc_x25.c
+F: drivers/net/wan/lapbether.c
+F: include/*/lapb.h
F: include/net/x25*
+F: include/uapi/linux/x25.h
+F: net/lapb/
F: net/x25/

X86 ARCHITECTURE (32-BIT AND 64-BIT)
@@ -1706,6 +1706,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)

if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
atomic_inc(&vcc->stats->tx_err);
+dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len,
+DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
return -EIO;
}
@@ -592,7 +592,7 @@ static void can_restart(struct net_device *dev)

cf->can_id |= CAN_ERR_RESTARTED;

-netif_rx(skb);
+netif_rx_ni(skb);

stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
@@ -728,8 +728,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
int err;

err = pm_runtime_get_sync(priv->dev);
-if (err < 0)
+if (err < 0) {
+pm_runtime_put_noidle(priv->dev);
return err;
+}

err = __flexcan_get_berr_counter(dev, bec);

@@ -1565,14 +1567,10 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}

-err = flexcan_transceiver_enable(priv);
-if (err)
-goto out_chip_disable;
-
/* synchronize with the can bus */
err = flexcan_chip_unfreeze(priv);
if (err)
-goto out_transceiver_disable;
+goto out_chip_disable;

priv->can.state = CAN_STATE_ERROR_ACTIVE;

@@ -1590,8 +1588,6 @@ static int flexcan_chip_start(struct net_device *dev)

return 0;

-out_transceiver_disable:
-flexcan_transceiver_disable(priv);
out_chip_disable:
flexcan_chip_disable(priv);
return err;
@@ -1621,7 +1617,6 @@ static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
&regs->ctrl);

-flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;

return 0;
@@ -1654,17 +1649,23 @@ static int flexcan_open(struct net_device *dev)
}

err = pm_runtime_get_sync(priv->dev);
-if (err < 0)
+if (err < 0) {
+pm_runtime_put_noidle(priv->dev);
return err;
+}

err = open_candev(dev);
if (err)
goto out_runtime_put;

-err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+err = flexcan_transceiver_enable(priv);
if (err)
goto out_close;

+err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+if (err)
+goto out_transceiver_disable;
+
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
else
@@ -1716,6 +1717,8 @@ static int flexcan_open(struct net_device *dev)
can_rx_offload_del(&priv->offload);
out_free_irq:
free_irq(dev->irq, dev);
+out_transceiver_disable:
+flexcan_transceiver_disable(priv);
out_close:
close_candev(dev);
out_runtime_put:
@@ -1734,6 +1737,7 @@ static int flexcan_close(struct net_device *dev)

can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
+flexcan_transceiver_disable(priv);

close_candev(dev);
pm_runtime_put(priv->dev);
@@ -1852,7 +1856,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
return -EINVAL;

/* stop mode property format is:
-* <&gpr req_gpr>.
+* <&gpr req_gpr req_bit>.
*/
ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
ARRAY_SIZE(out_val));
@@ -287,12 +287,12 @@ struct kvaser_pciefd_tx_packet {
static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
.name = KVASER_PCIEFD_DRV_NAME,
.tseg1_min = 1,
-.tseg1_max = 255,
+.tseg1_max = 512,
.tseg2_min = 1,
.tseg2_max = 32,
.sjw_max = 16,
.brp_min = 1,
-.brp_max = 4096,
+.brp_max = 8192,
.brp_inc = 1,
};

@@ -16,7 +16,8 @@ config CAN_M_CAN_PLATFORM

config CAN_M_CAN_TCAN4X5X
depends on CAN_M_CAN
-depends on REGMAP_SPI
+depends on SPI
+select REGMAP_SPI
tristate "TCAN4X5X M_CAN device"
help
Say Y here if you want support for Texas Instruments TCAN4x5x
@@ -665,7 +665,7 @@ static int m_can_handle_state_change(struct net_device *dev,
unsigned int ecr;

switch (new_state) {
-case CAN_STATE_ERROR_ACTIVE:
+case CAN_STATE_ERROR_WARNING:
/* error warning state */
cdev->can.can_stats.error_warning++;
cdev->can.state = CAN_STATE_ERROR_WARNING;
@@ -694,7 +694,7 @@ static int m_can_handle_state_change(struct net_device *dev,
__m_can_get_berr_counter(dev, &bec);

switch (new_state) {
-case CAN_STATE_ERROR_ACTIVE:
+case CAN_STATE_ERROR_WARNING:
/* error warning state */
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -956,6 +956,8 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
struct net_device_stats *stats = &dev->stats;
u32 ir;

+if (pm_runtime_suspended(cdev->dev))
+return IRQ_NONE;
ir = m_can_read(cdev, M_CAN_IR);
if (!ir)
return IRQ_NONE;
@@ -1414,6 +1416,9 @@ static void m_can_stop(struct net_device *dev)
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);

+/* Set init mode to disengage from the network */
+m_can_config_endisable(cdev, true);
+
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
}
@@ -1812,6 +1817,12 @@ out:
}
EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);

+void m_can_class_free_dev(struct net_device *net)
+{
+free_candev(net);
+}
+EXPORT_SYMBOL_GPL(m_can_class_free_dev);
+
int m_can_class_register(struct m_can_classdev *m_can_dev)
{
int ret;
@@ -1850,7 +1861,6 @@ pm_runtime_fail:
if (ret) {
if (m_can_dev->pm_clock_support)
pm_runtime_disable(m_can_dev->dev);
-free_candev(m_can_dev->net);
}

return ret;
@@ -1908,8 +1918,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev)
unregister_candev(m_can_dev->net);

m_can_clk_stop(m_can_dev);
-
-free_candev(m_can_dev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
@@ -99,6 +99,7 @@ struct m_can_classdev {
};

struct m_can_classdev *m_can_class_allocate_dev(struct device *dev);
+void m_can_class_free_dev(struct net_device *net);
int m_can_class_register(struct m_can_classdev *cdev);
void m_can_class_unregister(struct m_can_classdev *cdev);
int m_can_class_get_clocks(struct m_can_classdev *cdev);
@@ -67,32 +67,36 @@ static int m_can_plat_probe(struct platform_device *pdev)
return -ENOMEM;

priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-if (!priv)
-return -ENOMEM;
+if (!priv) {
+ret = -ENOMEM;
+goto probe_fail;
+}

mcan_class->device_data = priv;

-m_can_class_get_clocks(mcan_class);
+ret = m_can_class_get_clocks(mcan_class);
+if (ret)
+goto probe_fail;

res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
addr = devm_ioremap_resource(&pdev->dev, res);
irq = platform_get_irq_byname(pdev, "int0");
if (IS_ERR(addr) || irq < 0) {
ret = -EINVAL;
-goto failed_ret;
+goto probe_fail;
}

/* message ram could be shared */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
if (!res) {
ret = -ENODEV;
-goto failed_ret;
+goto probe_fail;
}

mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!mram_addr) {
ret = -ENOMEM;
-goto failed_ret;
+goto probe_fail;
}

priv->base = addr;
@@ -111,9 +115,10 @@ static int m_can_plat_probe(struct platform_device *pdev)

m_can_init_ram(mcan_class);

-ret = m_can_class_register(mcan_class);
+return m_can_class_register(mcan_class);

-failed_ret:
+probe_fail:
+m_can_class_free_dev(mcan_class->net);
return ret;
}

@@ -134,6 +139,8 @@ static int m_can_plat_remove(struct platform_device *pdev)

m_can_class_unregister(mcan_class);

+m_can_class_free_dev(mcan_class->net);
+
platform_set_drvdata(pdev, NULL);

return 0;
@@ -440,14 +440,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
return -ENOMEM;

priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
-if (!priv)
-return -ENOMEM;
+if (!priv) {
+ret = -ENOMEM;
+goto out_m_can_class_free_dev;
+}

priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
-if (PTR_ERR(priv->power) == -EPROBE_DEFER)
-return -EPROBE_DEFER;
-else
+if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ret = -EPROBE_DEFER;
+goto out_m_can_class_free_dev;
+} else {
priv->power = NULL;
+}

mcan_class->device_data = priv;

@@ -460,8 +464,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
}

/* Sanity check */
-if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF)
-return -ERANGE;
+if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) {
+ret = -ERANGE;
+goto out_m_can_class_free_dev;
+}

priv->reg_offset = TCAN4X5X_MCAN_OFFSET;
priv->mram_start = TCAN4X5X_MRAM_START;
@@ -487,6 +493,10 @@ static int tcan4x5x_can_probe(struct spi_device *spi)

priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
&spi->dev, &tcan4x5x_regmap);
+if (IS_ERR(priv->regmap)) {
+ret = PTR_ERR(priv->regmap);
+goto out_clk;
+}

ret = tcan4x5x_power_enable(priv->power, 1);
if (ret)
@@ -514,8 +524,10 @@ out_clk:
clk_disable_unprepare(mcan_class->cclk);
clk_disable_unprepare(mcan_class->hclk);
}

+out_m_can_class_free_dev:
+m_can_class_free_dev(mcan_class->net);
dev_err(&spi->dev, "Probe failed, err=%d\n", ret);

return ret;
}

@@ -523,9 +535,11 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);

-m_can_class_unregister(priv->mcan_dev);
-
tcan4x5x_power_enable(priv->power, 0);

+m_can_class_unregister(priv->mcan_dev);
+m_can_class_free_dev(priv->mcan_dev->net);
+
return 0;
}
@@ -881,7 +881,8 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "hecc ioremap failed\n");
-return PTR_ERR(priv->base);
+err = PTR_ERR(priv->base);
+goto probe_exit_candev;
}

/* handle hecc-ram memory */
@@ -889,20 +890,22 @@ static int ti_hecc_probe(struct platform_device *pdev)
"hecc-ram");
if (IS_ERR(priv->hecc_ram)) {
dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
-return PTR_ERR(priv->hecc_ram);
+err = PTR_ERR(priv->hecc_ram);
+goto probe_exit_candev;
}

/* handle mbx memory */
priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
if (IS_ERR(priv->mbx)) {
dev_err(&pdev->dev, "mbx ioremap failed\n");
-return PTR_ERR(priv->mbx);
+err = PTR_ERR(priv->mbx);
+goto probe_exit_candev;
}

irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "No irq resource\n");
-goto probe_exit;
+goto probe_exit_candev;
}

priv->ndev = ndev;
@@ -966,7 +969,7 @@ probe_exit_release_clk:
clk_put(priv->clk);
probe_exit_candev:
free_candev(ndev);
-probe_exit:

return err;
}
@@ -367,7 +367,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.tseg2_max = 32,
.sjw_max = 16,
.brp_min = 1,
-.brp_max = 4096,
+.brp_max = 8192,
.brp_inc = 1,
};

@@ -326,8 +326,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (!ctx)
return NETDEV_TX_BUSY;

-can_put_echo_skb(skb, priv->netdev, ctx->ndx);
-
if (cf->can_id & CAN_EFF_FLAG) {
/* SIDH | SIDL | EIDH | EIDL
* 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
@@ -357,6 +355,8 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
usb_msg.dlc |= MCBA_DLC_RTR_MASK;

+can_put_echo_skb(skb, priv->netdev, ctx->ndx);
+
err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
if (err)
goto xmit_failed;
@@ -156,7 +156,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
/* case when event time (tsw) wraps */
if (ts < time_ref->ts_dev_1)
-delta_ts = 1 << time_ref->adapter->ts_used_bits;
+delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits);

/* Otherwise, sync time counter (ts_dev_2) has wrapped:
* handle case when event time (tsn) hasn't.
@@ -168,7 +168,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
* tsn ts
*/
} else if (time_ref->ts_dev_1 < ts) {
-delta_ts = -(1 << time_ref->adapter->ts_used_bits);
+delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits);
}

/* add delay between last sync and event timestamps */
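A side note on the two BIT_ULL() conversions above (illustrative, not part of the patch): delta_ts is 64-bit, and shifting a plain int left by ts_used_bits is undefined behaviour once the shift count reaches the width of int, so the shift has to be done in 64-bit arithmetic:

/* Illustrative only: BIT_ULL(n) expands to (1ULL << (n)), so the
 * shift is evaluated as unsigned 64-bit, and a 32-bit timestamp
 * width yields the intended 0x100000000 instead of overflowing.
 */
u64 delta = BIT_ULL(32);	/* OK: 0x100000000 */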
@@ -26,6 +26,7 @@
*/

#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_bridge.h>
@@ -1837,6 +1838,16 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
i++;
}

+/* The standalone PHY11G requires 300ms to be fully
+ * initialized and ready for any MDIO communication after being
+ * taken out of reset. For the SoC-internal GPHY variant there
+ * is no (known) documentation for the minimum time after a
+ * reset. Use the same value as for the standalone variant as
+ * some users have reported internal PHYs not being detected
+ * without any delay.
+ */
+msleep(300);
+
return 0;

remove_gphy:
@@ -2297,6 +2297,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
+
+mv88e6xxx_g1_wait_eeprom_done(chip);
}
}

@@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}

+void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+const unsigned long timeout = jiffies + 1 * HZ;
+u16 val;
+int err;
+
+/* Wait up to 1 second for the switch to finish reading the
+ * EEPROM.
+ */
+while (time_before(jiffies, timeout)) {
+err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+if (err) {
+dev_err(chip->dev, "Error reading status");
+return;
+}
+
+/* If the switch is still resetting, it may not
+ * respond on the bus, and so MDIO read returns
+ * 0xffff. Differentiate between that, and waiting for
+ * the EEPROM to be done by bit 0 being set.
+ */
+if (val != 0xffff &&
+val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
+return;
+
+usleep_range(1000, 2000);
+}
+
+dev_err(chip->dev, "Timeout waiting for EEPROM done");
+}
+
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
 * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
 * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
@@ -278,6 +278,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);

int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
@@ -125,11 +125,9 @@ static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
 * Offset 0x08: VTU/STU Data Register 2
 * Offset 0x09: VTU/STU Data Register 3
 */

-static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
-struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
+u16 *regs)
{
-u16 regs[3];
int i;

/* Read all 3 VTU/STU Data registers */
@@ -142,12 +140,45 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
return err;
}

-/* Extract MemberTag and PortState data */
+return 0;
+}
+
+static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
+struct mv88e6xxx_vtu_entry *entry)
+{
+u16 regs[3];
+int err;
+int i;
+
+err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+if (err)
+return err;
+
+/* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
-unsigned int state_offset = member_offset + 2;

entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
}

return 0;
}

+static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
+struct mv88e6xxx_vtu_entry *entry)
+{
+u16 regs[3];
+int err;
+int i;
+
+err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
+if (err)
+return err;
+
+/* Extract PortState data */
+for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+unsigned int state_offset = (i % 4) * 4 + 2;
+
+entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+}
+
@@ -349,6 +380,10 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;

+err = mv88e6185_g1_stu_data_read(chip, entry);
+if (err)
+return err;
+
/* VTU DBNum[3:0] are located in VTU Operation 3:0
 * VTU DBNum[7:4] are located in VTU Operation 11:8
 */
@@ -374,11 +409,6 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;

if (entry->valid) {
-/* Fetch (and mask) VLAN PortState data from the STU */
-err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
-if (err)
-return err;
-
err = mv88e6185_g1_vtu_data_read(chip, entry);
if (err)
return err;
@@ -386,6 +416,15 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
+
+/* Fetch VLAN PortState data from the STU */
+err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
+if (err)
+return err;
+
+err = mv88e6185_g1_stu_data_read(chip, entry);
+if (err)
+return err;
}

return 0;
@@ -2543,8 +2543,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 * various kernel subsystems to support the mechanics required by a
 * fixed-high-32-bit system.
 */
-if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
-(dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
@@ -2312,8 +2312,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 * various kernel subsystems to support the mechanics required by a
 * fixed-high-32-bit system.
 */
-if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
-(dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
+err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+if (err) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
@@ -2383,7 +2383,8 @@ static int b44_init_one(struct ssb_device *sdev,
goto err_out_free_dev;
}

-if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
+err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
+if (err) {
dev_err(sdev->dev,
"Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
@@ -4099,7 +4099,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_ring_stats(bp);
-if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET))
+if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
+test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_free_port_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
@@ -7757,6 +7758,7 @@ static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
u64 sw_tmp;

+hw &= mask;
sw_tmp = (*sw & ~mask) | hw;
if (hw < (*sw & mask))
sw_tmp += mask + 1;
@@ -2079,6 +2079,9 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_input req = {0};
int rc;

+if (BNXT_VF(bp))
+return -EOPNOTSUPP;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -2997,7 +3000,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
-rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
start, length, data);
}
return rc;
@@ -1907,6 +1907,8 @@ err_register_netdev:
clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
err_ncsi_dev:
+if (priv->ndev)
+ncsi_unregister_dev(priv->ndev);
ftgmac100_destroy_mdio(netdev);
err_setup_mdio:
iounmap(priv->base);
@@ -1926,6 +1928,8 @@ static int ftgmac100_remove(struct platform_device *pdev)
netdev = platform_get_drvdata(pdev);
priv = netdev_priv(netdev);

+if (priv->ndev)
+ncsi_unregister_dev(priv->ndev);
unregister_netdev(netdev);

clk_disable_unprepare(priv->rclk);
@@ -16,6 +16,7 @@ config FSL_ENETC

config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI
+select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
help
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
enetc_lock_mdio();
|
||||
count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
|
||||
enetc_unlock_mdio();
|
||||
|
||||
if (unlikely(!count))
|
||||
goto drop_packet_err;
|
||||
|
||||
@ -239,7 +242,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* let H/W know BD ring has been updated */
|
||||
enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
|
||||
enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
|
||||
|
||||
return count;
|
||||
|
||||
@ -262,12 +265,16 @@ static irqreturn_t enetc_msix(int irq, void *data)
|
||||
struct enetc_int_vector *v = data;
|
||||
int i;
|
||||
|
||||
enetc_lock_mdio();
|
||||
|
||||
/* disable interrupts */
|
||||
enetc_wr_reg(v->rbier, 0);
|
||||
enetc_wr_reg(v->ricr1, v->rx_ictt);
|
||||
enetc_wr_reg_hot(v->rbier, 0);
|
||||
enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
|
||||
|
||||
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
|
||||
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
|
||||
enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
|
||||
|
||||
enetc_unlock_mdio();
|
||||
|
||||
napi_schedule(&v->napi);
|
||||
|
||||
@ -334,19 +341,23 @@ static int enetc_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
v->rx_napi_work = false;
|
||||
|
||||
enetc_lock_mdio();
|
||||
|
||||
/* enable interrupts */
|
||||
enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
|
||||
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
|
||||
|
||||
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
|
||||
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
|
||||
ENETC_TBIER_TXTIE);
|
||||
enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
|
||||
ENETC_TBIER_TXTIE);
|
||||
|
||||
enetc_unlock_mdio();
|
||||
|
||||
return work_done;
|
||||
}
|
||||
|
||||
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
|
||||
{
|
||||
int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
|
||||
int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
|
||||
|
||||
return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
|
||||
}
|
||||
@ -386,7 +397,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
|
||||
|
||||
i = tx_ring->next_to_clean;
|
||||
tx_swbd = &tx_ring->tx_swbd[i];
|
||||
|
||||
enetc_lock_mdio();
|
||||
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
|
||||
enetc_unlock_mdio();
|
||||
|
||||
do_tstamp = false;
|
||||
|
||||
@ -429,16 +443,20 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
|
||||
tx_swbd = tx_ring->tx_swbd;
|
||||
}
|
||||
|
||||
enetc_lock_mdio();
|
||||
|
||||
/* BD iteration loop end */
|
||||
if (is_eof) {
|
||||
tx_frm_cnt++;
|
||||
/* re-arm interrupt source */
|
||||
enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
|
||||
BIT(16 + tx_ring->index));
|
||||
enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
|
||||
BIT(16 + tx_ring->index));
|
||||
}
|
||||
|
||||
if (unlikely(!bds_to_clean))
|
||||
bds_to_clean = enetc_bd_ready_count(tx_ring, i);
|
||||
|
||||
enetc_unlock_mdio();
|
||||
}
|
||||
|
||||
tx_ring->next_to_clean = i;
|
||||
@ -515,8 +533,6 @@ static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
|
||||
if (likely(j)) {
|
||||
rx_ring->next_to_alloc = i; /* keep track from page reuse */
|
||||
rx_ring->next_to_use = i;
|
||||
/* update ENETC's consumer index */
|
||||
enetc_wr_reg(rx_ring->rcir, i);
|
||||
}
|
||||
|
||||
return j;
|
||||
@ -534,8 +550,8 @@ static void enetc_get_rx_tstamp(struct net_device *ndev,
|
||||
u64 tstamp;
|
||||
|
||||
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
|
||||
lo = enetc_rd(hw, ENETC_SICTR0);
|
||||
hi = enetc_rd(hw, ENETC_SICTR1);
|
||||
lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
|
||||
hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
|
||||
rxbd = enetc_rxbd_ext(rxbd);
|
||||
tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
|
||||
if (lo <= tstamp_lo)
|
||||
@ -684,23 +700,31 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
|
||||
u32 bd_status;
|
||||
u16 size;
|
||||
|
||||
enetc_lock_mdio();
|
||||
|
||||
if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
|
||||
int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
|
||||
|
||||
/* update ENETC's consumer index */
|
||||
enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
|
||||
cleaned_cnt -= count;
|
||||
}
|
||||
|
||||
rxbd = enetc_rxbd(rx_ring, i);
|
||||
bd_status = le32_to_cpu(rxbd->r.lstatus);
|
||||
if (!bd_status)
|
||||
if (!bd_status) {
|
||||
enetc_unlock_mdio();
|
||||
break;
|
||||
}
|
||||
|
||||
enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
|
||||
enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
|
||||
dma_rmb(); /* for reading other rxbd fields */
|
||||
size = le16_to_cpu(rxbd->r.buf_len);
|
||||
skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
|
||||
if (!skb)
|
||||
if (!skb) {
|
||||
enetc_unlock_mdio();
|
||||
break;
|
||||
}
|
||||
|
||||
enetc_get_offloads(rx_ring, rxbd, skb);
|
||||
|
||||
@ -712,6 +736,7 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
|
||||
|
||||
if (unlikely(bd_status &
|
||||
ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
|
||||
enetc_unlock_mdio();
|
||||
dev_kfree_skb(skb);
|
||||
while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
|
||||
dma_rmb();
|
||||
@ -751,6 +776,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
|
||||
|
||||
enetc_process_skb(rx_ring, skb);
|
||||
|
||||
enetc_unlock_mdio();
|
||||
|
||||
napi_gro_receive(napi, skb);
|
||||
|
||||
rx_frm_cnt++;
|
||||
@ -1225,6 +1252,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
|
||||
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
|
||||
|
||||
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
|
||||
enetc_wr(hw, ENETC_SIRXIDR, rx_ring->next_to_use);
|
||||
|
||||
/* enable ring */
|
||||
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
|
||||
|
@@ -324,14 +324,100 @@ struct enetc_hw {
void __iomem *global;
};

-/* general register accessors */
-#define enetc_rd_reg(reg) ioread32((reg))
-#define enetc_wr_reg(reg, val) iowrite32((val), (reg))
+/* ENETC register accessors */
+
+/* MDIO issue workaround (on LS1028A) -
+ * Due to a hardware issue, an access to MDIO registers
+ * that is concurrent with other ENETC register accesses
+ * may lead to the MDIO access being dropped or corrupted.
+ * To protect the MDIO accesses a readers-writers locking
+ * scheme is used, where the MDIO register accesses are
+ * protected by write locks to insure exclusivity, while
+ * the remaining ENETC registers are accessed under read
+ * locks since they only compete with MDIO accesses.
+ */
+extern rwlock_t enetc_mdio_lock;
+
+/* use this locking primitive only on the fast datapath to
+ * group together multiple non-MDIO register accesses to
+ * minimize the overhead of the lock
+ */
+static inline void enetc_lock_mdio(void)
+{
+read_lock(&enetc_mdio_lock);
+}
+
+static inline void enetc_unlock_mdio(void)
+{
+read_unlock(&enetc_mdio_lock);
+}
+
+/* use these accessors only on the fast datapath under
+ * the enetc_lock_mdio() locking primitive to minimize
+ * the overhead of the lock
+ */
+static inline u32 enetc_rd_reg_hot(void __iomem *reg)
+{
+lockdep_assert_held(&enetc_mdio_lock);
+
+return ioread32(reg);
+}
+
+static inline void enetc_wr_reg_hot(void __iomem *reg, u32 val)
+{
+lockdep_assert_held(&enetc_mdio_lock);
+
+iowrite32(val, reg);
+}
+
+/* internal helpers for the MDIO w/a */
+static inline u32 _enetc_rd_reg_wa(void __iomem *reg)
+{
+u32 val;
+
+enetc_lock_mdio();
+val = ioread32(reg);
+enetc_unlock_mdio();
+
+return val;
+}
+
+static inline void _enetc_wr_reg_wa(void __iomem *reg, u32 val)
+{
+enetc_lock_mdio();
+iowrite32(val, reg);
+enetc_unlock_mdio();
+}
+
+static inline u32 _enetc_rd_mdio_reg_wa(void __iomem *reg)
+{
+unsigned long flags;
+u32 val;
+
+write_lock_irqsave(&enetc_mdio_lock, flags);
+val = ioread32(reg);
+write_unlock_irqrestore(&enetc_mdio_lock, flags);
+
+return val;
+}
+
+static inline void _enetc_wr_mdio_reg_wa(void __iomem *reg, u32 val)
+{
+unsigned long flags;
+
+write_lock_irqsave(&enetc_mdio_lock, flags);
+iowrite32(val, reg);
+write_unlock_irqrestore(&enetc_mdio_lock, flags);
+}
+
#ifdef ioread64
-#define enetc_rd_reg64(reg) ioread64((reg))
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
+{
+return ioread64(reg);
+}
#else
/* using this to read out stats on 32b systems */
-static inline u64 enetc_rd_reg64(void __iomem *reg)
+static inline u64 _enetc_rd_reg64(void __iomem *reg)
{
u32 low, high, tmp;

@@ -345,12 +431,29 @@ static inline u64 enetc_rd_reg64(void __iomem *reg)
}
#endif

+static inline u64 _enetc_rd_reg64_wa(void __iomem *reg)
+{
+u64 val;
+
+enetc_lock_mdio();
+val = _enetc_rd_reg64(reg);
+enetc_unlock_mdio();
+
+return val;
+}
+
+/* general register accessors */
+#define enetc_rd_reg(reg) _enetc_rd_reg_wa((reg))
+#define enetc_wr_reg(reg, val) _enetc_wr_reg_wa((reg), (val))
#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off))
#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val)
-#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off))
+#define enetc_rd64(hw, off) _enetc_rd_reg64_wa((hw)->reg + (off))
/* port register accessors - PF only */
#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off))
#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val)
+#define enetc_port_rd_mdio(hw, off) _enetc_rd_mdio_reg_wa((hw)->port + (off))
+#define enetc_port_wr_mdio(hw, off, val) _enetc_wr_mdio_reg_wa(\
+(hw)->port + (off), val)
/* global register accessors - PF only */
#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off))
#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val)
|
||||
|
||||
static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
|
||||
{
|
||||
return enetc_port_rd(mdio_priv->hw, mdio_priv->mdio_base + off);
|
||||
return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
|
||||
}
|
||||
|
||||
static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
|
||||
u32 val)
|
||||
{
|
||||
enetc_port_wr(mdio_priv->hw, mdio_priv->mdio_base + off, val);
|
||||
enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
|
||||
}
|
||||
|
||||
#define enetc_mdio_rd(mdio_priv, off) \
|
||||
@ -174,3 +174,7 @@ struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
|
||||
return hw;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(enetc_hw_alloc);
|
||||
|
||||
/* Lock for MDIO access errata on LS1028A */
|
||||
DEFINE_RWLOCK(enetc_mdio_lock);
|
||||
EXPORT_SYMBOL_GPL(enetc_mdio_lock);
|
||||
|
@@ -1808,7 +1808,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
int ret = 0, frame_start, frame_addr, frame_op;
bool is_c45 = !!(regnum & MII_ADDR_C45);

-ret = pm_runtime_get_sync(dev);
+ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;

@@ -1867,11 +1867,9 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
int ret, frame_start, frame_addr;
bool is_c45 = !!(regnum & MII_ADDR_C45);

-ret = pm_runtime_get_sync(dev);
+ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
-else
-ret = 0;

if (is_c45) {
frame_start = FEC_MMFR_ST_C45;
@@ -2275,7 +2273,7 @@ static void fec_enet_get_regs(struct net_device *ndev,
u32 i, off;
int ret;

-ret = pm_runtime_get_sync(dev);
+ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;

@@ -2976,7 +2974,7 @@ fec_enet_open(struct net_device *ndev)
int ret;
bool reset_again;

-ret = pm_runtime_get_sync(&fep->pdev->dev);
+ret = pm_runtime_resume_and_get(&fep->pdev->dev);
if (ret < 0)
return ret;

@@ -3770,7 +3768,7 @@ fec_drv_remove(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int ret;

-ret = pm_runtime_get_sync(&pdev->dev);
+ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
return ret;
@@ -2287,6 +2287,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+rx_desc->buf_phys_addr = 0;

if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
@@ -2295,8 +2296,8 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
sinfo->nr_frags++;
-
-rx_desc->buf_phys_addr = 0;
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
*size -= len;
}
@@ -676,7 +676,8 @@ static int prestera_pci_probe(struct pci_dev *pdev,
if (err)
return err;

-if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30))) {
+err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+if (err) {
dev_err(&pdev->dev, "fail to set DMA mask\n");
goto err_dma_mask;
}
@@ -702,8 +703,10 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");

fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
-if (!fw->wq)
+if (!fw->wq) {
+err = -ENOMEM;
goto err_wq_alloc;
+}

INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
@@ -966,6 +966,7 @@ static int mtk_star_enable(struct net_device *ndev)
mtk_star_adjust_link, 0, priv->phy_intf);
if (!priv->phydev) {
netdev_err(ndev, "failed to connect to PHY\n");
+ret = -ENODEV;
goto err_free_irq;
}

@@ -1053,7 +1054,7 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
err_drop_packet:
dev_kfree_skb(skb);
ndev->stats.tx_dropped++;
-return NETDEV_TX_BUSY;
+return NETDEV_TX_OK;
}

/* Returns the number of bytes sent or a negative number on the first
@@ -1864,8 +1864,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
-#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
-#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
+#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
@@ -1873,7 +1873,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
-#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13)
#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
@@ -182,8 +182,8 @@ struct mlx4_init_hca_param {
u64 cmpt_base;
u64 mtt_base;
u64 global_caps;
-u16 log_mc_entry_sz;
-u16 log_mc_hash_sz;
+u8 log_mc_entry_sz;
+u8 log_mc_hash_sz;
u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */
u8 log_num_qps;
u8 log_num_srqs;
@@ -187,7 +187,7 @@ static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
struct mlx5e_priv *priv;

/* A given netdev is not a representor or not a slave of LAG configuration */
-if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
+if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
return false;

priv = netdev_priv(netdev);
@@ -64,13 +64,13 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
if (!spec)
return -ENOMEM;

-/* Action to copy 7 bit ipsec_syndrome to regB[0:6] */
+/* Action to copy 7 bit ipsec_syndrome to regB[24:30] */
MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
MLX5_SET(copy_action_in, action, src_offset, 0);
MLX5_SET(copy_action_in, action, length, 7);
MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-MLX5_SET(copy_action_in, action, dst_offset, 0);
+MLX5_SET(copy_action_in, action, dst_offset, 24);

modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
@@ -488,13 +488,13 @@ static int rx_add_rule(struct mlx5e_priv *priv,

setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);

-/* Set 1 bit ipsec marker */
-/* Set 24 bit ipsec_obj_id */
+/* Set bit[31] ipsec marker */
+/* Set bit[23-0] ipsec_obj_id */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
-MLX5_SET(set_action_in, action, offset, 7);
-MLX5_SET(set_action_in, action, length, 25);
+MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
+MLX5_SET(set_action_in, action, offset, 0);
+MLX5_SET(set_action_in, action, length, 32);

modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
1, action);
@@ -453,7 +453,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
-u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
struct mlx5e_priv *priv;
struct xfrm_offload *xo;
struct xfrm_state *xs;
@@ -481,7 +480,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;

-switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
+switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (WARN_ON_ONCE(priv->ipsec->no_trailer))
@@ -39,9 +39,10 @@
#include "en.h"
#include "en/txrx.h"

-#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
-#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
-#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))

struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
@@ -78,7 +79,7 @@ static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_st

static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
-return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
+return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}

static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
@@ -476,19 +476,22 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)

depth += sizeof(struct tcphdr);

-if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT))
+if (unlikely(!sk))
return;

+if (unlikely(sk->sk_state == TCP_TIME_WAIT))
+goto unref;
+
if (unlikely(!resync_queue_get_psv(sk)))
-return;
+goto unref;

-skb->sk = sk;
-skb->destructor = sock_edemux;
-
seq = th->seq;
datalen = skb->len - depth;
tls_offload_rx_resync_async_request_start(sk, seq, datalen);
rq->stats->tls_resync_req_start++;

+unref:
+sock_gen_put(sk);
}

void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
@@ -5229,8 +5229,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)

tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL);
-if (IS_ERR(tc->ct))
+if (IS_ERR(tc->ct)) {
+err = PTR_ERR(tc->ct);
goto err_ct;
+}

tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
@@ -283,6 +283,9 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)

reg_b = be32_to_cpu(cqe->ft_metadata);

+if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
+return false;
+
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
if (chain)
return true;
@@ -144,7 +144,9 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

-/* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */
+/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
+ * need to set L3 checksum flag for IPsec
+ */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
@@ -154,7 +156,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
sq->stats->csum_partial_inner++;
} else {
-eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
}
@@ -162,11 +163,6 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
-if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
-ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
-return;
-}
-
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -177,6 +173,9 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
}
+} else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+
} else
sq->stats->csum_none++;
}
@@ -1142,6 +1142,10 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
 	struct mlx5_vport *vport;
 
 	vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+	if (!vport->qos.enabled)
+		return -EOPNOTSUPP;
+
 	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
 
 	return mlx5_modify_scheduling_element_cmd(esw->dev,

@@ -1408,6 +1412,7 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
 	int i;
 
 	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+		memset(&vport->qos, 0, sizeof(vport->qos));
 		memset(&vport->info, 0, sizeof(vport->info));
 		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
 	}

@@ -2221,12 +2226,15 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 			max_guarantee = evport->info.min_rate;
 	}
 
-	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+	if (max_guarantee)
+		return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+	return 0;
 }
 
-static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
 {
 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+	u32 divider = calculate_vports_min_rate_divider(esw);
 	struct mlx5_vport *evport;
 	u32 vport_max_rate;
 	u32 vport_min_rate;

@@ -2239,9 +2247,9 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider
 			continue;
 		vport_min_rate = evport->info.min_rate;
 		vport_max_rate = evport->info.max_rate;
-		bw_share = MLX5_MIN_BW_SHARE;
+		bw_share = 0;
 
-		if (vport_min_rate)
+		if (divider)
 			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
 							 divider,
 							 fw_max_bw_share);

@@ -2266,7 +2274,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
 	u32 fw_max_bw_share;
 	u32 previous_min_rate;
-	u32 divider;
 	bool min_rate_supported;
 	bool max_rate_supported;
 	int err = 0;

@@ -2291,8 +2298,7 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
 
 	previous_min_rate = evport->info.min_rate;
 	evport->info.min_rate = min_rate;
-	divider = calculate_vports_min_rate_divider(esw);
-	err = normalize_vports_min_rate(esw, divider);
+	err = normalize_vports_min_rate(esw);
 	if (err) {
 		evport->info.min_rate = previous_min_rate;
 		goto unlock;
@@ -534,6 +534,13 @@ static void del_sw_hw_rule(struct fs_node *node)
 		goto out;
 	}
 
+	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
+	    --fte->dests_size) {
+		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+		goto out;
+	}
+
 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
 		fte->modify_mask |=
@@ -6,6 +6,7 @@
 config MLXSW_CORE
 	tristate "Mellanox Technologies Switch ASICs support"
 	select NET_DEVLINK
+	select MLXFW
 	help
 	  This driver supports Mellanox Technologies Switch ASICs family.
 
@@ -82,7 +83,6 @@ config MLXSW_SPECTRUM
 	select GENERIC_ALLOCATOR
 	select PARMAN
 	select OBJAGG
-	select MLXFW
 	imply PTP_1588_CLOCK
 	select NET_PTP_CLASSIFY if PTP_1588_CLOCK
 	default m
@@ -571,7 +571,8 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 	if (trans->core->fw_flash_in_progress)
 		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
 
-	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
+	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
+			   timeout << trans->retries);
 }
 
 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -148,7 +148,8 @@ static void lan743x_intr_software_isr(void *context)
 
 	int_sts = lan743x_csr_read(adapter, INT_STS);
 	if (int_sts & INT_BIT_SW_GP_) {
-		lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
+		/* disable the interrupt to prevent repeated re-triggering */
+		lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
 		intr->software_isr_flag = 1;
 	}
 }

@@ -1307,13 +1308,13 @@ clean_up_data_descriptor:
 		goto clear_active;
 
 	if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED)) {
-		dev_kfree_skb(buffer_info->skb);
+		dev_kfree_skb_any(buffer_info->skb);
 		goto clear_skb;
 	}
 
 	if (cleanup) {
 		lan743x_ptp_unrequest_tx_timestamp(tx->adapter);
-		dev_kfree_skb(buffer_info->skb);
+		dev_kfree_skb_any(buffer_info->skb);
 	} else {
 		ignore_sync = (buffer_info->flags &
 			       TX_BUFFER_INFO_FLAG_IGNORE_SYNC) != 0;

@@ -1623,7 +1624,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
 	if (required_number_of_descriptors >
 		lan743x_tx_get_avail_desc(tx)) {
 		if (required_number_of_descriptors > (tx->ring_size - 1)) {
-			dev_kfree_skb(skb);
+			dev_kfree_skb_irq(skb);
 		} else {
 			/* save to overflow buffer */
 			tx->overflow_skb = skb;

@@ -1656,7 +1657,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
 				   start_frame_length,
 				   do_timestamp,
 				   skb->ip_summed == CHECKSUM_PARTIAL)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_irq(skb);
 		goto unlock;
 	}
 

@@ -1675,7 +1676,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
 		 * frame assembler clean up was performed inside
 		 * lan743x_tx_frame_add_fragment
 		 */
-		dev_kfree_skb(skb);
+		dev_kfree_skb_irq(skb);
 		goto unlock;
 	}
 }
@@ -1647,9 +1647,9 @@ static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
 		     ilog2(rounded_conn_num));
 
 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
-		     p_hwfn->p_cxt_mngr->first_free);
+		     p_hwfn->p_cxt_mngr->src_t2.first_free);
 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
-		     p_hwfn->p_cxt_mngr->last_free);
+		     p_hwfn->p_cxt_mngr->src_t2.last_free);
 }
 
 /* Timers PF */

@@ -326,9 +326,6 @@ struct qed_cxt_mngr {
 
 	/* SRC T2 */
 	struct qed_src_t2 src_t2;
-	u32 t2_num_pages;
-	u64 first_free;
-	u64 last_free;
 
 	/* total number of SRQ's for this hwfn */
 	u32 srq_count;
@@ -2754,14 +2754,18 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
 					    sizeof(*iwarp_info->partial_fpdus),
 					    GFP_KERNEL);
-	if (!iwarp_info->partial_fpdus)
+	if (!iwarp_info->partial_fpdus) {
+		rc = -ENOMEM;
 		goto err;
+	}
 
 	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
 
 	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
-	if (!iwarp_info->mpa_intermediate_buf)
+	if (!iwarp_info->mpa_intermediate_buf) {
+		rc = -ENOMEM;
 		goto err;
+	}
 
 	/* The mpa_bufs array serves for pending RX packets received on the
 	 * mpa ll2 that don't have place on the tx ring and require later

@@ -2771,8 +2775,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
 				       sizeof(*iwarp_info->mpa_bufs),
 				       GFP_KERNEL);
-	if (!iwarp_info->mpa_bufs)
+	if (!iwarp_info->mpa_bufs) {
+		rc = -ENOMEM;
 		goto err;
+	}
 
 	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
 	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
@@ -2231,7 +2231,8 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
 
 	/* Boot either flash image or firmware image from host file system */
 	if (qlcnic_load_fw_file == 1) {
-		if (qlcnic_83xx_load_fw_image_from_host(adapter))
+		err = qlcnic_83xx_load_fw_image_from_host(adapter);
+		if (err)
 			return err;
 	} else {
 		QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
@@ -188,6 +188,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 
 	dev = skb->dev;
 	port = rmnet_get_port_rcu(dev);
+	if (unlikely(!port)) {
+		atomic_long_inc(&skb->dev->rx_nohandler);
+		kfree_skb(skb);
+		goto done;
+	}
 
 	switch (port->rmnet_mode) {
 	case RMNET_EPMODE_VND:
@@ -113,8 +113,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
 	/* Enable TX clock */
 	if (dwmac->data->tx_clk_en) {
 		dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
-		if (IS_ERR(dwmac->tx_clk))
+		if (IS_ERR(dwmac->tx_clk)) {
+			ret = PTR_ERR(dwmac->tx_clk);
 			goto err_remove_config_dt;
+		}
 
 		clk_prepare_enable(dwmac->tx_clk);
 
@@ -23,7 +23,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
 
 	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
 				  !(value & DMA_BUS_MODE_SFT_RESET),
-				  10000, 100000);
+				  10000, 200000);
 }
 
 /* CSR1 enables the transmit DMA to check for new descriptor */
@@ -5247,6 +5247,7 @@ int stmmac_resume(struct device *dev)
 		return ret;
 	}
 
+	rtnl_lock();
 	mutex_lock(&priv->lock);
 
 	stmmac_reset_queues_param(priv);

@@ -5262,6 +5263,7 @@ int stmmac_resume(struct device *dev)
 	stmmac_enable_all_queues(priv);
 
 	mutex_unlock(&priv->lock);
+	rtnl_unlock();
 
 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
 		rtnl_lock();
@@ -1001,8 +1001,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
 	if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
 		dev_err(dev, "Failed to register ptp clk %ld\n",
 			PTR_ERR(cpts->ptp_clock));
-		if (!cpts->ptp_clock)
-			ret = -ENODEV;
+		ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
 		goto refclk_disable;
 	}
 	cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
@@ -838,9 +838,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
 		if (ret < 0)
 			goto err_cleanup;
 
-		if (cpts_register(cpsw->cpts))
-			dev_err(priv->dev, "error registering cpts device\n");
-
+		if (cpsw->cpts) {
+			if (cpts_register(cpsw->cpts))
+				dev_err(priv->dev, "error registering cpts device\n");
+			else
+				writel(0x10, &cpsw->wr_regs->misc_en);
+		}
 	}
 
 	cpsw_restore(priv);

@@ -1631,6 +1634,7 @@ static int cpsw_probe(struct platform_device *pdev)
 				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
 	if (!ndev) {
 		dev_err(dev, "error allocating net_device\n");
 		ret = -ENOMEM;
 		goto clean_cpts;
 	}

@@ -1716,7 +1720,6 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	/* Enable misc CPTS evnt_pend IRQ */
 	cpts_set_irqpoll(cpsw->cpts, false);
-	writel(0x10, &cpsw->wr_regs->misc_en);
 
 skip_cpts:
 	cpsw_notice(priv, probe,

@@ -873,8 +873,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
 	if (ret < 0)
 		goto err_cleanup;
 
-	if (cpts_register(cpsw->cpts))
-		dev_err(priv->dev, "error registering cpts device\n");
+	if (cpsw->cpts) {
+		if (cpts_register(cpsw->cpts))
+			dev_err(priv->dev, "error registering cpts device\n");
+		else
+			writel(0x10, &cpsw->wr_regs->misc_en);
+	}
 
 	napi_enable(&cpsw->napi_rx);
 	napi_enable(&cpsw->napi_tx);

@@ -2006,7 +2010,6 @@ static int cpsw_probe(struct platform_device *pdev)
 
 	/* Enable misc CPTS evnt_pend IRQ */
 	cpts_set_irqpoll(cpsw->cpts, false);
-	writel(0x10, &cpsw->wr_regs->misc_en);
 
 skip_cpts:
 	ret = cpsw_register_notifiers(cpsw);
@@ -224,8 +224,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
 	if (ip_tunnel_collect_metadata() || gs->collect_md) {
 		__be16 flags;
 
-		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
-			(gnvh->oam ? TUNNEL_OAM : 0) |
+		flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
 			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
 
 		tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
@@ -362,22 +362,31 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
 	return trans;
 }
 
-/* Free a previously-allocated transaction (used only in case of error) */
+/* Free a previously-allocated transaction */
 void gsi_trans_free(struct gsi_trans *trans)
 {
+	refcount_t *refcount = &trans->refcount;
 	struct gsi_trans_info *trans_info;
+	bool last;
 
-	if (!refcount_dec_and_test(&trans->refcount))
+	/* We must hold the lock to release the last reference */
+	if (refcount_dec_not_one(refcount))
 		return;
 
 	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
 
 	spin_lock_bh(&trans_info->spinlock);
 
-	list_del(&trans->links);
+	/* Reference might have been added before we got the lock */
+	last = refcount_dec_and_test(refcount);
+	if (last)
+		list_del(&trans->links);
 
 	spin_unlock_bh(&trans_info->spinlock);
 
+	if (!last)
+		return;
+
 	ipa_gsi_trans_release(trans);
 
 	/* Releasing the reserved TREs implicitly frees the sgl[] and
@@ -96,6 +96,7 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
 	.open = simple_open,
 	.write = nsim_dev_take_snapshot_write,
 	.llseek = generic_file_llseek,
+	.owner = THIS_MODULE,
 };
 
 static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,

@@ -188,6 +189,7 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
 	.read = nsim_dev_trap_fa_cookie_read,
 	.write = nsim_dev_trap_fa_cookie_write,
 	.llseek = generic_file_llseek,
+	.owner = THIS_MODULE,
 };
 
 static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)

@@ -261,6 +261,7 @@ static const struct file_operations nsim_dev_health_break_fops = {
 	.open = simple_open,
 	.write = nsim_dev_health_break_write,
 	.llseek = generic_file_llseek,
+	.owner = THIS_MODULE,
 };
 
 int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)

@@ -124,6 +124,7 @@ static const struct file_operations nsim_udp_tunnels_info_reset_fops = {
 	.open = simple_open,
 	.write = nsim_udp_tunnels_info_reset_write,
 	.llseek = generic_file_llseek,
+	.owner = THIS_MODULE,
 };
 
 int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
@@ -981,7 +981,6 @@ int vsc8584_macsec_init(struct phy_device *phydev)
 
 	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
 	case PHY_ID_VSC856X:
-	case PHY_ID_VSC8575:
 	case PHY_ID_VSC8582:
 	case PHY_ID_VSC8584:
 		INIT_LIST_HEAD(&vsc8531->macsec_flows);
@@ -291,8 +291,10 @@ static int smsc_phy_probe(struct phy_device *phydev)
 		return ret;
 
 	ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
-	if (ret)
+	if (ret) {
+		clk_disable_unprepare(priv->refclk);
 		return ret;
+	}
 
 	return 0;
 }
@@ -197,7 +197,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 	}
 
 	/* enable ethernet mode (?) */
-	if (cx82310_enable_ethernet(dev))
+	ret = cx82310_enable_ethernet(dev);
+	if (ret)
 		goto err;
 
 	/* get the MAC address */
@@ -1070,7 +1070,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x05c6, 0x9011, 4)},
 	{QMI_FIXED_INTF(0x05c6, 0x9021, 1)},
 	{QMI_FIXED_INTF(0x05c6, 0x9022, 2)},
-	{QMI_FIXED_INTF(0x05c6, 0x9025, 4)},	/* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
+	{QMI_QUIRK_SET_DTR(0x05c6, 0x9025, 4)},	/* Alcatel-sbell ASB TL131 TDD LTE (China Mobile) */
 	{QMI_FIXED_INTF(0x05c6, 0x9026, 3)},
 	{QMI_FIXED_INTF(0x05c6, 0x902e, 5)},
 	{QMI_FIXED_INTF(0x05c6, 0x9031, 5)},
@@ -386,6 +386,27 @@ static inline int pm_runtime_get_sync(struct device *dev)
 	return __pm_runtime_resume(dev, RPM_GET_PUT);
 }
 
+/**
+ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Resume @dev synchronously and if that is successful, increment its runtime
+ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
+ * incremented or a negative error code otherwise.
+ */
+static inline int pm_runtime_resume_and_get(struct device *dev)
+{
+	int ret;
+
+	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
 /**
  * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
  * @dev: Target device.
@@ -478,9 +478,11 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
 					   const void *from, int len,
 					   __be16 flags)
 {
-	memcpy(ip_tunnel_info_opts(info), from, len);
 	info->options_len = len;
-	info->key.tun_flags |= flags;
+	if (len > 0) {
+		memcpy(ip_tunnel_info_opts(info), from, len);
+		info->key.tun_flags |= flags;
+	}
 }
 
 static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)

@@ -526,7 +528,6 @@ static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
 					   __be16 flags)
 {
 	info->options_len = 0;
-	info->key.tun_flags |= flags;
 }
 
 #endif /* CONFIG_INET */
@@ -108,5 +108,35 @@ out_rcu_unlock:
 	rcu_read_unlock();
 	inet_frag_put(&fq->q);
 }
+
+/* Check if the upper layer header is truncated in the first fragment. */
+static inline bool
+ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
+{
+	u8 nexthdr = *nexthdrp;
+	__be16 frag_off;
+	int offset;
+
+	offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
+	if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
+		return false;
+	switch (nexthdr) {
+	case NEXTHDR_TCP:
+		offset += sizeof(struct tcphdr);
+		break;
+	case NEXTHDR_UDP:
+		offset += sizeof(struct udphdr);
+		break;
+	case NEXTHDR_ICMP:
+		offset += sizeof(struct icmp6hdr);
+		break;
+	default:
+		offset += 1;
+	}
+	if (offset > skb->len)
+		return true;
+	return false;
+}
+
 #endif
 #endif
@@ -204,6 +204,7 @@ struct neigh_table {
 	int (*pconstructor)(struct pneigh_entry *);
 	void (*pdestructor)(struct pneigh_entry *);
 	void (*proxy_redo)(struct sk_buff *skb);
+	int (*is_multicast)(const void *pkey);
 	bool (*allow_add)(const struct net_device *dev,
 			  struct netlink_ext_ack *extack);
 	char *id;
@@ -300,7 +300,8 @@ enum tls_offload_sync_type {
 #define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
 struct tls_offload_resync_async {
 	atomic64_t req;
-	u32 loglen;
+	u16 loglen;
+	u16 rcd_delta;
 	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
 };
 
@@ -471,6 +472,18 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len)
 	return (i == -1);
 }
 
+static inline void tls_bigint_subtract(unsigned char *seq, int n)
+{
+	u64 rcd_sn;
+	__be64 *p;
+
+	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
+
+	p = (__be64 *)seq;
+	rcd_sn = be64_to_cpu(*p);
+	*p = cpu_to_be64(rcd_sn - n);
+}
+
 static inline struct tls_context *tls_get_ctx(const struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);

@@ -639,6 +652,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
 	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
 		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
 	rx_ctx->resync_async->loglen = 0;
+	rx_ctx->resync_async->rcd_delta = 0;
 }
 
 static inline void
@@ -7786,9 +7786,11 @@ static int check_return_code(struct bpf_verifier_env *env)
 	struct tnum range = tnum_range(0, 1);
 	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
 	int err;
+	const bool is_subprog = env->cur_state->frame[0]->subprogno;
 
 	/* LSM and struct_ops func-ptr's return type could be "void" */
-	if ((prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
+	if (!is_subprog &&
+	    (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
 	     prog_type == BPF_PROG_TYPE_LSM) &&
 	    !prog->aux->attach_func_proto->type)
 		return 0;

@@ -7808,6 +7810,16 @@ static int check_return_code(struct bpf_verifier_env *env)
 		return -EACCES;
 	}
 
+	reg = cur_regs(env) + BPF_REG_0;
+	if (is_subprog) {
+		if (reg->type != SCALAR_VALUE) {
+			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
+				reg_type_str[reg->type]);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
 	switch (prog_type) {
 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
 		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||

@@ -7861,7 +7873,6 @@ static int check_return_code(struct bpf_verifier_env *env)
 		return 0;
 	}
 
-	reg = cur_regs(env) + BPF_REG_0;
 	if (reg->type != SCALAR_VALUE) {
 		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
 			reg_type_str[reg->type]);

@@ -9572,12 +9583,13 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
 			       struct bpf_insn *insn,
 			       struct bpf_insn_aux_data *aux)
 {
-	u32 datasec_id, type, id = insn->imm;
 	const struct btf_var_secinfo *vsi;
 	const struct btf_type *datasec;
 	const struct btf_type *t;
 	const char *sym_name;
 	bool percpu = false;
+	u32 type, id = insn->imm;
+	s32 datasec_id;
 	u64 addr;
 	int i;
@@ -253,7 +253,7 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
 
 	if (copy_from_user(buf, buffer, count)) {
 		ret = -EFAULT;
-		goto out;
+		goto out_free;
 	}
 	buf[count] = '\0';
 	sym = strstrip(buf);

@@ -307,8 +307,9 @@ static ssize_t fei_write(struct file *file, const char __user *buffer,
 		ret = count;
 	}
 out:
-	kfree(buf);
 	mutex_unlock(&fei_lock);
+out_free:
+	kfree(buf);
 	return ret;
 }
@@ -181,6 +181,16 @@ bpf_probe_read_user_str_common(void *dst, u32 size,
 {
 	int ret;
 
+	/*
+	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
+	 * terminator into `dst`.
+	 *
+	 * strncpy_from_user() does long-sized strides in the fast path. If the
+	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
+	 * then there could be junk after the NUL in `dst`. If user takes `dst`
+	 * and keys a hash map with it, then semantically identical strings can
+	 * occupy multiple entries in the map.
+	 */
 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
 	if (unlikely(ret < 0))
 		memset(dst, 0, size);

@@ -1198,7 +1208,7 @@ static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
 	*btf = bpf_get_btf_vmlinux();
 
 	if (IS_ERR_OR_NULL(*btf))
-		return PTR_ERR(*btf);
+		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
 
 	if (ptr->type_id > 0)
 		*btf_id = ptr->type_id;
@@ -35,17 +35,32 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
 		goto byte_at_a_time;
 
 	while (max >= sizeof(unsigned long)) {
-		unsigned long c, data;
+		unsigned long c, data, mask;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
 		unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
 
-		*(unsigned long *)(dst+res) = c;
+		/*
+		 * Note that we mask out the bytes following the NUL. This is
+		 * important to do because string oblivious code may read past
+		 * the NUL. For those routines, we don't want to give them
+		 * potentially random bytes after the NUL in `src`.
+		 *
+		 * One example of such code is BPF map keys. BPF treats map keys
+		 * as an opaque set of bytes. Without the post-NUL mask, any BPF
+		 * maps keyed by strings returned from strncpy_from_user() may
+		 * have multiple entries for semantically identical strings.
+		 */
 		if (has_zero(c, &data, &constants)) {
 			data = prep_zero_mask(c, data, &constants);
 			data = create_zero_mask(data);
+			mask = zero_bytemask(data);
+			*(unsigned long *)(dst+res) = c & mask;
 			return res + find_zero(data);
 		}
+
+		*(unsigned long *)(dst+res) = c;
+
 		res += sizeof(unsigned long);
 		max -= sizeof(unsigned long);
 	}
@@ -5103,6 +5103,11 @@ refill:
 	if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
 		goto refill;
 
+	if (unlikely(nc->pfmemalloc)) {
+		free_the_page(page, compound_order(page));
+		goto refill;
+	}
+
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 	/* if size can vary use size else just use PAGE_SIZE */
 	size = nc->size;
@@ -207,6 +207,7 @@ static void br_get_stats64(struct net_device *dev,
 {
 	struct net_bridge *br = netdev_priv(dev);
 
+	netdev_stats_to_stats64(stats, &dev->stats);
 	dev_fetch_sw_netstats(stats, br->stats);
 }
@@ -677,16 +677,25 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
-		     cfd->len > CAN_MAX_DLEN)) {
-		pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
+	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
+		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
+			     dev->type, skb->len);
+		goto free_skb;
+	}
+
+	/* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */
+	if (unlikely(cfd->len > CAN_MAX_DLEN)) {
+		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d, datalen %d\n",
 			     dev->type, skb->len, cfd->len);
-		kfree_skb(skb);
-		return NET_RX_DROP;
+		goto free_skb;
 	}
 
 	can_receive(skb, dev);
 	return NET_RX_SUCCESS;
+
+free_skb:
+	kfree_skb(skb);
+	return NET_RX_DROP;
 }
 
 static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,

@@ -694,16 +703,25 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
-		     cfd->len > CANFD_MAX_DLEN)) {
-		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
+	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
+		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
+			     dev->type, skb->len);
+		goto free_skb;
+	}
+
+	/* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */
+	if (unlikely(cfd->len > CANFD_MAX_DLEN)) {
+		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d, datalen %d\n",
 			     dev->type, skb->len, cfd->len);
-		kfree_skb(skb);
-		return NET_RX_DROP;
+		goto free_skb;
 	}
 
 	can_receive(skb, dev);
 	return NET_RX_SUCCESS;
+
+free_skb:
+	kfree_skb(skb);
+	return NET_RX_DROP;
 }
 
 /* af_can protocol functions */
@@ -1448,7 +1448,7 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
 		err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
 						pool_index, &cur, &max);
 		if (err && err != -EOPNOTSUPP)
-			return err;
+			goto sb_occ_get_failure;
 		if (!err) {
 			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
 				goto nla_put_failure;

@@ -1461,8 +1461,10 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
 	return 0;
 
 nla_put_failure:
+	err = -EMSGSIZE;
+sb_occ_get_failure:
 	genlmsg_cancel(msg, hdr);
-	return -EMSGSIZE;
+	return err;
 }
 
 static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
@@ -235,6 +235,8 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 
 			write_lock(&n->lock);
 			if ((n->nud_state == NUD_FAILED) ||
+			    (tbl->is_multicast &&
+			     tbl->is_multicast(n->primary_key)) ||
 			    time_after(tref, n->updated))
 				remove = true;
 			write_unlock(&n->lock);
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/if_vlan.h>
+#include <net/dsa.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/addrconf.h>

@@ -657,15 +658,15 @@ EXPORT_SYMBOL_GPL(__netpoll_setup);
 
 int netpoll_setup(struct netpoll *np)
 {
-	struct net_device *ndev = NULL;
+	struct net_device *ndev = NULL, *dev = NULL;
+	struct net *net = current->nsproxy->net_ns;
 	struct in_device *in_dev;
 	int err;
 
 	rtnl_lock();
-	if (np->dev_name[0]) {
-		struct net *net = current->nsproxy->net_ns;
+	if (np->dev_name[0])
 		ndev = __dev_get_by_name(net, np->dev_name);
-	}
 
 	if (!ndev) {
 		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
 		err = -ENODEV;

@@ -673,6 +674,19 @@ int netpoll_setup(struct netpoll *np)
 	}
 	dev_hold(ndev);
 
+	/* bring up DSA management network devices up first */
+	for_each_netdev(net, dev) {
+		if (!netdev_uses_dsa(dev))
+			continue;
+
+		err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
+		if (err < 0) {
+			np_err(np, "%s failed to open %s\n",
+			       np->dev_name, dev->name);
+			goto put;
+		}
+	}
+
 	if (netdev_master_upper_dev_get(ndev)) {
 		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
 		err = -EBUSY;
@@ -170,10 +170,12 @@ static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
 	struct scatterlist *sge = sk_msg_elem(msg, i);
 	u32 len = sge->length;
 
-	if (charge)
-		sk_mem_uncharge(sk, len);
-	if (!msg->skb)
+	/* When the skb owns the memory we free it from consume_skb path. */
+	if (!msg->skb) {
+		if (charge)
+			sk_mem_uncharge(sk, len);
 		put_page(sg_page(sge));
+	}
 	memset(sge, 0, sizeof(*sge));
 	return len;
 }

@@ -397,28 +399,45 @@ out:
 }
 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
 
-static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
+static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
+						  struct sk_buff *skb)
 {
-	struct sock *sk = psock->sk;
-	int copied = 0, num_sge;
 	struct sk_msg *msg;
 
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+		return NULL;
+
+	if (!sk_rmem_schedule(sk, skb, skb->truesize))
+		return NULL;
+
 	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
 	if (unlikely(!msg))
-		return -EAGAIN;
-	if (!sk_rmem_schedule(sk, skb, skb->len)) {
-		kfree(msg);
-		return -EAGAIN;
-	}
+		return NULL;
 
 	sk_msg_init(msg);
+	return msg;
+}
+
+static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
+					struct sk_psock *psock,
+					struct sock *sk,
+					struct sk_msg *msg)
+{
+	int num_sge, copied;
+
 	/* skb linearize may fail with ENOMEM, but lets simply try again
 	 * later if this happens. Under memory pressure we don't want to
 	 * drop the skb. We need to linearize the skb so that the mapping
 	 * in skb_to_sgvec can not error.
 	 */
 	if (skb_linearize(skb))
 		return -EAGAIN;
 	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
 	if (unlikely(num_sge < 0)) {
 		kfree(msg);
 		return num_sge;
 	}
 
-	sk_mem_charge(sk, skb->len);
 	copied = skb->len;
 	msg->sg.start = 0;
 	msg->sg.size = copied;

@@ -430,6 +449,48 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 	return copied;
 }
 
+static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);
+
+static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
+{
+	struct sock *sk = psock->sk;
+	struct sk_msg *msg;
+
+	/* If we are receiving on the same sock skb->sk is already assigned,
+	 * skip memory accounting and owner transition seeing it already set
+	 * correctly.
+	 */
+	if (unlikely(skb->sk == sk))
+		return sk_psock_skb_ingress_self(psock, skb);
+	msg = sk_psock_create_ingress_msg(sk, skb);
+	if (!msg)
+		return -EAGAIN;
+
+	/* This will transition ownership of the data from the socket where
+	 * the BPF program was run initiating the redirect to the socket
+	 * we will eventually receive this data on. The data will be released
+	 * from skb_consume found in __tcp_bpf_recvmsg() after its been copied
+	 * into user buffers.
+	 */
+	skb_set_owner_r(skb, sk);
+	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+}
+
+/* Puts an skb on the ingress queue of the socket already assigned to the
+ * skb. In this case we do not need to check memory limits or skb_set_owner_r
+ * because the skb is already accounted for here.
+ */
+static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
+{
+	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
+	struct sock *sk = psock->sk;
+
+	if (unlikely(!msg))
+		return -EAGAIN;
+	sk_msg_init(msg);
+	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+}
+
 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 			       u32 off, u32 len, bool ingress)
 {

@@ -789,7 +850,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
 			 * retrying later from workqueue.
 			 */
 			if (skb_queue_empty(&psock->ingress_skb)) {
-				err = sk_psock_skb_ingress(psock, skb);
+				err = sk_psock_skb_ingress_self(psock, skb);
 			}
 			if (err < 0) {
 				skb_queue_tail(&psock->ingress_skb, skb);
@@ -125,6 +125,7 @@ static int arp_constructor(struct neighbour *neigh);
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
 static void parp_redo(struct sk_buff *skb);
+static int arp_is_multicast(const void *pkey);
 
 static const struct neigh_ops arp_generic_ops = {
 	.family = AF_INET,

@@ -156,6 +157,7 @@ struct neigh_table arp_tbl = {
 	.key_eq = arp_key_eq,
 	.constructor = arp_constructor,
 	.proxy_redo = parp_redo,
+	.is_multicast = arp_is_multicast,
 	.id = "arp_cache",
 	.parms = {
 		.tbl = &arp_tbl,

@@ -928,6 +930,10 @@ static void parp_redo(struct sk_buff *skb)
 	arp_process(dev_net(skb->dev), NULL, skb);
 }
 
+static int arp_is_multicast(const void *pkey)
+{
+	return ipv4_is_multicast(*((__be32 *)pkey));
+}
+
 /*
  *	Receive an arp request from the device layer.
@@ -696,7 +696,7 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
 		cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
 		break;
 	case AF_INET6:
-#ifdef CONFIG_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
 		if (alen != sizeof(struct in6_addr)) {
 			NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
 			return -EINVAL;
@@ -479,8 +479,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
 	r->idiag_inode = 0;
 
 	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
-				     inet_rsk(reqsk)->ir_mark))
+				     inet_rsk(reqsk)->ir_mark)) {
+		nlmsg_cancel(skb, nlh);
 		return -EMSGSIZE;
+	}
 
 	nlmsg_end(skb, nlh);
 	return 0;
@@ -945,7 +945,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 	filter_expired = after(tcp_jiffies32,
 			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
 	if (rs->rtt_us >= 0 &&
-	    (rs->rtt_us <= bbr->min_rtt_us ||
+	    (rs->rtt_us < bbr->min_rtt_us ||
 	     (filter_expired && !rs->is_ack_delayed))) {
 		bbr->min_rtt_us = rs->rtt_us;
 		bbr->min_rtt_stamp = tcp_jiffies32;
@@ -15,8 +15,8 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
 {
 	struct iov_iter *iter = &msg->msg_iter;
 	int peek = flags & MSG_PEEK;
-	int i, ret, copied = 0;
 	struct sk_msg *msg_rx;
+	int i, copied = 0;
 
 	msg_rx = list_first_entry_or_null(&psock->ingress_msg,
 					  struct sk_msg, list);

@@ -37,17 +37,16 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
 			page = sg_page(sge);
 			if (copied + copy > len)
 				copy = len - copied;
-			ret = copy_page_to_iter(page, sge->offset, copy, iter);
-			if (ret != copy) {
-				msg_rx->sg.start = i;
-				return -EFAULT;
-			}
+			copy = copy_page_to_iter(page, sge->offset, copy, iter);
+			if (!copy)
+				return copied ? copied : -EFAULT;
 
 			copied += copy;
 			if (likely(!peek)) {
 				sge->offset += copy;
 				sge->length -= copy;
-				sk_mem_uncharge(sk, copy);
+				if (!msg_rx->skb)
+					sk_mem_uncharge(sk, copy);
 				msg_rx->sg.size -= copy;
 
 				if (!sge->length) {

@@ -56,6 +55,11 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
 					put_page(page);
 				}
 			} else {
+				/* Lets not optimize peek case if copy_page_to_iter
+				 * didn't copy the entire length lets just break.
+				 */
+				if (copy != sge->length)
+					return copied;
 				sk_msg_iter_var_next(i);
 			}
 
@@ -5022,8 +5022,10 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
 		return -EMSGSIZE;
 
 	if (args->netnsid >= 0 &&
-	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
+		nlmsg_cancel(skb, nlh);
 		return -EMSGSIZE;
+	}
 
 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
 	if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||

@@ -5054,8 +5056,10 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
 		return -EMSGSIZE;
 
 	if (args->netnsid >= 0 &&
-	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
+	    nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
+		nlmsg_cancel(skb, nlh);
+		return -EMSGSIZE;
+	}
 
 	put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
 	if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
@@ -588,7 +588,8 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
 	memset(ah->auth_data, 0, ahp->icv_trunc_len);
 
-	if (ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN))
+	err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
+	if (err)
 		goto out_free;
 
 	ip6h->priority = 0;
@@ -81,6 +81,7 @@ static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
 static int pndisc_constructor(struct pneigh_entry *n);
 static void pndisc_destructor(struct pneigh_entry *n);
 static void pndisc_redo(struct sk_buff *skb);
+static int ndisc_is_multicast(const void *pkey);
 
 static const struct neigh_ops ndisc_generic_ops = {
 	.family = AF_INET6,

@@ -115,6 +116,7 @@ struct neigh_table nd_tbl = {
 	.pconstructor = pndisc_constructor,
 	.pdestructor = pndisc_destructor,
 	.proxy_redo = pndisc_redo,
+	.is_multicast = ndisc_is_multicast,
 	.allow_add = ndisc_allow_add,
 	.id = "ndisc_cache",
 	.parms = {

@@ -1706,6 +1708,11 @@ static void pndisc_redo(struct sk_buff *skb)
 	kfree_skb(skb);
 }
 
+static int ndisc_is_multicast(const void *pkey)
+{
+	return ipv6_addr_is_multicast((struct in6_addr *)pkey);
+}
+
 static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
 {
 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
@@ -440,6 +440,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
 int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
 	u16 savethdr = skb->transport_header;
+	u8 nexthdr = NEXTHDR_FRAGMENT;
 	int fhoff, nhoff, ret;
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;

@@ -455,6 +456,14 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
 		return 0;
 
+	/* Discard the first fragment if it does not include all headers
+	 * RFC 8200, Section 4.5
+	 */
+	if (ipv6frag_thdr_truncated(skb, fhoff, &nexthdr)) {
+		pr_debug("Drop incomplete fragment\n");
+		return 0;
+	}
+
 	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
 		return -ENOMEM;
 
@@ -324,9 +324,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	struct frag_queue *fq;
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct net *net = dev_net(skb_dst(skb)->dev);
-	__be16 frag_off;
-	int iif, offset;
 	u8 nexthdr;
+	int iif;
 
 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
 		goto fail_hdr;

@@ -362,24 +361,11 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	 * the source of the fragment, with the Pointer field set to zero.
 	 */
 	nexthdr = hdr->nexthdr;
-	offset = ipv6_skip_exthdr(skb, skb_transport_offset(skb), &nexthdr, &frag_off);
-	if (offset >= 0) {
-		/* Check some common protocols' header */
-		if (nexthdr == IPPROTO_TCP)
-			offset += sizeof(struct tcphdr);
-		else if (nexthdr == IPPROTO_UDP)
-			offset += sizeof(struct udphdr);
-		else if (nexthdr == IPPROTO_ICMPV6)
-			offset += sizeof(struct icmp6hdr);
-		else
-			offset += 1;
-
-		if (!(frag_off & htons(IP6_OFFSET)) && offset > skb->len) {
-			__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
-					IPSTATS_MIB_INHDRERRORS);
-			icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
-			return -1;
-		}
+	if (ipv6frag_thdr_truncated(skb, skb_transport_offset(skb), &nexthdr)) {
+		__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
+				IPSTATS_MIB_INHDRERRORS);
+		icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
+		return -1;
 	}
 
 	iif = skb->dev ? skb->dev->ifindex : 0;
 
@@ -274,7 +274,7 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
 	success = !!(info->flags & IEEE80211_TX_STAT_ACK);
 
 	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
-		if (ar[i].idx < 0)
+		if (ar[i].idx < 0 || !ar[i].count)
 			break;
 
 		ndx = rix_to_ndx(mi, ar[i].idx);

@@ -287,12 +287,6 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
 			mi->r[ndx].stats.success += success;
 	}
 
-	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
-		mi->sample_packets++;
-
-	if (mi->sample_deferred > 0)
-		mi->sample_deferred--;
-
 	if (time_after(jiffies, mi->last_stats_update +
 		       mp->update_interval / (mp->new_avg ? 2 : 1)))
 		minstrel_update_stats(mp, mi);

@@ -367,7 +361,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 		return;
 
 	delta = (mi->total_packets * sampling_ratio / 100) -
-		(mi->sample_packets + mi->sample_deferred / 2);
+		mi->sample_packets;
 
 	/* delta < 0: no sampling required */
 	prev_sample = mi->prev_sample;

@@ -376,7 +370,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 		return;
 
 	if (mi->total_packets >= 10000) {
-		mi->sample_deferred = 0;
 		mi->sample_packets = 0;
 		mi->total_packets = 0;
 	} else if (delta > mi->n_rates * 2) {

@@ -401,19 +394,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 	 * rate sampling method should be used.
 	 * Respect such rates that are not sampled for 20 interations.
 	 */
-	if (mrr_capable &&
-	    msr->perfect_tx_time > mr->perfect_tx_time &&
-	    msr->stats.sample_skipped < 20) {
-		/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
-		 * packets that have the sampling rate deferred to the
-		 * second MRR stage. Increase the sample counter only
-		 * if the deferred sample rate was actually used.
-		 * Use the sample_deferred counter to make sure that
-		 * the sampling is not done in large bursts */
-		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-		rate++;
-		mi->sample_deferred++;
-	} else {
+	if (msr->perfect_tx_time < mr->perfect_tx_time ||
+	    msr->stats.sample_skipped >= 20) {
 		if (!msr->sample_limit)
 			return;

@@ -433,6 +415,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 
 	rate->idx = mi->r[ndx].rix;
 	rate->count = minstrel_get_retry_count(&mi->r[ndx], info);
+	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
 }
 

@@ -126,7 +126,6 @@ struct minstrel_sta_info {
 	u8 max_prob_rate;
 	unsigned int total_packets;
 	unsigned int sample_packets;
-	int sample_deferred;
 
 	unsigned int sample_row;
 	unsigned int sample_column;
@@ -705,7 +705,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
  out_drop_sta:
 	local->num_sta--;
 	synchronize_net();
-	__cleanup_single_sta(sta);
+	cleanup_single_sta(sta);
  out_err:
 	mutex_unlock(&local->sta_mtx);
 	kfree(sinfo);

@@ -724,19 +724,13 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
 
 	err = sta_info_insert_check(sta);
 	if (err) {
+		sta_info_free(local, sta);
 		mutex_unlock(&local->sta_mtx);
 		rcu_read_lock();
-		goto out_free;
+		return err;
 	}
 
-	err = sta_info_insert_finish(sta);
-	if (err)
-		goto out_free;
-
-	return 0;
- out_free:
-	sta_info_free(local, sta);
-	return err;
+	return sta_info_insert_finish(sta);
 }
 
 int sta_info_insert(struct sta_info *sta)
@@ -49,7 +49,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
 	int ac;
 
 	if (info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
-			   IEEE80211_TX_CTL_AMPDU)) {
+			   IEEE80211_TX_CTL_AMPDU |
+			   IEEE80211_TX_CTL_HW_80211_ENCAP)) {
 		ieee80211_free_txskb(&local->hw, skb);
 		return;
 	}

@@ -915,15 +916,6 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
 			ieee80211_mpsp_trigger_process(
 				ieee80211_get_qos_ctl(hdr), sta, true, acked);
 
-		if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
-			/*
-			 * The STA is in power save mode, so assume
-			 * that this TX packet failed because of that.
-			 */
-			ieee80211_handle_filtered_frame(local, sta, skb);
-			return;
-		}
-
 		if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
 		    (ieee80211_is_data(hdr->frame_control)) &&
 		    (rates_idx != -1))

@@ -1150,6 +1142,12 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
 					-info->status.ack_signal);
 		}
+	} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+		/*
+		 * The STA is in power save mode, so assume
+		 * that this TX packet failed because of that.
+		 */
+		if (skb)
+			ieee80211_handle_filtered_frame(local, sta, skb);
+		return;
 	} else if (noack_success) {
 		/* nothing to do here, do not account as lost */
@@ -1726,9 +1726,6 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
 	ndp->ptype.dev = dev;
 	dev_add_pack(&ndp->ptype);
 
-	/* Set up generic netlink interface */
-	ncsi_init_netlink(dev);
-
 	pdev = to_platform_device(dev->dev.parent);
 	if (pdev) {
 		np = pdev->dev.of_node;

@@ -1892,8 +1889,6 @@ void ncsi_unregister_dev(struct ncsi_dev *nd)
 	list_del_rcu(&ndp->node);
 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
 
-	ncsi_unregister_netlink(nd->dev);
-
 	kfree(ndp);
 }
 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);
@@ -766,24 +766,8 @@ static struct genl_family ncsi_genl_family __ro_after_init = {
 	.n_small_ops = ARRAY_SIZE(ncsi_ops),
 };
 
-int ncsi_init_netlink(struct net_device *dev)
+static int __init ncsi_init_netlink(void)
 {
-	int rc;
-
-	rc = genl_register_family(&ncsi_genl_family);
-	if (rc)
-		netdev_err(dev, "ncsi: failed to register netlink family\n");
-
-	return rc;
-}
-
-int ncsi_unregister_netlink(struct net_device *dev)
-{
-	int rc;
-
-	rc = genl_unregister_family(&ncsi_genl_family);
-	if (rc)
-		netdev_err(dev, "ncsi: failed to unregister netlink family\n");
-
-	return rc;
+	return genl_register_family(&ncsi_genl_family);
 }
+subsys_initcall(ncsi_init_netlink);