Networking fixes for 5.16-rc8, including fixes from.. Santa?
Merge tag 'net-5.16-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from.. Santa?

  No regressions on our radar at this point. The igc problem fixed here
  was the last one I was tracking but it was broken in previous
  releases, anyway. Mostly driver fixes and a couple of largish SMC
  fixes.

  Current release - regressions:

   - xsk: initialise xskb free_list_node, fixup for a -rc7 fix

  Current release - new code bugs:

   - mlx5: handful of minor fixes:
       - use first online CPU instead of hard coded CPU
       - fix some error handling paths in 'mlx5e_tc_add_fdb_flow()'
       - fix skb memory leak when TC classifier action offloads are
         disabled
       - fix memory leak with rules with internal OvS port

  Previous releases - regressions:

   - igc: do not enable crosstimestamping for i225-V models

  Previous releases - always broken:

   - udp: use datalen to cap ipv6 udp max gso segments

   - fix use-after-free in tw_timer_handler due to early free of stats

   - smc: fix kernel panic caused by race of smc_sock

   - smc: don't send CDC/LLC message if link not ready, avoid timeouts

   - sctp: use call_rcu to free endpoint, avoid UAF in sock diag

   - bridge: mcast: add and enforce query interval minimum

   - usb: pegasus: do not drop long Ethernet frames

   - mlx5e: fix ICOSQ recovery flow for XSK

   - nfc: uapi: use kernel size_t to fix user-space builds"

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

* tag 'net-5.16-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (47 commits)
  fsl/fman: Fix missing put_device() call in fman_port_probe
  selftests: net: using ping6 for IPv6 in udpgro_fwd.sh
  Documentation: fix outdated interpretation of ip_no_pmtu_disc
  net/ncsi: check for error return from call to nla_put_u32
  net: bridge: mcast: fix br_multicast_ctx_vlan_global_disabled helper
  net: fix use-after-free in tw_timer_handler
  selftests: net: Fix a typo in udpgro_fwd.sh
  selftests/net: udpgso_bench_tx: fix dst ip argument
  net: bridge: mcast: add and enforce startup query interval minimum
  net: bridge: mcast: add and enforce query interval minimum
  ipv6: raw: check passed optlen before reading
  xsk: Initialise xskb free_list_node
  net/mlx5e: Fix wrong features assignment in case of error
  net/mlx5e: TC, Fix memory leak with rules with internal port
  ionic: Initialize the 'lif->dbid_inuse' bitmap
  igc: Fix TX timestamp support for non-MSI-X platforms
  igc: Do not enable crosstimestamping for i225-V models
  net/smc: fix kernel panic caused by race of smc_sock
  net/smc: don't send CDC/LLC message if link not ready
  NFC: st21nfca: Fix memory leak in device probe and remove
  ...
commit 74c78b4291
@@ -25,7 +25,8 @@ ip_default_ttl - INTEGER
 ip_no_pmtu_disc - INTEGER
 	Disable Path MTU Discovery. If enabled in mode 1 and a
 	fragmentation-required ICMP is received, the PMTU to this
-	destination will be set to min_pmtu (see below). You will need
+	destination will be set to the smallest of the old MTU to
+	this destination and min_pmtu (see below). You will need
 	to raise min_pmtu to the smallest interface MTU on your system
 	manually if you want to avoid locally generated fragments.
 
@@ -49,7 +50,8 @@ ip_no_pmtu_disc - INTEGER
 	Default: FALSE
 
 min_pmtu - INTEGER
-	default 552 - minimum discovered Path MTU
+	default 552 - minimum Path MTU. Unless this is changed mannually,
+	each cached pmtu will never be lower than this setting.
 
 ip_forward_use_pmtu - BOOLEAN
 	By default we don't trust protocol path MTUs while forwarding

@@ -381,7 +381,7 @@ mISDNInit(void)
 	err = mISDN_inittimer(&debug);
 	if (err)
 		goto error2;
-	err = l1_init(&debug);
+	err = Isdnl1_Init(&debug);
 	if (err)
 		goto error3;
 	err = Isdnl2_Init(&debug);
@@ -395,7 +395,7 @@ mISDNInit(void)
 error5:
 	Isdnl2_cleanup();
 error4:
-	l1_cleanup();
+	Isdnl1_cleanup();
 error3:
 	mISDN_timer_cleanup();
 error2:
@@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
 {
 	misdn_sock_cleanup();
 	Isdnl2_cleanup();
-	l1_cleanup();
+	Isdnl1_cleanup();
 	mISDN_timer_cleanup();
 	class_unregister(&mISDN_class);
 

@@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int);
 extern int	mISDN_inittimer(u_int *);
 extern void	mISDN_timer_cleanup(void);
 
-extern int	l1_init(u_int *);
-extern void	l1_cleanup(void);
+extern int	Isdnl1_Init(u_int *);
+extern void	Isdnl1_cleanup(void);
 extern int	Isdnl2_Init(u_int *);
 extern void	Isdnl2_cleanup(void);
 

@@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
 EXPORT_SYMBOL(create_l1);
 
 int
-l1_init(u_int *deb)
+Isdnl1_Init(u_int *deb)
 {
 	debug = deb;
 	l1fsm_s.state_count = L1S_STATE_COUNT;
@@ -409,7 +409,7 @@ l1_init(u_int *deb)
 }
 
 void
-l1_cleanup(void)
+Isdnl1_cleanup(void)
 {
 	mISDN_FsmFree(&l1fsm_s);
 }

@@ -366,6 +366,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		if (!buff->is_eop) {
 			buff_ = buff;
 			do {
+				if (buff_->next >= self->size) {
+					err = -EIO;
+					goto err_exit;
+				}
 				next_ = buff_->next,
 				buff_ = &self->buff_ring[next_];
 				is_rsc_completed =
@@ -389,6 +393,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 			    (buff->is_lro && buff->is_cso_err)) {
 				buff_ = buff;
 				do {
+					if (buff_->next >= self->size) {
+						err = -EIO;
+						goto err_exit;
+					}
 					next_ = buff_->next,
 					buff_ = &self->buff_ring[next_];
 
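The two aq_ring hunks above add the same defensive check: before following buff_->next into buff_ring[], the index is validated so a corrupted descriptor chain fails with -EIO instead of reading past the ring. A minimal standalone sketch of that walk pattern (struct and field names are hypothetical, not the driver's):

	#include <errno.h>
	#include <stdio.h>

	struct ring_buf { unsigned next; int is_eop; };
	struct ring { unsigned size; struct ring_buf *bufs; };

	/* Walk a multi-descriptor chain, refusing out-of-range links. */
	static int walk_chain(const struct ring *r, unsigned head)
	{
		const struct ring_buf *b = &r->bufs[head];

		while (!b->is_eop) {
			if (b->next >= r->size)	/* corrupted link: bail out */
				return -EIO;
			b = &r->bufs[b->next];
		}
		return 0;
	}

	int main(void)
	{
		struct ring_buf bufs[4] = { {1, 0}, {9, 0}, {0, 0}, {0, 1} };
		struct ring r = { 4, bufs };

		printf("%d\n", walk_chain(&r, 0)); /* -EIO: bufs[1].next == 9 */
		return 0;
	}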
@@ -1913,15 +1913,12 @@ static int ag71xx_probe(struct platform_device *pdev)
 	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
 	if (IS_ERR(ag->mac_reset)) {
 		netif_err(ag, probe, ndev, "missing mac reset\n");
-		err = PTR_ERR(ag->mac_reset);
-		goto err_free;
+		return PTR_ERR(ag->mac_reset);
 	}
 
 	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!ag->mac_base) {
-		err = -ENOMEM;
-		goto err_free;
-	}
+	if (!ag->mac_base)
+		return -ENOMEM;
 
 	ndev->irq = platform_get_irq(pdev, 0);
 	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1929,7 +1926,7 @@ static int ag71xx_probe(struct platform_device *pdev)
 	if (err) {
 		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
 			  ndev->irq);
-		goto err_free;
+		return err;
 	}
 
 	ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1957,10 +1954,8 @@ static int ag71xx_probe(struct platform_device *pdev)
 	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
 					    sizeof(struct ag71xx_desc),
 					    &ag->stop_desc_dma, GFP_KERNEL);
-	if (!ag->stop_desc) {
-		err = -ENOMEM;
-		goto err_free;
-	}
+	if (!ag->stop_desc)
+		return -ENOMEM;
 
 	ag->stop_desc->data = 0;
 	ag->stop_desc->ctrl = 0;
@@ -1975,7 +1970,7 @@ static int ag71xx_probe(struct platform_device *pdev)
 	err = of_get_phy_mode(np, &ag->phy_if_mode);
 	if (err) {
 		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
-		goto err_free;
+		return err;
 	}
 
 	netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
@@ -1983,7 +1978,7 @@ static int ag71xx_probe(struct platform_device *pdev)
 	err = clk_prepare_enable(ag->clk_eth);
 	if (err) {
 		netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
-		goto err_free;
+		return err;
 	}
 
 	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -2019,8 +2014,6 @@ err_mdio_remove:
 	ag71xx_mdio_remove(ag);
err_put_clk:
 	clk_disable_unprepare(ag->clk_eth);
-err_free:
-	free_netdev(ndev);
 	return err;
 }
 
@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 	fman = dev_get_drvdata(&fm_pdev->dev);
 	if (!fman) {
 		err = -EINVAL;
-		goto return_err;
+		goto put_device;
 	}
 
 	err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 		dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
 			__func__, port_node);
 		err = -EINVAL;
-		goto return_err;
+		goto put_device;
 	}
 	port_id = (u8)val;
 	port->dts_params.id = port_id;
@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 	} else {
 		dev_err(port->dev, "%s: Illegal port type\n", __func__);
 		err = -EINVAL;
-		goto return_err;
+		goto put_device;
 	}
 
 	port->dts_params.type = port_type;
@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 			dev_err(port->dev, "%s: incorrect qman-channel-id\n",
 				__func__);
 			err = -EINVAL;
-			goto return_err;
+			goto put_device;
 		}
 		port->dts_params.qman_channel_id = qman_channel_id;
 	}
@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
 		dev_err(port->dev, "%s: of_address_to_resource() failed\n",
 			__func__);
 		err = -ENOMEM;
-		goto return_err;
+		goto put_device;
 	}
 
 	port->dts_params.fman = fman;
@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
 
 	return 0;
 
+put_device:
+	put_device(&fm_pdev->dev);
return_err:
 	of_node_put(port_node);
free_port:
@@ -5467,6 +5467,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
+	if (icr & IGC_ICR_TS)
+		igc_tsync_interrupt(adapter);
+
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -5510,6 +5513,9 @@ static irqreturn_t igc_intr(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
+	if (icr & IGC_ICR_TS)
+		igc_tsync_interrupt(adapter);
+
 	napi_schedule(&q_vector->napi);
 
 	return IRQ_HANDLED;
@@ -768,7 +768,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
  */
 static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
 {
-	return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+	if (!IS_ENABLED(CONFIG_X86_TSC))
+		return false;
+
+	/* FIXME: it was noticed that enabling support for PCIe PTM in
+	 * some i225-V models could cause lockups when bringing the
+	 * interface up/down. There should be no downsides to
+	 * disabling crosstimestamping support for i225-V, as it
+	 * doesn't have any PTP support. That way we gain some time
+	 * while root causing the issue.
+	 */
+	if (adapter->pdev->device == IGC_DEV_ID_I225_V)
+		return false;
+
+	return pcie_ptm_enabled(adapter->pdev);
 }
 
 static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
@@ -224,7 +224,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
 	skb->protocol = eth_type_trans(skb, net_dev);
 	netif_receive_skb(skb);
 	net_dev->stats.rx_packets++;
-	net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
+	net_dev->stats.rx_bytes += len;
 
 	return 0;
 }
@@ -783,6 +783,8 @@ struct mlx5e_channel {
 	DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
 	int			ix;
 	int			cpu;
+	/* Sync between icosq recovery and XSK enable/disable. */
+	struct mutex		icosq_recovery_lock;
 };
 
 struct mlx5e_ptp;
@@ -1014,9 +1016,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
 void mlx5e_destroy_rq(struct mlx5e_rq *rq);
 
 struct mlx5e_sq_param;
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
-		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
 int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 		     struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
 		     struct mlx5e_xdpsq *sq, bool is_redirect);
@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
 void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
 
 #define MLX5E_REPORTER_PER_Q_MAX_LEN 256
 
@@ -66,7 +66,7 @@ mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
 
 static inline void
 mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
-		     struct sk_buff *skb) {}
+		     struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
 
 #endif /* CONFIG_MLX5_CLS_ACT */
 
@@ -62,6 +62,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
 
 static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 {
+	struct mlx5e_rq *xskrq = NULL;
 	struct mlx5_core_dev *mdev;
 	struct mlx5e_icosq *icosq;
 	struct net_device *dev;
@@ -70,7 +71,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 	int err;
 
 	icosq = ctx;
+
+	mutex_lock(&icosq->channel->icosq_recovery_lock);
+
+	/* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
 	rq = &icosq->channel->rq;
+	if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
+		xskrq = &icosq->channel->xskrq;
 	mdev = icosq->channel->mdev;
 	dev = icosq->channel->netdev;
 	err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
@@ -84,6 +91,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 		goto out;
 
 	mlx5e_deactivate_rq(rq);
+	if (xskrq)
+		mlx5e_deactivate_rq(xskrq);
+
 	err = mlx5e_wait_for_icosq_flush(icosq);
 	if (err)
 		goto out;
@@ -97,15 +107,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 		goto out;
 
 	mlx5e_reset_icosq_cc_pc(icosq);
+
 	mlx5e_free_rx_in_progress_descs(rq);
+	if (xskrq)
+		mlx5e_free_rx_in_progress_descs(xskrq);
+
 	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
 	mlx5e_activate_icosq(icosq);
-	mlx5e_activate_rq(rq);
 
+	mlx5e_activate_rq(rq);
 	rq->stats->recover++;
+
+	if (xskrq) {
+		mlx5e_activate_rq(xskrq);
+		xskrq->stats->recover++;
+	}
+
+	mutex_unlock(&icosq->channel->icosq_recovery_lock);
+
 	return 0;
out:
 	clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+	mutex_unlock(&icosq->channel->icosq_recovery_lock);
 	return err;
 }
 
@@ -706,6 +729,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
 	mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
 }
 
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
+{
+	mutex_lock(&c->icosq_recovery_lock);
+}
+
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
+{
+	mutex_unlock(&c->icosq_recovery_lock);
+}
+
 static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
 	.name = "rx",
 	.recover = mlx5e_rx_reporter_recover,
@@ -466,6 +466,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
 	return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
 }
 
+static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+					  void *ctx)
+{
+	struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
+
+	return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+}
+
 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
 					  struct devlink_fmsg *fmsg)
 {
@@ -561,7 +569,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
 	to_ctx.sq = sq;
 	err_ctx.ctx = &to_ctx;
 	err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
-	err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+	err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
 	snprintf(err_str, sizeof(err_str),
 		 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
 		 sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
@@ -4,6 +4,7 @@
 #include "setup.h"
 #include "en/params.h"
 #include "en/txrx.h"
+#include "en/health.h"
 
 /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
  * change unexpectedly, and mlx5e has a minimum valid stride size for striding
@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
 
 void mlx5e_activate_xsk(struct mlx5e_channel *c)
 {
+	/* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
+	 * activating XSKRQ in the middle of recovery.
+	 */
+	mlx5e_reporter_icosq_suspend_recovery(c);
 	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+	mlx5e_reporter_icosq_resume_recovery(c);
+
 	/* TX queue is created active. */
 
 	spin_lock_bh(&c->async_icosq_lock);
@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
 {
-	mlx5e_deactivate_rq(&c->xskrq);
+	/* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
+	 * middle of recovery. Suspend the recovery to avoid it.
+	 */
+	mlx5e_reporter_icosq_suspend_recovery(c);
+	clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+	mlx5e_reporter_icosq_resume_recovery(c);
+	synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+
 	/* TX queue is disabled on close. */
 }
@@ -1087,8 +1087,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
 	cancel_work_sync(&rq->dim.work);
-	if (rq->icosq)
-		cancel_work_sync(&rq->icosq->recover_work);
 	cancel_work_sync(&rq->recover_work);
 	mlx5e_destroy_rq(rq);
 	mlx5e_free_rx_descs(rq);
@@ -1216,9 +1214,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
 	mlx5e_reporter_icosq_cqe_err(sq);
 }
 
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+	struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+					      recover_work);
+
+	/* Not implemented yet. */
+
+	netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 			     struct mlx5e_sq_param *param,
-			     struct mlx5e_icosq *sq)
+			     struct mlx5e_icosq *sq,
+			     work_func_t recover_work_func)
 {
 	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
@@ -1239,7 +1248,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	if (err)
 		goto err_sq_wq_destroy;
 
-	INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+	INIT_WORK(&sq->recover_work, recover_work_func);
 
 	return 0;
 
@@ -1575,13 +1584,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
 	mlx5e_reporter_tx_err_cqe(sq);
 }
 
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
-		     struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+			    struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
+			    work_func_t recover_work_func)
 {
 	struct mlx5e_create_sq_param csp = {};
 	int err;
 
-	err = mlx5e_alloc_icosq(c, param, sq);
+	err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
 	if (err)
 		return err;
 
@@ -1620,7 +1630,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
 	synchronize_net(); /* Sync with NAPI. */
 }
 
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
 {
 	struct mlx5e_channel *c = sq->channel;
 
@@ -2084,11 +2094,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
 
 	spin_lock_init(&c->async_icosq_lock);
 
-	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
+	err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
+			       mlx5e_async_icosq_err_cqe_work);
 	if (err)
 		goto err_close_xdpsq_cq;
 
-	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+	mutex_init(&c->icosq_recovery_lock);
+
+	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
+			       mlx5e_icosq_err_cqe_work);
 	if (err)
 		goto err_close_async_icosq;
 
@@ -2156,9 +2170,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
 	mlx5e_close_xdpsq(&c->xdpsq);
 	if (c->xdp)
 		mlx5e_close_xdpsq(&c->rq_xdpsq);
+	/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+	cancel_work_sync(&c->icosq.recover_work);
 	mlx5e_close_rq(&c->rq);
 	mlx5e_close_sqs(c);
 	mlx5e_close_icosq(&c->icosq);
+	mutex_destroy(&c->icosq_recovery_lock);
 	mlx5e_close_icosq(&c->async_icosq);
 	if (c->xdp)
 		mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -3724,12 +3741,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
 
 static int mlx5e_handle_feature(struct net_device *netdev,
 				netdev_features_t *features,
-				netdev_features_t wanted_features,
 				netdev_features_t feature,
 				mlx5e_feature_handler feature_handler)
 {
-	netdev_features_t changes = wanted_features ^ netdev->features;
-	bool enable = !!(wanted_features & feature);
+	netdev_features_t changes = *features ^ netdev->features;
+	bool enable = !!(*features & feature);
 	int err;
 
 	if (!(changes & feature))
@@ -3737,22 +3753,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
 
 	err = feature_handler(netdev, enable);
 	if (err) {
+		MLX5E_SET_FEATURE(features, feature, !enable);
 		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
 			   enable ? "Enable" : "Disable", &feature, err);
 		return err;
 	}
 
-	MLX5E_SET_FEATURE(features, feature, enable);
 	return 0;
 }
 
 int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 {
-	netdev_features_t oper_features = netdev->features;
+	netdev_features_t oper_features = features;
 	int err = 0;
 
 #define MLX5E_HANDLE_FEATURE(feature, handler) \
-	mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+	mlx5e_handle_feature(netdev, &oper_features, feature, handler)
 
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
 	err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
@@ -1196,19 +1196,14 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
 	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
 		goto offload_rule_0;
 
-	if (flow_flag_test(flow, CT)) {
-		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
-		return;
-	}
-
-	if (flow_flag_test(flow, SAMPLE)) {
-		mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
-		return;
-	}
-
 	if (attr->esw_attr->split_count)
 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
 
+	if (flow_flag_test(flow, CT))
+		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+	else if (flow_flag_test(flow, SAMPLE))
+		mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+	else
offload_rule_0:
 		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 }
@@ -1445,7 +1440,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
 							metadata);
 				if (err)
-					return err;
+					goto err_out;
+
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 			}
 		}
 
@@ -1461,13 +1458,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		if (attr->chain) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Internal port rule is only supported on chain 0");
-			return -EOPNOTSUPP;
+			err = -EOPNOTSUPP;
+			goto err_out;
 		}
 
 		if (attr->dest_chain) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Internal port rule offload doesn't support goto action");
-			return -EOPNOTSUPP;
+			err = -EOPNOTSUPP;
+			goto err_out;
 		}
 
 		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
@@ -1475,8 +1474,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 						 flow_flag_test(flow, EGRESS) ?
 						 MLX5E_TC_INT_PORT_EGRESS :
 						 MLX5E_TC_INT_PORT_INGRESS);
-		if (IS_ERR(int_port))
-			return PTR_ERR(int_port);
+		if (IS_ERR(int_port)) {
+			err = PTR_ERR(int_port);
+			goto err_out;
+		}
 
 		esw_attr->int_port = int_port;
 	}
@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
+	if (!mlx5_chains_prios_supported(chains))
+		return 1;
+
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		return UINT_MAX;
 
@@ -1809,12 +1809,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
 
 int mlx5_recover_device(struct mlx5_core_dev *dev)
 {
-	int ret = -EIO;
+	if (!mlx5_core_is_sf(dev)) {
+		mlx5_pci_disable_device(dev);
+		if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
+			return -EIO;
+	}
 
-	mlx5_pci_disable_device(dev);
-	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
-		ret = mlx5_load_one(dev);
-
-	return ret;
+	return mlx5_load_one(dev);
 }
 
 static struct pci_driver mlx5_core_driver = {
@@ -356,8 +356,8 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	new_irq = irq_pool_create_irq(pool, affinity);
 	if (IS_ERR(new_irq)) {
 		if (!least_loaded_irq) {
-			mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
-				      cpumask_first(affinity));
+			mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+				      PTR_ERR(new_irq));
 			mutex_unlock(&pool->lock);
 			return new_irq;
 		}
@@ -398,7 +398,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 		cpumask_copy(irq->mask, affinity);
 	if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
 	    cpumask_empty(irq->mask))
-		cpumask_set_cpu(0, irq->mask);
+		cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
 	irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
 	mutex_unlock(&pool->lock);
@@ -2,6 +2,7 @@
 /* Copyright (c) 2019 Mellanox Technologies. */
 
 #include <linux/mlx5/eswitch.h>
+#include <linux/err.h>
 #include "dr_types.h"
 
 #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
@@ -72,9 +73,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
 	}
 
 	dmn->uar = mlx5_get_uars_page(dmn->mdev);
-	if (!dmn->uar) {
+	if (IS_ERR(dmn->uar)) {
 		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(dmn->uar);
 		goto clean_pd;
 	}
 
@@ -163,9 +164,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
 
 static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
 {
-	return dr_domain_query_vport(dmn,
-				     dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
-				     false,
+	return dr_domain_query_vport(dmn, 0, false,
 				     &dmn->info.caps.vports.esw_manager_caps);
 }
 
@@ -3135,7 +3135,7 @@ int ionic_lif_init(struct ionic_lif *lif)
 		return -EINVAL;
 	}
 
-	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
+	lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
 	if (!lif->dbid_inuse) {
 		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
 		return -ENOMEM;
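The one-line ionic fix matters because bitmap_alloc() returns uninitialized memory, so stale bits could make doorbell IDs look already in use; bitmap_zalloc() hands back an all-clear map. A small user-space analogue of the difference (calloc standing in for the kernel allocators):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t nbits = 64, nbytes = (nbits + 7) / 8;

		/* malloc(): contents indeterminate, like bitmap_alloc() */
		unsigned char *dirty = malloc(nbytes);
		/* calloc(): all bits zero, like bitmap_zalloc() */
		unsigned char *clean = calloc(1, nbytes);

		if (!dirty || !clean)
			return 1;
		/* Only the zeroed map is safe to scan for a free (0) bit. */
		printf("first clean byte: %#x\n", clean[0]); /* always 0 */
		free(dirty);
		free(clean);
		return 0;
	}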
@@ -239,8 +239,8 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
 	/* Check if we have a GPIO associated with this fixed phy */
 	if (!gpiod) {
 		gpiod = fixed_phy_get_gpiod(np);
-		if (IS_ERR(gpiod))
-			return ERR_CAST(gpiod);
+		if (!gpiod)
+			return ERR_PTR(-EINVAL);
 	}
 
 	/* Get the next available PHY address, up to PHY_MAX_ADDR */
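The fixed_phy hunk is a classic NULL-vs-ERR_PTR mixup: fixed_phy_get_gpiod() reports failure with NULL, so testing its result with IS_ERR() never fires. A sketch of the two return conventions and the matching checks (helper names are hypothetical):

	#include <errno.h>
	#include <stdio.h>

	/* Minimal stand-ins for the kernel's ERR_PTR machinery. */
	#define MAX_ERRNO 4095
	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	/* Convention A: NULL on failure -- caller must test for NULL. */
	static void *get_gpiod_or_null(int ok) { return ok ? (void *)0x1000 : NULL; }

	/* Convention B: ERR_PTR(-E...) on failure -- caller must use IS_ERR(). */
	static void *get_gpiod_or_errptr(int ok) { return ok ? (void *)0x1000 : ERR_PTR(-ENODEV); }

	int main(void)
	{
		void *a = get_gpiod_or_null(0);
		void *b = get_gpiod_or_errptr(0);

		printf("IS_ERR on a NULL-returning API: %d (misses the failure)\n", IS_ERR(a));
		printf("NULL check: %d, IS_ERR check: %d\n", a == NULL, IS_ERR(b));
		return 0;
	}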
@@ -493,11 +493,11 @@ static void read_bulk_callback(struct urb *urb)
 		goto goon;
 
 	rx_status = buf[count - 2];
-	if (rx_status & 0x1e) {
+	if (rx_status & 0x1c) {
 		netif_dbg(pegasus, rx_err, net,
 			  "RX packet error %x\n", rx_status);
 		net->stats.rx_errors++;
-		if (rx_status & 0x06)	/* long or runt */
+		if (rx_status & 0x04)	/* runt */
 			net->stats.rx_length_errors++;
 		if (rx_status & 0x08)
 			net->stats.rx_crc_errors++;
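The pegasus change narrows the error mask from 0x1e to 0x1c and the length-error test from 0x06 to 0x04; that is, the "long packet" bit (0x02, inferred from the old "long or runt" comment) no longer counts as an error, so long Ethernet frames are kept. A sketch of the decode under that inferred bit layout:

	#include <stdio.h>

	/* rx_status bits as implied by the diff: 0x02 long, 0x04 runt, 0x08 CRC. */
	#define RX_LONG 0x02
	#define RX_RUNT 0x04
	#define RX_CRC  0x08

	static int frame_is_bad(unsigned char rx_status)
	{
		/* Old mask 0x1e treated RX_LONG as fatal; 0x1c accepts long frames. */
		return (rx_status & 0x1c) != 0;
	}

	int main(void)
	{
		printf("long-only frame dropped? %d\n", frame_is_bad(RX_LONG)); /* 0 */
		printf("runt frame dropped? %d\n", frame_is_bad(RX_RUNT));      /* 1 */
		printf("CRC-error frame dropped? %d\n", frame_is_bad(RX_CRC));  /* 1 */
		return 0;
	}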
@@ -524,7 +524,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
 	phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
 	if (IS_ERR(phy->gpiod_ena)) {
 		nfc_err(dev, "Unable to get ENABLE GPIO\n");
-		return PTR_ERR(phy->gpiod_ena);
+		r = PTR_ERR(phy->gpiod_ena);
+		goto out_free;
 	}
 
 	phy->se_status.is_ese_present =
@@ -535,7 +536,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
 	r = st21nfca_hci_platform_init(phy);
 	if (r < 0) {
 		nfc_err(&client->dev, "Unable to reboot st21nfca\n");
-		return r;
+		goto out_free;
 	}
 
 	r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
@@ -544,15 +545,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
 				ST21NFCA_HCI_DRIVER_NAME, phy);
 	if (r < 0) {
 		nfc_err(&client->dev, "Unable to register IRQ handler\n");
-		return r;
+		goto out_free;
 	}
 
-	return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
-				  ST21NFCA_FRAME_HEADROOM,
-				  ST21NFCA_FRAME_TAILROOM,
-				  ST21NFCA_HCI_LLC_MAX_PAYLOAD,
-				  &phy->hdev,
-				  &phy->se_status);
+	r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+			       ST21NFCA_FRAME_HEADROOM,
+			       ST21NFCA_FRAME_TAILROOM,
+			       ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+			       &phy->hdev,
+			       &phy->se_status);
+	if (r)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	kfree_skb(phy->pending_skb);
+	return r;
 }
 
 static int st21nfca_hci_i2c_remove(struct i2c_client *client)
@@ -563,6 +572,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
 
 	if (phy->powered)
 		st21nfca_hci_i2c_disable(phy);
+	if (phy->pending_skb)
+		kfree_skb(phy->pending_skb);
 
 	return 0;
 }
@@ -105,6 +105,7 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
 void sctp_transport_walk_start(struct rhashtable_iter *iter);
 void sctp_transport_walk_stop(struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_next(struct net *net,
@@ -115,8 +116,7 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 				  struct net *net,
 				  const union sctp_addr *laddr,
 				  const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-			    int (*cb_done)(struct sctp_transport *, void *),
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
 			    struct net *net, int *pos, void *p);
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
@@ -1355,6 +1355,7 @@ struct sctp_endpoint {
 	      reconf_enable:1;
 
 	__u8  strreset_enable;
+	struct rcu_head rcu;
 };
 
 /* Recover the outter endpoint structure. */
@@ -1370,7 +1371,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
 struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
 void sctp_endpoint_free(struct sctp_endpoint *);
 void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
 void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
 struct sctp_association *sctp_endpoint_lookup_assoc(
 	const struct sctp_endpoint *ep,
@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
 #define NFC_SE_ENABLED  0x1
 
 struct sockaddr_nfc {
-	sa_family_t sa_family;
+	__kernel_sa_family_t sa_family;
 	__u32 dev_idx;
 	__u32 target_idx;
 	__u32 nfc_protocol;
@@ -271,14 +271,14 @@ struct sockaddr_nfc {
 
 #define NFC_LLCP_MAX_SERVICE_NAME 63
 struct sockaddr_nfc_llcp {
-	sa_family_t sa_family;
+	__kernel_sa_family_t sa_family;
 	__u32 dev_idx;
 	__u32 target_idx;
 	__u32 nfc_protocol;
 	__u8 dsap; /* Destination SAP, if known */
 	__u8 ssap; /* Source SAP to be bound to */
 	char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
-	size_t service_name_len;
+	__kernel_size_t service_name_len;
 };
 
 /* NFC socket protocols */
@@ -4522,6 +4522,38 @@ int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
 }
 #endif
 
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+				  unsigned long val)
+{
+	unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
+		br_info(brmctx->br,
+			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
+			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
+			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
+		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+	}
+
+	brmctx->multicast_query_interval = intvl_jiffies;
+}
+
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+					  unsigned long val)
+{
+	unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
+		br_info(brmctx->br,
+			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
+			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
+			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
+		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+	}
+
+	brmctx->multicast_startup_query_interval = intvl_jiffies;
+}
+
 /**
  * br_multicast_list_adjacent - Returns snooped multicast addresses
  * @dev:	The bridge port adjacent to which to retrieve addresses
@@ -1357,7 +1357,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
 
-		br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_query_intvl(&br->multicast_ctx, val);
 	}
 
 	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -1369,7 +1369,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
 
-		br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
 	}
 
 	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
@@ -28,6 +28,8 @@
 #define BR_MAX_PORTS	(1<<BR_PORT_BITS)
 
 #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
 
 #define BR_HWDOM_MAX BITS_PER_LONG
 
@@ -963,6 +965,10 @@ int br_multicast_dump_querier_state(struct sk_buff *skb,
 				    int nest_attr);
 size_t br_multicast_querier_state_size(void);
 size_t br_rports_size(const struct net_bridge_mcast *brmctx);
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+				  unsigned long val);
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+					  unsigned long val);
 
 static inline bool br_group_is_l2(const struct br_ip *group)
 {
@@ -1147,9 +1153,9 @@ br_multicast_port_ctx_get_global(const struct net_bridge_mcast_port *pmctx)
 static inline bool
 br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx)
 {
-	return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
-	       br_multicast_ctx_is_vlan(brmctx) &&
-	       !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
+	return br_multicast_ctx_is_vlan(brmctx) &&
+	       (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) ||
+		!(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED));
 }
 
 static inline bool
@@ -658,7 +658,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
 static int set_query_interval(struct net_bridge *br, unsigned long val,
 			      struct netlink_ext_ack *extack)
 {
-	br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+	br_multicast_set_query_intvl(&br->multicast_ctx, val);
 	return 0;
 }
 
@@ -706,7 +706,7 @@ static ssize_t multicast_startup_query_interval_show(
 static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
 				      struct netlink_ext_ack *extack)
 {
-	br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+	br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
 	return 0;
 }
 
@@ -521,7 +521,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
 		u64 val;
 
 		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
-		v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_query_intvl(&v->br_mcast_ctx, val);
 		*changed = true;
 	}
 	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -535,7 +535,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
 		u64 val;
 
 		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
-		v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val);
 		*changed = true;
 	}
 	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
@@ -1994,6 +1994,10 @@ static int __init inet_init(void)
 
 	ip_init();
 
+	/* Initialise per-cpu ipv4 mibs */
+	if (init_ipv4_mibs())
+		panic("%s: Cannot init ipv4 mibs\n", __func__);
+
 	/* Setup TCP slab cache for open requests. */
 	tcp_init();
 
@@ -2024,12 +2028,6 @@ static int __init inet_init(void)
 
 	if (init_inet_pernet_ops())
 		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
-	/*
-	 *	Initialise per-cpu ipv4 mibs
-	 */
-
-	if (init_ipv4_mibs())
-		pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
 
 	ipv4_proc_init();
 
@@ -808,6 +808,8 @@ vti6_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data
 	struct net *net = dev_net(dev);
 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
 
+	memset(&p1, 0, sizeof(p1));
+
 	switch (cmd) {
 	case SIOCGETTUNNEL:
 		if (dev == ip6n->fb_tnl_dev) {
@@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 	struct raw6_sock *rp = raw6_sk(sk);
 	int val;
 
+	if (optlen < sizeof(val))
+		return -EINVAL;
+
 	if (copy_from_sockptr(&val, optval, sizeof(val)))
 		return -EFAULT;
 
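The raw socket hunk adds the missing length check before copy_from_sockptr(): without it, a setsockopt() call with optlen shorter than sizeof(int) would read past the end of the user-supplied buffer. The shape of the guard, sketched outside the kernel:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	/* Validate the caller-supplied length before copying a fixed-size value. */
	static int set_int_opt(const void *optval, size_t optlen, int *out)
	{
		int val;

		if (optlen < sizeof(val))	/* the check the fix adds */
			return -EINVAL;
		memcpy(&val, optval, sizeof(val));
		*out = val;
		return 0;
	}

	int main(void)
	{
		char short_buf[1] = { 7 };
		int v, rc = set_int_opt(short_buf, sizeof(short_buf), &v);

		printf("short optlen -> %d (EINVAL is %d)\n", rc, -EINVAL);
		return 0;
	}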
@@ -1204,7 +1204,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
 			kfree_skb(skb);
 			return -EINVAL;
 		}
-		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
 			kfree_skb(skb);
 			return -EINVAL;
 		}
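The udp_v6_send_skb() fix caps GSO on datalen (the UDP payload) instead of skb->len, which also counts protocol headers; with skb->len, a send whose payload exactly fills UDP_MAX_SEGMENTS segments of gso_size bytes would be rejected once the headers push the total over the limit. The arithmetic, sketched with the same constant (header sizes illustrative):

	#include <stdio.h>

	#define UDP_MAX_SEGMENTS (1 << 6)	/* 64, as in the kernel uapi */

	int main(void)
	{
		unsigned gso_size = 1400;
		unsigned datalen = gso_size * UDP_MAX_SEGMENTS; /* exactly at the cap */
		unsigned headers = 8 + 40;                      /* UDP + IPv6 */
		unsigned skb_len = datalen + headers;

		/* Old check: headers push the total over the cap -> spurious -EINVAL. */
		printf("skb->len check rejects: %d\n", skb_len > gso_size * UDP_MAX_SEGMENTS);
		/* Fixed check: the payload alone is compared against the cap. */
		printf("datalen check rejects:  %d\n", datalen > gso_size * UDP_MAX_SEGMENTS);
		return 0;
	}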
@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
 		pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
 		if (!pnest)
 			return -ENOMEM;
-		nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+		rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+		if (rc) {
+			nla_nest_cancel(skb, pnest);
+			return rc;
+		}
 		if ((0x1 << np->id) == ndp->package_whitelist)
 			nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
 		cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
@@ -290,9 +290,8 @@ out:
 	return err;
 }
 
-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
 	struct sk_buff *skb = commp->skb;
@@ -302,6 +301,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
 	int err = 0;
 
 	lock_sock(sk);
+	if (ep != tsp->asoc->ep)
+		goto release;
 	list_for_each_entry(assoc, &ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
@@ -344,9 +345,8 @@ release:
 	return err;
 }
 
-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
 {
-	struct sctp_endpoint *ep = tsp->asoc->ep;
 	struct sctp_comm_param *commp = p;
 	struct sock *sk = ep->base.sk;
 	const struct inet_diag_req_v2 *r = commp->r;
@@ -505,7 +505,7 @@ skip:
 	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
 		goto done;
 
-	sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
+	sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
 				net, &pos, &commp);
 	cb->args[2] = pos;
 
@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 }
 
 /* Final destructor for endpoint. */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+	struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+	struct sock *sk = ep->base.sk;
+
+	sctp_sk(sk)->ep = NULL;
+	sock_put(sk);
+
+	kfree(ep);
+	SCTP_DBG_OBJCNT_DEC(ep);
+}
+
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
 	struct sock *sk;
@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	if (sctp_sk(sk)->bind_hash)
 		sctp_put_port(sk);
 
-	sctp_sk(sk)->ep = NULL;
-	/* Give up our hold on the sock */
-	sock_put(sk);
-
-	kfree(ep);
-	SCTP_DBG_OBJCNT_DEC(ep);
+	call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
 }
 
 /* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(struct sctp_endpoint *ep)
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
 {
-	refcount_inc(&ep->base.refcnt);
+	return refcount_inc_not_zero(&ep->base.refcnt);
 }
 
 /* Release a reference to an endpoint and clean up if there are
@@ -5338,11 +5338,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
 
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
-			    int (*cb_done)(struct sctp_transport *, void *),
-			    struct net *net, int *pos, void *p) {
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+				    struct net *net, int *pos, void *p)
+{
 	struct rhashtable_iter hti;
 	struct sctp_transport *tsp;
+	struct sctp_endpoint *ep;
 	int ret;
 
again:
@@ -5351,26 +5352,32 @@ again:
 
 	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
 	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-		ret = cb(tsp, p);
+		ep = tsp->asoc->ep;
+		if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+			ret = cb(ep, tsp, p);
 			if (ret)
 				break;
+			sctp_endpoint_put(ep);
+		}
 		(*pos)++;
 		sctp_transport_put(tsp);
 	}
 	sctp_transport_walk_stop(&hti);
 
 	if (ret) {
-		if (cb_done && !cb_done(tsp, p)) {
+		if (cb_done && !cb_done(ep, tsp, p)) {
 			(*pos)++;
+			sctp_endpoint_put(ep);
 			sctp_transport_put(tsp);
 			goto again;
 		}
+		sctp_endpoint_put(ep);
 		sctp_transport_put(tsp);
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
@@ -180,6 +180,11 @@ struct smc_connection {
 	u16			tx_cdc_seq;	/* sequence # for CDC send */
 	u16			tx_cdc_seq_fin;	/* sequence # - tx completed */
 	spinlock_t		send_lock;	/* protect wr_sends */
+	atomic_t		cdc_pend_tx_wr; /* number of pending tx CDC wqe
+						 * - inc when post wqe,
+						 * - dec on polled tx cqe
+						 */
+	wait_queue_head_t	cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/
 	struct delayed_work	tx_work;	/* retry of smc_cdc_msg_send */
 	u32			tx_off;		/* base offset in peer rmb */
 
@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 	struct smc_sock *smc;
 	int diff;
 
-	if (!conn)
-		/* already dismissed */
-		return;
-
 	smc = container_of(conn, struct smc_sock, conn);
 	bh_lock_sock(&smc->sk);
 	if (!wc_status) {
@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 			       conn);
 		conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
 	}
+
+	if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
+	    unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
+		wake_up(&conn->cdc_pend_tx_wq);
+	WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
+
 	smc_tx_sndbuf_nonfull(smc);
 	bh_unlock_sock(&smc->sk);
 }
@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 	conn->tx_cdc_seq++;
 	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
 	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
+
+	atomic_inc(&conn->cdc_pend_tx_wr);
+	smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
 	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
 	if (!rc) {
 		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
 	} else {
 		conn->tx_cdc_seq--;
 		conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
+		atomic_dec(&conn->cdc_pend_tx_wr);
 	}
 
 	return rc;
@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
 	peer->token = htonl(local->token);
 	peer->prod_flags.failover_validation = 1;
 
+	/* We need to set pend->conn here to make sure smc_cdc_tx_handler()
+	 * can handle properly
+	 */
+	smc_cdc_add_pending_send(conn, pend);
+
+	atomic_inc(&conn->cdc_pend_tx_wr);
+	smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
 	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
+	if (unlikely(rc))
+		atomic_dec(&conn->cdc_pend_tx_wr);
+
 	return rc;
 }
 
@@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
 	return rc;
 }
 
-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
-			      unsigned long data)
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
 {
-	struct smc_connection *conn = (struct smc_connection *)data;
-	struct smc_cdc_tx_pend *cdc_pend =
-		(struct smc_cdc_tx_pend *)tx_pend;
-
-	return cdc_pend->conn == conn;
-}
-
-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
-{
-	struct smc_cdc_tx_pend *cdc_pend =
-		(struct smc_cdc_tx_pend *)tx_pend;
-
-	cdc_pend->conn = NULL;
-}
-
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
-{
-	struct smc_link *link = conn->lnk;
-
-	smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
-				smc_cdc_tx_filter, smc_cdc_tx_dismisser,
-				(unsigned long)conn);
+	wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
 }
 
 /* Send a SMC-D CDC header.
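Taken together, the smc_cdc.c hunks replace the old "dismiss pending slots" scheme with a counted-and-awaited one: every posted CDC write request bumps cdc_pend_tx_wr (with a barrier before posting), the completion handler drops the count and wakes any waiter, and teardown blocks in smc_cdc_wait_pend_tx_wr() until the count reaches zero, so the connection can no longer be freed under an in-flight completion. A minimal pthread sketch of that pattern (names mirror the diff; the implementation is illustrative, not the kernel's):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
	static int cdc_pend_tx_wr;	/* pending work-request count */

	static void post_wr(void)	/* inc before posting */
	{
		pthread_mutex_lock(&lock);
		cdc_pend_tx_wr++;
		pthread_mutex_unlock(&lock);
	}

	static void *completion(void *arg)	/* dec on completion, wake waiter */
	{
		pthread_mutex_lock(&lock);
		if (--cdc_pend_tx_wr == 0)
			pthread_cond_broadcast(&wq);
		pthread_mutex_unlock(&lock);
		return arg;
	}

	static void wait_pend_tx_wr(void)	/* teardown waits for count == 0 */
	{
		pthread_mutex_lock(&lock);
		while (cdc_pend_tx_wr)
			pthread_cond_wait(&wq, &lock);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		pthread_t t;

		post_wr();
		pthread_create(&t, NULL, completion, NULL);
		wait_pend_tx_wr();	/* returns only after the completion ran */
		pthread_join(t, NULL);
		puts("all pending CDC work requests completed");
		return 0;
	}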
@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
 			  struct smc_wr_buf **wr_buf,
 			  struct smc_rdma_wr **wr_rdma_buf,
 			  struct smc_cdc_tx_pend **pend);
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
 		     struct smc_cdc_tx_pend *pend);
 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
@@ -647,7 +647,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 		struct smc_link *lnk = &lgr->lnk[i];
 
-		if (smc_link_usable(lnk))
+		if (smc_link_sendable(lnk))
 			lnk->state = SMC_LNK_INACTIVE;
 	}
 	wake_up_all(&lgr->llc_msg_waiter);
@@ -1127,7 +1127,7 @@ void smc_conn_free(struct smc_connection *conn)
 		smc_ism_unset_conn(conn);
 		tasklet_kill(&conn->rx_tsklet);
 	} else {
-		smc_cdc_tx_dismiss_slots(conn);
+		smc_cdc_wait_pend_tx_wr(conn);
 		if (current_work() != &conn->abort_work)
 			cancel_work_sync(&conn->abort_work);
 	}
@@ -1204,7 +1204,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
 	smc_llc_link_clear(lnk, log);
 	smcr_buf_unmap_lgr(lnk);
 	smcr_rtoken_clear_link(lnk);
-	smc_ib_modify_qp_reset(lnk);
+	smc_ib_modify_qp_error(lnk);
 	smc_wr_free_link(lnk);
 	smc_ib_destroy_queue_pair(lnk);
 	smc_ib_dealloc_protection_domain(lnk);
@@ -1336,7 +1336,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
 		else
 			tasklet_unlock_wait(&conn->rx_tsklet);
 	} else {
-		smc_cdc_tx_dismiss_slots(conn);
+		smc_cdc_wait_pend_tx_wr(conn);
 	}
 	smc_lgr_unregister_conn(conn);
 	smc_close_active_abort(smc);
@@ -1459,11 +1459,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
 /* Called when an SMCR device is removed or the smc module is unloaded.
  * If smcibdev is given, all SMCR link groups using this device are terminated.
  * If smcibdev is NULL, all SMCR link groups are terminated.
+ *
+ * We must wait here for QPs been destroyed before we destroy the CQs,
+ * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus
+ * smc_sock cannot be released.
  */
 void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
 {
 	struct smc_link_group *lgr, *lg;
 	LIST_HEAD(lgr_free_list);
+	LIST_HEAD(lgr_linkdown_list);
 	int i;
 
 	spin_lock_bh(&smc_lgr_list.lock);
@@ -1475,7 +1480,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
 		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
 			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
 				if (lgr->lnk[i].smcibdev == smcibdev)
-					smcr_link_down_cond_sched(&lgr->lnk[i]);
+					list_move_tail(&lgr->list, &lgr_linkdown_list);
 			}
 		}
 	}
@@ -1487,6 +1492,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
 		__smc_lgr_terminate(lgr, false);
 	}
 
+	list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
+		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+			if (lgr->lnk[i].smcibdev == smcibdev) {
+				mutex_lock(&lgr->llc_conf_mutex);
+				smcr_link_down_cond(&lgr->lnk[i]);
+				mutex_unlock(&lgr->llc_conf_mutex);
+			}
+		}
+	}
+
 	if (smcibdev) {
 		if (atomic_read(&smcibdev->lnk_cnt))
 			wait_event(smcibdev->lnks_deleted,
@@ -1586,7 +1601,6 @@ static void smcr_link_down(struct smc_link *lnk)
 	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
 		return;
 
-	smc_ib_modify_qp_reset(lnk);
 	to_lnk = smc_switch_conns(lgr, lnk, true);
 	if (!to_lnk) { /* no backup link available */
 		smcr_link_clear(lnk, true);
@@ -1824,6 +1838,7 @@ create:
 	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
 	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
 	conn->urg_state = SMC_URG_READ;
+	init_waitqueue_head(&conn->cdc_pend_tx_wq);
 	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
 	if (ini->is_smcd) {
 		conn->rx_off = sizeof(struct smcd_cdc_msg);
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -415,6 +415,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
 	return true;
 }
 
+static inline bool smc_link_sendable(struct smc_link *lnk)
+{
+	return smc_link_usable(lnk) &&
+		lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
+}
+
 static inline bool smc_link_active(struct smc_link *lnk)
 {
 	return lnk->state == SMC_LNK_ACTIVE;
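
The core of the "link not ready" fix is the distinction drawn above: smc_link_usable() only says the link structure is alive, while smc_link_sendable() additionally requires the QP to still be in RTS (ready to send). Posting on a usable-but-not-sendable link can never complete, which is how the LLC/CDC timeouts arose. A minimal sketch of the caller pattern this enables (mirroring the smc_wr.c hunks below, not a verbatim call site):

	/* refuse to queue an LLC/CDC message on a link that cannot transmit */
	if (!smc_link_sendable(link))
		return -ENOLINK;	/* fail fast instead of timing out */
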
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -109,12 +109,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
 			    IB_QP_MAX_QP_RD_ATOMIC);
 }
 
-int smc_ib_modify_qp_reset(struct smc_link *lnk)
+int smc_ib_modify_qp_error(struct smc_link *lnk)
 {
 	struct ib_qp_attr qp_attr;
 
 	memset(&qp_attr, 0, sizeof(qp_attr));
-	qp_attr.qp_state = IB_QPS_RESET;
+	qp_attr.qp_state = IB_QPS_ERR;
 	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
 }
 
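
Swapping IB_QPS_RESET for IB_QPS_ERR here is deliberate: in the IB verbs state machine, moving a QP to the error state flushes all outstanding work requests back as flush-error completions, whereas a reset discards them without generating CQEs. Those flushed CQEs are what let cdc_pend_tx_wr drain to zero, as the new comment in smc_smcr_terminate_all() spells out. Roughly, the teardown ordering relied on (an illustration assembled from the surrounding hunks, not one verbatim call chain):

	smc_ib_modify_qp_error(lnk);	/* QP -> ERR: pending WRs complete as flush errors */
	smc_cdc_wait_pend_tx_wr(conn);	/* waiters on cdc_pend_tx_wq can now make progress */
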
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -90,6 +90,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
 int smc_ib_ready_link(struct smc_link *lnk);
 int smc_ib_modify_qp_rts(struct smc_link *lnk);
 int smc_ib_modify_qp_reset(struct smc_link *lnk);
+int smc_ib_modify_qp_error(struct smc_link *lnk);
 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
 int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
 			     struct smc_buf_desc *buf_slot, u8 link_idx);
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -1630,7 +1630,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
 	delllc.reason = htonl(rsn);
 
 	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
-		if (!smc_link_usable(&lgr->lnk[i]))
+		if (!smc_link_sendable(&lgr->lnk[i]))
 			continue;
 		if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
 			break;
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
 }
 
 /* wait till all pending tx work requests on the given link are completed */
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
 {
-	if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
-			       SMC_WR_TX_WAIT_PENDING_TIME))
-		return 0;
-	else /* timeout */
-		return -EPIPE;
+	wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
 }
 
 static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
 	struct smc_wr_tx_pend pnd_snd;
 	struct smc_link *link;
 	u32 pnd_snd_idx;
-	int i;
 
 	link = wc->qp->qp_context;
 
@@ -128,14 +123,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
 	}
 
 	if (wc->status) {
-		for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-			/* clear full struct smc_wr_tx_pend including .priv */
-			memset(&link->wr_tx_pends[i], 0,
-			       sizeof(link->wr_tx_pends[i]));
-			memset(&link->wr_tx_bufs[i], 0,
-			       sizeof(link->wr_tx_bufs[i]));
-			clear_bit(i, link->wr_tx_mask);
-		}
 		if (link->lgr->smc_version == SMC_V2) {
 			memset(link->wr_tx_v2_pend, 0,
 			       sizeof(*link->wr_tx_v2_pend));
@@ -188,7 +175,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 {
 	*idx = link->wr_tx_cnt;
-	if (!smc_link_usable(link))
+	if (!smc_link_sendable(link))
 		return -ENOLINK;
 	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
 		if (!test_and_set_bit(*idx, link->wr_tx_mask))
@@ -231,7 +218,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
 	} else {
 		rc = wait_event_interruptible_timeout(
 			link->wr_tx_wait,
-			!smc_link_usable(link) ||
+			!smc_link_sendable(link) ||
 			lgr->terminating ||
 			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
 			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
@@ -358,18 +345,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
 			unsigned long timeout)
 {
 	struct smc_wr_tx_pend *pend;
+	u32 pnd_idx;
 	int rc;
 
 	pend = container_of(priv, struct smc_wr_tx_pend, priv);
 	pend->compl_requested = 1;
-	init_completion(&link->wr_tx_compl[pend->idx]);
+	pnd_idx = pend->idx;
+	init_completion(&link->wr_tx_compl[pnd_idx]);
 
 	rc = smc_wr_tx_send(link, priv);
 	if (rc)
 		return rc;
 	/* wait for completion by smc_wr_tx_process_cqe() */
 	rc = wait_for_completion_interruptible_timeout(
-		&link->wr_tx_compl[pend->idx], timeout);
+		&link->wr_tx_compl[pnd_idx], timeout);
 	if (rc <= 0)
 		rc = -ENODATA;
 	if (rc > 0)
@@ -419,25 +408,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
 	return rc;
 }
 
-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
-			     smc_wr_tx_filter filter,
-			     smc_wr_tx_dismisser dismisser,
-			     unsigned long data)
-{
-	struct smc_wr_tx_pend_priv *tx_pend;
-	struct smc_wr_rx_hdr *wr_tx;
-	int i;
-
-	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
-		wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
-		if (wr_tx->type != wr_tx_hdr_type)
-			continue;
-		tx_pend = &link->wr_tx_pends[i].priv;
-		if (filter(tx_pend, data))
-			dismisser(tx_pend);
-	}
-}
-
 /****************************** receive queue ********************************/
 
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
@@ -673,10 +643,7 @@ void smc_wr_free_link(struct smc_link *lnk)
 	smc_wr_wakeup_reg_wait(lnk);
 	smc_wr_wakeup_tx_wait(lnk);
 
-	if (smc_wr_tx_wait_no_pending_sends(lnk))
-		memset(lnk->wr_tx_mask, 0,
-		       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
-		       sizeof(*lnk->wr_tx_mask));
+	smc_wr_tx_wait_no_pending_sends(lnk);
 	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
 	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
 
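
A single pattern runs through the smc_wr.c changes: cleanup that used to be done by guesswork (the bounded send wait and its -EPIPE fallback, the mask-clearing loop in the CQE handler, the dismiss_slots helper) is replaced by strict completion accounting, with teardown parked on a wait queue and woken from the completion path. A hedged sketch of that producer/consumer shape (the identifiers mirror the diff; the pairing is the standard wait_event idiom, not verbatim kernel code):

	/* completion side: drop the reference, then let waiters re-check */
	if (atomic_dec_and_test(&link->wr_tx_refcnt))
		wake_up_all(&link->wr_tx_wait);

	/* teardown side: sleeps until the condition really holds */
	wait_event(link->wr_tx_wait, !atomic_read(&link->wr_tx_refcnt));

This is also why the unconditional wait_event() in smc_wr_tx_wait_no_pending_sends() is safe: with the QP flushed to the error state first, every pending WR is guaranteed a CQE.
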
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -22,7 +22,6 @@
 #define SMC_WR_BUF_CNT 16	/* # of ctrl buffers per link */
 
 #define SMC_WR_TX_WAIT_FREE_SLOT_TIME	(10 * HZ)
-#define SMC_WR_TX_WAIT_PENDING_TIME	(5 * HZ)
 
 #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
 
@@ -62,7 +61,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
 
 static inline bool smc_wr_tx_link_hold(struct smc_link *link)
 {
-	if (!smc_link_usable(link))
+	if (!smc_link_sendable(link))
 		return false;
 	atomic_inc(&link->wr_tx_refcnt);
 	return true;
@@ -130,7 +129,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
 			    smc_wr_tx_filter filter,
 			    smc_wr_tx_dismisser dismisser,
 			    unsigned long data);
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
 
 int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
 int smc_wr_rx_post_init(struct smc_link *link);
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -83,6 +83,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 		xskb = &pool->heads[i];
 		xskb->pool = pool;
 		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+		INIT_LIST_HEAD(&xskb->free_list_node);
 		if (pool->unaligned)
 			pool->free_heads[i] = xskb;
 		else
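
The one-line xsk fix works because INIT_LIST_HEAD() makes free_list_node point at itself, so later list primitives can safely run on an xskb that was never linked into a free list; without it, the node carries garbage pointers. A reduced, hypothetical illustration of why the self-pointing state matters (kernel list API semantics, not code from this diff):

	struct list_head node;

	INIT_LIST_HEAD(&node);	/* node.next == node.prev == &node */
	list_del(&node);	/* well-defined: unlinking a singleton writes to itself */
	/* without the init, list_del() would dereference uninitialised pointers */
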
@@ -9,7 +9,6 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_ADVANCED=y
 CONFIG_NETFILTER_NETLINK=m
 CONFIG_NF_TABLES=m
-CONFIG_NFT_COUNTER=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XTABLES=m
 CONFIG_NETFILTER_XT_MATCH_BPF=m
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -132,7 +132,7 @@ run_test() {
 	local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \
 							sed -e 's/\[//' -e 's/:.*//'`
 	if [ $rcv != $pkts ]; then
-		echo " fail - received $rvs packets, expected $pkts"
+		echo " fail - received $rcv packets, expected $pkts"
 		ret=1
 		return
 	fi
@@ -185,6 +185,7 @@ for family in 4 6; do
 	IPT=iptables
 	SUFFIX=24
 	VXDEV=vxlan
+	PING=ping
 
 	if [ $family = 6 ]; then
 		BM_NET=$BM_NET_V6
@@ -192,6 +193,7 @@ for family in 4 6; do
 		SUFFIX="64 nodad"
 		VXDEV=vxlan6
 		IPT=ip6tables
+		PING="ping6"
 	fi
 
 	echo "IPv$family"
@@ -237,7 +239,7 @@ for family in 4 6; do
 
 	# load arp cache before running the test to reduce the amount of
 	# stray traffic on top of the UDP tunnel
-	ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null
+	ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
 	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
 	cleanup
 
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -156,13 +156,13 @@ struct testcase testcases_v4[] = {
 	},
 	{
 		/* send max number of min sized segments */
-		.tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+		.tlen = UDP_MAX_SEGMENTS,
 		.gso_len = 1,
-		.r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+		.r_num_mss = UDP_MAX_SEGMENTS,
 	},
 	{
 		/* send max number + 1 of min sized segments: fail */
-		.tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1,
+		.tlen = UDP_MAX_SEGMENTS + 1,
 		.gso_len = 1,
 		.tfail = true,
 	},
@@ -259,13 +259,13 @@ struct testcase testcases_v6[] = {
 	},
 	{
 		/* send max number of min sized segments */
-		.tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+		.tlen = UDP_MAX_SEGMENTS,
 		.gso_len = 1,
-		.r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+		.r_num_mss = UDP_MAX_SEGMENTS,
 	},
 	{
 		/* send max number + 1 of min sized segments: fail */
-		.tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1,
+		.tlen = UDP_MAX_SEGMENTS + 1,
 		.gso_len = 1,
 		.tfail = true,
 	},
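
These testcase updates track the kernel fix "udp: use datalen to cap ipv6 udp max gso segments": the segment limit is now applied to the UDP payload length (datalen) alone, so the selftest no longer has to subtract the IPv4/IPv6 header sizes to land exactly on the boundary. The arithmetic, assuming the datalen-based cap:

	/* segments = DIV_ROUND_UP(datalen, gso_size); with gso_len == 1:
	 *   tlen = UDP_MAX_SEGMENTS      ->  exactly the max  -> accepted
	 *   tlen = UDP_MAX_SEGMENTS + 1  ->  one segment over -> send fails (tfail)
	 */
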
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -419,6 +419,7 @@ static void usage(const char *filepath)
 
 static void parse_opts(int argc, char **argv)
 {
+	const char *bind_addr = NULL;
 	int max_len, hdrlen;
 	int c;
 
@@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_cpu = strtol(optarg, NULL, 0);
 			break;
 		case 'D':
-			setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+			bind_addr = optarg;
 			break;
 		case 'l':
 			cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
@@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv)
 		}
 	}
 
+	if (!bind_addr)
+		bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
+
+	setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
+
 	if (optind != argc)
 		usage(argv[0]);
 
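
The bench fix is an option-ordering bug: '-D addr' used to resolve the destination immediately via setup_sockaddr(), using whatever address family had been parsed up to that point, so placing -4/-6 after -D broke the destination address. The handler now only records the string, which is resolved once after all options are known, with a family-appropriate wildcard as the fallback. E.g. (illustrative invocation; other required flags elided):

	./udpgso_bench_tx -6 -D 2001:db8::1	# '-D 2001:db8::1 -6' now behaves the same
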