Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix suspend and resume in mt76x0u USB driver, from Stanislaw
    Gruszka.

 2) Missing memory barriers in xsk, from Magnus Karlsson.

 3) rhashtable fixes in mac80211 from Herbert Xu.

 4) 32-bit MIPS eBPF JIT fixes from Paul Burton.

 5) Fix for_each_netdev_feature() on big endian, from Hauke Mehrtens.

 6) GSO validation fixes from Willem de Bruijn.

 7) Endianness fix for dwmac4 timestamp handling, from Alexandre Torgue.

 8) More strict checks in tcp_v4_err(), from Eric Dumazet.

 9) af_alg_release should NULL out the sk after the sock_put(), from Mao
    Wenan.

10) Missing unlock in mac80211 mesh error path, from Wei Yongjun.

11) Missing device put in hns driver, from Salil Mehta.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  sky2: Increase D3 delay again
  vhost: correctly check the return value of translate_desc() in log_used()
  net: netcp: Fix ethss driver probe issue
  net: hns: Fixes the missing put_device in positive leg for roce reset
  net: stmmac: Fix a race in EEE enable callback
  qed: Fix iWARP syn packet mac address validation.
  qed: Fix iWARP buffer size provided for syn packet processing.
  r8152: Add support for MAC address pass through on RTL8153-BD
  mac80211: mesh: fix missing unlock on error in table_path_del()
  net/mlx4_en: fix spelling mistake: "quiting" -> "quitting"
  net: crypto set sk to NULL when af_alg_release.
  net: Do not allocate page fragments that are not skb aligned
  mm: Use fixed constant in page_frag_alloc instead of size + 1
  tcp: tcp_v4_err() should be more careful
  tcp: clear icsk_backoff in tcp_write_queue_purge()
  net: mv643xx_eth: disable clk on error path in mv643xx_eth_shared_probe()
  qmi_wwan: apply SET_DTR quirk to Sierra WP7607
  net: stmmac: handle endianness in dwmac4_get_timestamp
  doc: Mention MSG_ZEROCOPY implementation for UDP
  mlxsw: __mlxsw_sp_port_headroom_set(): Fix a use of local variable
  ...
Linus Torvalds, 2019-02-19 16:13:19 -08:00
commit 40e196a906
41 changed files with 356 additions and 245 deletions


@@ -7,7 +7,7 @@ Intro
 =====
 
 The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
-The feature is currently implemented for TCP sockets.
+The feature is currently implemented for TCP and UDP sockets.
 
 
 Opportunity and Caveats


@@ -79,8 +79,6 @@ enum reg_val_type {
 	REG_64BIT_32BIT,
 	/* 32-bit compatible, need truncation for 64-bit ops. */
 	REG_32BIT,
-	/* 32-bit zero extended. */
-	REG_32BIT_ZERO_EX,
 	/* 32-bit no sign/zero extension needed. */
 	REG_32BIT_POS
 };
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
 	const struct bpf_prog *prog = ctx->skf;
 	int stack_adjust = ctx->stack_size;
 	int store_offset = stack_adjust - 8;
+	enum reg_val_type td;
 	int r0 = MIPS_R_V0;
 
-	if (dest_reg == MIPS_R_RA &&
-	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+	if (dest_reg == MIPS_R_RA) {
 		/* Don't let zero extended value escape. */
-		emit_instr(ctx, sll, r0, r0, 0);
+		td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+		if (td == REG_64BIT)
+			emit_instr(ctx, sll, r0, r0, 0);
+	}
 
 	if (ctx->flags & EBPF_SAVE_RA) {
 		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (dst < 0)
 			return dst;
 		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+		if (td == REG_64BIT) {
 			/* sign extend */
 			emit_instr(ctx, sll, dst, dst, 0);
 		}
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (dst < 0)
 			return dst;
 		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+		if (td == REG_64BIT) {
 			/* sign extend */
 			emit_instr(ctx, sll, dst, dst, 0);
 		}
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (dst < 0)
 			return dst;
 		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+		if (td == REG_64BIT)
 			/* sign extend */
 			emit_instr(ctx, sll, dst, dst, 0);
 		if (insn->imm == 1) {
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		if (src < 0 || dst < 0)
 			return -EINVAL;
 		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+		if (td == REG_64BIT) {
 			/* sign extend */
 			emit_instr(ctx, sll, dst, dst, 0);
 		}
 		did_move = false;
 		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
-		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+		if (ts == REG_64BIT) {
 			int tmp_reg = MIPS_R_AT;
 
 			if (bpf_op == BPF_MOV) {
@@ -1254,8 +1255,7 @@ jeq_common:
 			if (insn->imm == 64 && td == REG_32BIT)
 				emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
 
-			if (insn->imm != 64 &&
-			    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+			if (insn->imm != 64 && td == REG_64BIT) {
 				/* sign extend */
 				emit_instr(ctx, sll, dst, dst, 0);
 			}


@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
 
 int af_alg_release(struct socket *sock)
 {
-	if (sock->sk)
+	if (sock->sk) {
 		sock_put(sock->sk);
+		sock->sk = NULL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(af_alg_release);
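The bug class here is generic: a release path that drops its reference but leaves the stale pointer in place lets a second release call put the same reference again. A minimal sketch of the before/after shape, in plain C with hypothetical types (not the kernel's socket code):

    #include <stdlib.h>

    struct obj { int refcnt; };
    struct holder { struct obj *o; };

    static void put(struct obj *o)
    {
            if (o && --o->refcnt == 0)
                    free(o);
    }

    /* buggy: a second release() call would put() a freed object */
    static void release_buggy(struct holder *h)
    {
            if (h->o)
                    put(h->o);
    }

    /* fixed: clear the pointer so release() is idempotent */
    static void release_fixed(struct holder *h)
    {
            if (h->o) {
                    put(h->o);
                    h->o = NULL;
            }
    }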


@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
 }
 
-static void b53_enable_vlan(struct b53_device *dev, bool enable)
+static void b53_enable_vlan(struct b53_device *dev, bool enable,
+			    bool enable_filtering)
 {
 	u8 mgmt, vc0, vc1, vc4 = 0, vc5;
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
 		vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
 		vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
 		vc4 &= ~VC4_ING_VID_CHECK_MASK;
-		vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
-		vc5 |= VC5_DROP_VTABLE_MISS;
+		if (enable_filtering) {
+			vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+			vc5 |= VC5_DROP_VTABLE_MISS;
+		} else {
+			vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+			vc5 &= ~VC5_DROP_VTABLE_MISS;
+		}
 
 	if (is5325(dev))
 		vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
 	}
 
 	b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+	dev->vlan_enabled = enable;
+	dev->vlan_filtering_enabled = enable_filtering;
 }
 
 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
 	b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
 }
 
+static u16 b53_default_pvid(struct b53_device *dev)
+{
+	if (is5325(dev) || is5365(dev))
+		return 1;
+	else
+		return 0;
+}
+
 int b53_configure_vlan(struct dsa_switch *ds)
 {
 	struct b53_device *dev = ds->priv;
 	struct b53_vlan vl = { 0 };
-	int i;
+	int i, def_vid;
+
+	def_vid = b53_default_pvid(dev);
 
 	/* clear all vlan entries */
 	if (is5325(dev) || is5365(dev)) {
-		for (i = 1; i < dev->num_vlans; i++)
+		for (i = def_vid; i < dev->num_vlans; i++)
 			b53_set_vlan_entry(dev, i, &vl);
 	} else {
 		b53_do_vlan_op(dev, VTA_CMD_CLEAR);
 	}
 
-	b53_enable_vlan(dev, false);
+	b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
 
 	b53_for_each_port(dev, i)
 		b53_write16(dev, B53_VLAN_PAGE,
-			    B53_VLAN_PORT_DEF_TAG(i), 1);
+			    B53_VLAN_PORT_DEF_TAG(i), def_vid);
 
 	if (!is5325(dev) && !is5365(dev))
 		b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
 
 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 {
+	struct b53_device *dev = ds->priv;
+	struct net_device *bridge_dev;
+	unsigned int i;
+	u16 pvid, new_pvid;
+
+	/* Handle the case were multiple bridges span the same switch device
+	 * and one of them has a different setting than what is being requested
+	 * which would be breaking filtering semantics for any of the other
+	 * bridge devices.
+	 */
+	b53_for_each_port(dev, i) {
+		bridge_dev = dsa_to_port(ds, i)->bridge_dev;
+		if (bridge_dev &&
+		    bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
+		    br_vlan_enabled(bridge_dev) != vlan_filtering) {
+			netdev_err(bridge_dev,
+				   "VLAN filtering is global to the switch!\n");
+			return -EINVAL;
+		}
+	}
+
+	b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+	new_pvid = pvid;
+	if (dev->vlan_filtering_enabled && !vlan_filtering) {
+		/* Filtering is currently enabled, use the default PVID since
+		 * the bridge does not expect tagging anymore
+		 */
+		dev->ports[port].pvid = pvid;
+		new_pvid = b53_default_pvid(dev);
+	} else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+		/* Filtering is currently disabled, restore the previous PVID */
+		new_pvid = dev->ports[port].pvid;
+	}
+
+	if (pvid != new_pvid)
+		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+			    new_pvid);
+
+	b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
+
 	return 0;
 }
 EXPORT_SYMBOL(b53_vlan_filtering);
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
 	if (vlan->vid_end > dev->num_vlans)
 		return -ERANGE;
 
-	b53_enable_vlan(dev, true);
+	b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
 
 	return 0;
 }
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
 		b53_fast_age_vlan(dev, vid);
 	}
 
-	if (pvid) {
+	if (pvid && !dsa_is_cpu_port(ds, port)) {
 		b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
 			    vlan->vid_end);
 		b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
 
 		vl->members &= ~BIT(port);
 
-		if (pvid == vid) {
-			if (is5325(dev) || is5365(dev))
-				pvid = 1;
-			else
-				pvid = 0;
-		}
+		if (pvid == vid)
+			pvid = b53_default_pvid(dev);
 
 		if (untagged && !dsa_is_cpu_port(ds, port))
 			vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
 	b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
 	dev->ports[port].vlan_ctl_mask = pvlan;
 
-	if (is5325(dev) || is5365(dev))
-		pvid = 1;
-	else
-		pvid = 0;
+	pvid = b53_default_pvid(dev);
 
 	/* Make this port join all VLANs without VLAN entries */
 	if (is58xx(dev)) {


@@ -91,6 +91,7 @@ enum {
 struct b53_port {
 	u16		vlan_ctl_mask;
 	struct ethtool_eee eee;
+	u16		pvid;
 };
 
 struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
 	unsigned int num_vlans;
 	struct b53_vlan *vlans;
+	bool vlan_enabled;
+	bool vlan_filtering_enabled;
 	unsigned int num_ports;
 	struct b53_port *ports;
 };


@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
 {
 	struct net_device *p = ds->ports[port].cpu_dp->master;
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-	struct ethtool_wolinfo pwol;
+	struct ethtool_wolinfo pwol = { };
 
 	/* Get the parent device WoL settings */
-	p->ethtool_ops->get_wol(p, &pwol);
+	if (p->ethtool_ops->get_wol)
+		p->ethtool_ops->get_wol(p, &pwol);
 
 	/* Advertise the parent device supported settings */
 	wol->supported = pwol.supported;
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
 	struct net_device *p = ds->ports[port].cpu_dp->master;
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 	s8 cpu_port = ds->ports[port].cpu_dp->index;
-	struct ethtool_wolinfo pwol;
+	struct ethtool_wolinfo pwol = { };
 
-	p->ethtool_ops->get_wol(p, &pwol);
+	if (p->ethtool_ops->get_wol)
+		p->ethtool_ops->get_wol(p, &pwol);
 	if (wol->wolopts & ~pwol.supported)
 		return -EINVAL;


@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
 	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
 	reg = rxchk_readl(priv, RXCHK_CONTROL);
+	/* Clear L2 header checks, which would prevent BPDUs
+	 * from being received.
+	 */
+	reg &= ~RXCHK_L2_HDR_DIS;
 	if (priv->rx_chk_en)
 		reg |= RXCHK_EN;
 	else


@@ -3128,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
 		dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
 		dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
 	}
+
+	put_device(&pdev->dev);
+
 	return 0;
 }
 EXPORT_SYMBOL(hns_dsaf_roce_reset);


@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 
 	ret = mv643xx_eth_shared_of_probe(pdev);
 	if (ret)
-		return ret;
+		goto err_put_clk;
 	pd = dev_get_platdata(&pdev->dev);
 
 	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 	infer_hw_params(msp);
 
 	return 0;
+
+err_put_clk:
+	if (!IS_ERR(msp->clk))
+		clk_disable_unprepare(msp->clk);
+	return ret;
 }
 
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)


@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	INIT_WORK(&hw->restart_work, sky2_restart);
 
 	pci_set_drvdata(pdev, hw);
-	pdev->d3_delay = 200;
+	pdev->d3_delay = 300;
 
 	return 0;


@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	dev->addr_len = ETH_ALEN;
 	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
 		       priv->port, dev->dev_addr);
 		err = -EINVAL;
 		goto out;


@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		bool configure = false;
 		bool pfc = false;
+		u16 thres_cells;
+		u16 delay_cells;
 		bool lossy;
-		u16 thres;
 
 		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
 			if (prio_tc[j] == i) {
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 			continue;
 
 		lossy = !(pfc || pause_en);
-		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
-		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
-						  pause_en);
-		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+							pfc, pause_en);
+		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
+				     thres_cells, lossy);
 	}
 
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);


@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
 	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
 
+	if (!ether_addr_equal(ethh->h_dest,
+			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_RDMA,
+			   "Got unexpected mac %pM instead of %pM\n",
+			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
+		return -EINVAL;
+	}
+
 	ether_addr_copy(remote_mac_addr, ethh->h_source);
 	ether_addr_copy(local_mac_addr, ethh->h_dest);
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 	struct qed_iwarp_info *iwarp_info;
 	struct qed_ll2_acquire_data data;
 	struct qed_ll2_cbs cbs;
-	u32 mpa_buff_size;
+	u32 buff_size;
 	u16 n_ooo_bufs;
 	int rc = 0;
 	int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 
 	memset(&data, 0, sizeof(data));
 	data.input.conn_type = QED_LL2_TYPE_IWARP;
-	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+	data.input.mtu = params->max_mtu;
 	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
 	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
 	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 		goto err;
 	}
 
+	buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
 	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
 					 QED_IWARP_LL2_SYN_RX_SIZE,
-					 QED_IWARP_MAX_SYN_PKT_SIZE,
+					 buff_size,
 					 iwarp_info->ll2_syn_handle);
 	if (rc)
 		goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 	if (rc)
 		goto err;
 
-	mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
 	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
 					 data.input.rx_num_desc,
-					 mpa_buff_size,
+					 buff_size,
 					 iwarp_info->ll2_mpa_handle);
 	if (rc)
 		goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 
 	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
 
-	iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
 	if (!iwarp_info->mpa_intermediate_buf)
 		goto err;


@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
 
 #define QED_IWARP_LL2_SYN_TX_SIZE	(128)
 #define QED_IWARP_LL2_SYN_RX_SIZE	(256)
-#define QED_IWARP_MAX_SYN_PKT_SIZE	(128)
 
 #define QED_IWARP_LL2_OOO_DEF_TX_SIZE	(256)
 #define QED_IWARP_MAX_OOO		(16)


@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
 static int dwmac4_rx_check_timestamp(void *desc)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
+	unsigned int rdes0 = le32_to_cpu(p->des0);
+	unsigned int rdes1 = le32_to_cpu(p->des1);
+	unsigned int rdes3 = le32_to_cpu(p->des3);
 	u32 own, ctxt;
 	int ret = 1;
 
-	own = p->des3 & RDES3_OWN;
-	ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+	own = rdes3 & RDES3_OWN;
+	ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
 		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
 
 	if (likely(!own && ctxt)) {
-		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+		if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
 			/* Corrupted value */
 			ret = -EINVAL;
 		else


@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
 				     struct ethtool_eee *edata)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret;
 
-	priv->eee_enabled = edata->eee_enabled;
-
-	if (!priv->eee_enabled)
+	if (!edata->eee_enabled) {
 		stmmac_disable_eee_mode(priv);
-	else {
+	} else {
 		/* We are asking for enabling the EEE but it is safe
 		 * to verify all by invoking the eee_init function.
 		 * In case of failure it will return an error.
 		 */
-		priv->eee_enabled = stmmac_eee_init(priv);
-		if (!priv->eee_enabled)
+		edata->eee_enabled = stmmac_eee_init(priv);
+		if (!edata->eee_enabled)
 			return -EOPNOTSUPP;
-
-		/* Do not change tx_lpi_timer in case of failure */
-		priv->tx_lpi_timer = edata->tx_lpi_timer;
 	}
 
-	return phy_ethtool_set_eee(dev->phydev, edata);
+	ret = phy_ethtool_set_eee(dev->phydev, edata);
+	if (ret)
+		return ret;
+
+	priv->eee_enabled = edata->eee_enabled;
+	priv->tx_lpi_timer = edata->tx_lpi_timer;
+	return 0;
 }
 
 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)


@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
 		const char *name;
 		char node_name[32];
 
-		if (of_property_read_string(node, "label", &name) < 0) {
+		if (of_property_read_string(child, "label", &name) < 0) {
 			snprintf(node_name, sizeof(node_name), "%pOFn", child);
 			name = node_name;
 		}


@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
 	u16 val = 0;
 	int err;
 
-	err = priv->phy_drv->read_status(phydev);
+	if (priv->phy_drv->read_status)
+		err = priv->phy_drv->read_status(phydev);
+	else
+		err = genphy_read_status(phydev);
 	if (err < 0)
 		return err;


@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},	/* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC7304/MC7354 */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC7304/MC7354 */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC7304/MC7354, WP76xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},	/* Sierra Wireless EM7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */


@@ -557,6 +557,7 @@ enum spd_duplex {
 /* MAC PASSTHRU */
 #define AD_MASK			0xfee0
 #define BND_MASK		0x0004
+#define BD_MASK			0x0001
 #define EFUSE			0xcfdb
 #define PASS_THRU_MASK		0x1
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
 			return -ENODEV;
 		}
 	} else {
-		/* test for RTL8153-BND */
+		/* test for RTL8153-BND and RTL8153-BD */
 		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
-		if ((ocp_data & BND_MASK) == 0) {
+		if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
 			netif_dbg(tp, probe, tp->netdev,
 				  "Invalid variant for MAC pass through\n");
 			return -ENODEV;


@@ -158,6 +158,32 @@ static const struct ieee80211_ops mt76x0u_ops = {
 	.get_txpower = mt76x02_get_txpower,
 };
 
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
+{
+	int err;
+
+	mt76x0_chip_onoff(dev, true, true);
+
+	if (!mt76x02_wait_for_mac(&dev->mt76))
+		return -ETIMEDOUT;
+
+	err = mt76x0u_mcu_init(dev);
+	if (err < 0)
+		return err;
+
+	mt76x0_init_usb_dma(dev);
+	err = mt76x0_init_hardware(dev);
+	if (err < 0)
+		return err;
+
+	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG,
+		FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+		FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+	return 0;
+}
+
 static int mt76x0u_register_device(struct mt76x02_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->mt76.hw;
@@ -171,26 +197,10 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
 	if (err < 0)
 		goto out_err;
 
-	mt76x0_chip_onoff(dev, true, true);
-	if (!mt76x02_wait_for_mac(&dev->mt76)) {
-		err = -ETIMEDOUT;
-		goto out_err;
-	}
-
-	err = mt76x0u_mcu_init(dev);
+	err = mt76x0u_init_hardware(dev);
 	if (err < 0)
 		goto out_err;
 
-	mt76x0_init_usb_dma(dev);
-	err = mt76x0_init_hardware(dev);
-	if (err < 0)
-		goto out_err;
-
-	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
-	mt76_wr(dev, MT_TXOP_CTRL_CFG,
-		FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
-		FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
-
 	err = mt76x0_register_device(dev);
 	if (err < 0)
 		goto out_err;
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
 
 	mt76u_stop_queues(&dev->mt76);
 	mt76x0u_mac_stop(dev);
+	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+	mt76x0_chip_onoff(dev, false, false);
 	usb_kill_urb(usb->mcu.res.urb);
 
 	return 0;
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
 	tasklet_enable(&usb->rx_tasklet);
 	tasklet_enable(&usb->tx_tasklet);
 
-	ret = mt76x0_init_hardware(dev);
+	ret = mt76x0u_init_hardware(dev);
 	if (ret)
 		goto err;


@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
 
 	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
 			     len, iov, 64, VHOST_ACCESS_WO);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	for (i = 0; i < ret; i++) {
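translate_desc() returns the number of iov entries used on success and a negative errno on failure, so `if (ret)` treated every successful multi-entry translation as an error. A toy illustration of that return convention (assumed shapes, not the vhost code):

    #include <stdio.h>

    /* returns segments used on success, negative errno on failure */
    static int translate(int len)
    {
            if (len <= 0)
                    return -22; /* -EINVAL */
            return (len + 4095) / 4096; /* one entry per page */
    }

    int main(void)
    {
            int ret = translate(8192); /* 2 segments */

            if (ret < 0) /* correct: only negative values are errors */
                    return 1;
            printf("using %d segments\n", ret);
            return 0;
    }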


@@ -11,6 +11,8 @@
 #define _LINUX_NETDEV_FEATURES_H
 
 #include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
 
 typedef u64 netdev_features_t;
@@ -154,8 +156,26 @@ enum {
 #define NETIF_F_HW_TLS_TX	__NETIF_F(HW_TLS_TX)
 #define NETIF_F_HW_TLS_RX	__NETIF_F(HW_TLS_RX)
 
-#define for_each_netdev_feature(mask_addr, bit)	\
-	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the next feature with the highest number of the range of start till 0.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+	/* like BITMAP_LAST_WORD_MASK() for u64
+	 * this sets the most significant 64 - start to 0.
+	 */
+	feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+	return fls64(feature) - 1;
+}
+
+/* This goes for the MSB to the LSB through the set feature bits,
+ * mask_addr should be a u64 and bit an int
+ */
+#define for_each_netdev_feature(mask_addr, bit)				\
+	for ((bit) = find_next_netdev_feature((mask_addr),		\
+					      NETDEV_FEATURE_COUNT);	\
+	     (bit) >= 0;						\
+	     (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
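The old macro cast the u64 mask to unsigned long, which truncated the upper 32 bits on 32-bit big-endian targets; the new helper keeps only the bits below the starting position and returns the highest one set, so the macro walks the mask from the MSB toward bit 0. A userspace rendering of the same walk, runnable as-is (fls64() stood in by a GCC builtin; the feature bit numbers are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_COUNT 64 /* stand-in for NETDEV_FEATURE_COUNT */

    /* userspace stand-in for the kernel's fls64() */
    static int fls64_(uint64_t x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    /* same masking as the patch: keep the low bits, return highest set */
    static int find_next_feature(uint64_t feature, unsigned long start)
    {
            feature &= ~0ULL >> (-start & 63);
            return fls64_(feature) - 1;
    }

    int main(void)
    {
            uint64_t mask = (1ULL << 40) | (1ULL << 20) | (1ULL << 3);
            int bit;

            for (bit = find_next_feature(mask, FEATURE_COUNT);
                 bit >= 0;
                 bit = find_next_feature(mask, bit - 1))
                    printf("bit %d\n", bit); /* prints 40, 20, 3 */
            return 0;
    }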


@@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
 
 	if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
 		skb_set_transport_header(skb, keys.control.thoff);
-	else
+	else if (offset_hint >= 0)
 		skb_set_transport_header(skb, offset_hint);
 }
@@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
 }
 
+static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
+{
+	return skb_is_gso(skb) &&
+	       skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
+}
+
 static inline void skb_gso_reset(struct sk_buff *skb)
 {
 	skb_shinfo(skb)->gso_size = 0;


@@ -57,6 +57,15 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
 		if (!skb_partial_csum_set(skb, start, off))
 			return -EINVAL;
+	} else {
+		/* gso packets without NEEDS_CSUM do not set transport_offset.
+		 * probe and drop if does not match one of the above types.
+		 */
+		if (gso_type) {
+			skb_probe_transport_header(skb, -1);
+			if (!skb_transport_header_was_set(skb))
+				return -EINVAL;
+		}
 	}
 
 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {


@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
 	struct stack_map_irq_work *work;
 
 	work = container_of(entry, struct stack_map_irq_work, irq_work);
-	up_read(work->sem);
+	up_read_non_owner(work->sem);
 	work->sem = NULL;
 }
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	} else {
 		work->sem = &current->mm->mmap_sem;
 		irq_work_queue(&work->irq_work);
+		/*
+		 * The irq_work will release the mmap_sem with
+		 * up_read_non_owner(). The rwsem_release() is called
+		 * here to release the lock from lockdep's perspective.
+		 */
+		rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
 	}
 }


@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
 	return 0;
 }
 
-static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
-			     int size, enum bpf_access_type t)
+static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+			     u32 regno, int off, int size,
+			     enum bpf_access_type t)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = &regs[regno];
-	struct bpf_insn_access_aux info;
+	struct bpf_insn_access_aux info = {};
 
 	if (reg->smin_value < 0) {
 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
 		return -EACCES;
 	}
 
+	env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+
 	return 0;
 }
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			verbose(env, "cannot write into socket\n");
 			return -EACCES;
 		}
-		err = check_sock_access(env, regno, off, size, t);
+		err = check_sock_access(env, insn_idx, regno, off, size, t);
 		if (!err && value_regno >= 0)
 			mark_reg_unknown(env, regs, value_regno);
 	} else {


@@ -4675,11 +4675,11 @@ refill:
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		page_ref_add(page, size);
+		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
 
 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page_is_pfmemalloc(page);
-		nc->pagecnt_bias = size + 1;
+		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
 		nc->offset = size;
 	}
@@ -4695,10 +4695,10 @@ refill:
 		size = nc->size;
 #endif
 		/* OK, page count is 0, we can safely set it */
-		set_page_count(page, size + 1);
+		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
 		/* reset page count bias and offset to start of new frag */
-		nc->pagecnt_bias = size + 1;
+		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
 		offset = size - fragsz;
 	}


@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
 	netdev_features_t feature;
 	int feature_bit;
 
-	for_each_netdev_feature(&upper_disables, feature_bit) {
+	for_each_netdev_feature(upper_disables, feature_bit) {
 		feature = __NETIF_F_BIT(feature_bit);
 		if (!(upper->wanted_features & feature)
 		    && (features & feature)) {
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
 	netdev_features_t feature;
 	int feature_bit;
 
-	for_each_netdev_feature(&upper_disables, feature_bit) {
+	for_each_netdev_feature(upper_disables, feature_bit) {
 		feature = __NETIF_F_BIT(feature_bit);
 		if (!(features & feature) && (lower->features & feature)) {
 			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",


@@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
-	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+	if (!skb_is_gso_tcp(skb))
 		return -ENOTSUPP;
 
 	ret = skb_cow(skb, len_diff);
@@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
-	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+	if (!skb_is_gso_tcp(skb))
 		return -ENOTSUPP;
 
 	ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
 	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
 	int ret;
 
-	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+	if (!skb_is_gso_tcp(skb))
 		return -ENOTSUPP;
 
 	ret = skb_cow(skb, len_diff);
@@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
 	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
 	int ret;
 
-	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+	if (!skb_is_gso_tcp(skb))
 		return -ENOTSUPP;
 
 	ret = skb_unclone(skb, GFP_ATOMIC);


@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 
 void *napi_alloc_frag(unsigned int fragsz)
 {
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
 	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(napi_alloc_frag);
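SKB_DATA_ALIGN() rounds a length up to the cacheline size, so every fragment handed out by these helpers now starts and ends on an skb-friendly boundary. A minimal userspace rendering of the macro (SMP_CACHE_BYTES assumed to be 64 here):

    #include <stdio.h>

    #define SMP_CACHE_BYTES 64 /* assumption for illustration */
    #define ALIGN_(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define SKB_DATA_ALIGN_(x) ALIGN_(x, SMP_CACHE_BYTES)

    int main(void)
    {
            unsigned int sizes[] = { 1, 64, 65, 1500 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("%4u -> %4u\n", sizes[i], SKB_DATA_ALIGN_(sizes[i]));
            /* 1 -> 64, 64 -> 64, 65 -> 128, 1500 -> 1536 */
            return 0;
    }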


@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk)
 	sk_mem_reclaim(sk);
 	tcp_clear_all_retrans_hints(tcp_sk(sk));
 	tcp_sk(sk)->packets_out = 0;
+	inet_csk(sk)->icsk_backoff = 0;
 }
 
 int tcp_disconnect(struct sock *sk, int flags)
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tp->write_seq += tp->max_window + 2;
 	if (tp->write_seq == 0)
 		tp->write_seq = 1;
-	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
 	icsk->icsk_probes_out = 0;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;


@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		if (sock_owned_by_user(sk))
 			break;
 
+		skb = tcp_rtx_queue_head(sk);
+		if (WARN_ON_ONCE(!skb))
+			break;
+
 		icsk->icsk_backoff--;
 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
 					       TCP_TIMEOUT_INIT;
 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
-		skb = tcp_rtx_queue_head(sk);
-
 		tcp_mstamp_refresh(tp);
 		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));


@@ -1719,6 +1719,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
 	return 0;
 }
 
+static void ip6erspan_set_version(struct nlattr *data[],
+				  struct __ip6_tnl_parm *parms)
+{
+	parms->erspan_ver = 1;
+	if (data[IFLA_GRE_ERSPAN_VER])
+		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+	if (parms->erspan_ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX])
+			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+	} else if (parms->erspan_ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR])
+			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+		if (data[IFLA_GRE_ERSPAN_HWID])
+			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+	}
+}
+
 static void ip6gre_netlink_parms(struct nlattr *data[],
 				 struct __ip6_tnl_parm *parms)
 {
@@ -1767,20 +1785,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 
 	if (data[IFLA_GRE_COLLECT_METADATA])
 		parms->collect_md = true;
-
-	parms->erspan_ver = 1;
-	if (data[IFLA_GRE_ERSPAN_VER])
-		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
-
-	if (parms->erspan_ver == 1) {
-		if (data[IFLA_GRE_ERSPAN_INDEX])
-			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
-	} else if (parms->erspan_ver == 2) {
-		if (data[IFLA_GRE_ERSPAN_DIR])
-			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
-		if (data[IFLA_GRE_ERSPAN_HWID])
-			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
-	}
 }
 
 static int ip6gre_tap_init(struct net_device *dev)
@@ -2203,6 +2207,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
 	int err;
 
 	ip6gre_netlink_parms(data, &nt->parms);
+	ip6erspan_set_version(data, &nt->parms);
 	ign = net_generic(net, ip6gre_net_id);
 
 	if (nt->parms.collect_md) {
@@ -2248,6 +2253,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 
+	ip6erspan_set_version(data, &p);
 	ip6gre_tunnel_unlink_md(ign, t);
 	ip6gre_tunnel_unlink(ign, t);
 	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);


@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 		      BSS_CHANGED_P2P_PS |
 		      BSS_CHANGED_TXPOWER;
 	int err;
+	int prev_beacon_int;
 
 	old = sdata_dereference(sdata->u.ap.beacon, sdata);
 	if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 
 	sdata->needed_rx_chains = sdata->local->rx_chains;
 
+	prev_beacon_int = sdata->vif.bss_conf.beacon_int;
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
 	if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	if (!err)
 		ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 	mutex_unlock(&local->mtx);
-	if (err)
+	if (err) {
+		sdata->vif.bss_conf.beacon_int = prev_beacon_int;
 		return err;
+	}
 
 	/*
 	 * Apply control port protocol, this allows us to


@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
  * @dst: mesh path destination mac address
  * @mpp: mesh proxy mac address
  * @rhash: rhashtable list pointer
+ * @walk_list: linked list containing all mesh_path objects.
  * @gate_list: list pointer for known gates list
  * @sdata: mesh subif
  * @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@ struct mesh_path {
 	u8 dst[ETH_ALEN];
 	u8 mpp[ETH_ALEN];	/* used for MPP or MAP */
 	struct rhash_head rhash;
+	struct hlist_node walk_list;
 	struct hlist_node gate_list;
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@ struct mesh_path {
  *	gate's mpath may or may not be resolved and active.
  * @gates_lock: protects updates to known_gates
  * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containging all mesh_path objects
+ * @walk_lock: lock protecting walk_head
  * @entries: number of entries in the table
  */
 struct mesh_table {
 	struct hlist_head known_gates;
 	spinlock_t gates_lock;
 	struct rhashtable rhead;
+	struct hlist_head walk_head;
+	spinlock_t walk_lock;
 	atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
 };


@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
 		return NULL;
 
 	INIT_HLIST_HEAD(&newtbl->known_gates);
+	INIT_HLIST_HEAD(&newtbl->walk_head);
 	atomic_set(&newtbl->entries,  0);
 	spin_lock_init(&newtbl->gates_lock);
+	spin_lock_init(&newtbl->walk_lock);
 
 	return newtbl;
 }
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 static struct mesh_path *
 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 {
-	int i = 0, ret;
-	struct mesh_path *mpath = NULL;
-	struct rhashtable_iter iter;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return NULL;
-
-	rhashtable_walk_start(&iter);
+	int i = 0;
+	struct mesh_path *mpath;
 
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (i++ == idx)
 			break;
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
 
-	if (IS_ERR(mpath) || !mpath)
+	if (!mpath)
 		return NULL;
 
 	if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 		return ERR_PTR(-ENOMEM);
 
 	tbl = sdata->u.mesh.mesh_paths;
+	spin_lock_bh(&tbl->walk_lock);
 	do {
 		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 						    &new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 			mpath = rhashtable_lookup_fast(&tbl->rhead,
 						       dst,
 						       mesh_rht_params);
-
+		else if (!ret)
+			hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
 	} while (unlikely(ret == -EEXIST && !mpath));
+	spin_unlock_bh(&tbl->walk_lock);
 
-	if (ret && ret != -EEXIST)
-		return ERR_PTR(ret);
-
-	/* At this point either new_mpath was added, or we found a
-	 * matching entry already in the table; in the latter case
-	 * free the unnecessary new entry.
-	 */
-	if (ret == -EEXIST) {
+	if (ret) {
 		kfree(new_mpath);
+
+		if (ret != -EEXIST)
+			return ERR_PTR(ret);
+
 		new_mpath = mpath;
 	}
+
 	sdata->u.mesh.mesh_paths_generation++;
 	return new_mpath;
 }
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
 	tbl = sdata->u.mesh.mpp_paths;
+
+	spin_lock_bh(&tbl->walk_lock);
 	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
 					    &new_mpath->rhash,
 					    mesh_rht_params);
+	if (!ret)
+		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+	spin_unlock_bh(&tbl->walk_lock);
+
+	if (ret)
+		kfree(new_mpath);
 
 	sdata->u.mesh.mpp_paths_generation++;
 	return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
 
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	rcu_read_unlock();
 }
 
 static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
 
 static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
+	hlist_del_rcu(&mpath->walk_list);
 	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
 	mesh_path_free_rcu(tbl, mpath);
 }
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	struct hlist_node *n;
 
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (rcu_access_pointer(mpath->next_hop) == sta)
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 {
 	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	struct hlist_node *n;
 
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if (ether_addr_equal(mpath->mpp, proxy))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void table_flush_by_iface(struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
+	struct hlist_node *n;
 
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 /**
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl,
 {
 	struct mesh_path *mpath;
 
-	rcu_read_lock();
+	spin_lock_bh(&tbl->walk_lock);
 	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
 	if (!mpath) {
-		rcu_read_unlock();
+		spin_unlock_bh(&tbl->walk_lock);
 		return -ENXIO;
 	}
 
 	__mesh_path_del(tbl, mpath);
-	rcu_read_unlock();
+	spin_unlock_bh(&tbl->walk_lock);
 	return 0;
 }
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 			  struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct rhashtable_iter iter;
-	int ret;
-
-	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
-	if (ret)
-		return;
-
-	rhashtable_walk_start(&iter);
-
-	while ((mpath = rhashtable_walk_next(&iter))) {
-		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-			continue;
-		if (IS_ERR(mpath))
-			break;
+	struct hlist_node *n;
 
+	spin_lock_bh(&tbl->walk_lock);
+	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
 		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			__mesh_path_del(tbl, mpath);
 	}
-
-	rhashtable_walk_stop(&iter);
-	rhashtable_walk_exit(&iter);
+	spin_unlock_bh(&tbl->walk_lock);
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)

@@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 {
 	struct ip_vs_dest *dest;
 	unsigned int atype, i;
-	int ret = 0;
 
 	EnterFunction(2);
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (udest->af == AF_INET6) {
+		int ret;
+
 		atype = ipv6_addr_type(&udest->addr.in6);
 		if ((!(atype & IPV6_ADDR_UNICAST) ||
 			atype & IPV6_ADDR_LINKLOCAL) &&

@@ -313,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
 	int err;
 
 	list_for_each_entry(rule, &ctx->chain->rules, list) {
+		if (!nft_is_active_next(ctx->net, rule))
+			continue;
+
 		err = nft_delrule(ctx, rule);
 		if (err < 0)
 			return err;


@@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 	return 0;
 
 err_unreg_umem:
-	xdp_clear_umem_at_qid(dev, queue_id);
 	if (!force_zc)
 		err = 0; /* fallback to copy mode */
+	if (err)
+		xdp_clear_umem_at_qid(dev, queue_id);
 out_rtnl_unlock:
 	rtnl_unlock();
 	return err;
@@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
 	if (!umem->pgs)
 		return -ENOMEM;
 
-	down_write(&current->mm->mmap_sem);
-	npgs = get_user_pages(umem->address, umem->npgs,
-			      gup_flags, &umem->pgs[0], NULL);
-	up_write(&current->mm->mmap_sem);
+	down_read(&current->mm->mmap_sem);
+	npgs = get_user_pages_longterm(umem->address, umem->npgs,
+				       gup_flags, &umem->pgs[0], NULL);
+	up_read(&current->mm->mmap_sem);
 
 	if (npgs != umem->npgs) {
 		if (npgs >= 0) {

@@ -669,6 +669,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 		if (!umem)
 			return -EINVAL;
 
+		/* Matches the smp_wmb() in XDP_UMEM_REG */
+		smp_rmb();
 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
 			q = READ_ONCE(umem->fq);
 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
@@ -678,6 +680,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	if (!q)
 		return -EINVAL;
 
+	/* Matches the smp_wmb() in xsk_init_queue */
+	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
 	if (size > (PAGE_SIZE << compound_order(qpg)))
 		return -EINVAL;
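The added smp_rmb() calls pair with the smp_wmb() the writer issues between initializing the umem/queue and publishing the pointer: the reader loads the pointer, fences, and only then dereferences the fields. A minimal sketch of that pairing in C11 (explicit fences as stand-ins for the kernel primitives):

    #include <stdatomic.h>

    struct ring { int entries; };

    static _Atomic(struct ring *) shared_ring;

    /* writer: initialize the object fully, fence, then publish the pointer */
    void publish(struct ring *r)
    {
            r->entries = 512;
            atomic_thread_fence(memory_order_release); /* smp_wmb() analogue */
            atomic_store_explicit(&shared_ring, r, memory_order_relaxed);
    }

    /* reader: load the pointer, fence, then the fields are safe to read */
    int consume(void)
    {
            struct ring *r = atomic_load_explicit(&shared_ring,
                                                  memory_order_relaxed);

            if (!r)
                    return -1;
            atomic_thread_fence(memory_order_acquire); /* smp_rmb() analogue */
            return r->entries;
    }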