Merge tag 'net-6.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from wireless and netfilter

  Current release - regressions:

   - udp: fall back to software USO if IPv6 extension headers are
     present

   - wifi: iwlwifi: correctly lookup DMA address in SG table

  Current release - new code bugs:

   - eth: mlx5e: fix queue stats access to non-existing channels splat

  Previous releases - regressions:

   - eth: mlx5e: take state lock during tx timeout reporter

   - eth: mlxbf_gige: disable RX filters until RX path initialized

   - eth: igc: fix reset adapter logics when tx mode change

  Previous releases - always broken:

   - tcp: update window clamping condition

   - netfilter:
      - nf_queue: drop packets with cloned unconfirmed conntracks
      - nf_tables: Add locking for NFT_MSG_GETOBJ_RESET requests

   - vsock: fix recursive ->recvmsg calls

   - dsa: vsc73xx: fix MDIO bus access and PHY operations

   - eth: gtp: pull network headers in gtp_dev_xmit()

   - eth: igc: fix packet still tx after gate close by reducing i226 MAC
     retry buffer

   - eth: mana: fix RX buf alloc_size alignment and atomic op panic

   - eth: hns3: fix a deadlock problem when config TC during resetting"

* tag 'net-6.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (58 commits)
  net: hns3: use correct release function during uninitialization
  net: hns3: void array out of bound when loop tnl_num
  net: hns3: fix a deadlock problem when config TC during resetting
  net: hns3: use the user's cfg after reset
  net: hns3: fix wrong use of semaphore up
  selftests: net: lib: kill PIDs before del netns
  pse-core: Conditionally set current limit during PI regulator registration
  net: thunder_bgx: Fix netdev structure allocation
  net: ethtool: Allow write mechanism of LPL and both LPL and EPL
  vsock: fix recursive ->recvmsg calls
  selftest: af_unix: Fix kselftest compilation warnings
  netfilter: nf_tables: Add locking for NFT_MSG_GETOBJ_RESET requests
  netfilter: nf_tables: Introduce nf_tables_getobj_single
  netfilter: nf_tables: Audit log dump reset after the fact
  selftests: netfilter: add test for br_netfilter+conntrack+queue combination
  netfilter: nf_queue: drop packets with cloned unconfirmed conntracks
  netfilter: flowtable: initialise extack before use
  netfilter: nfnetlink: Initialise extack before use in ACKs
  netfilter: allow ipv6 fragments to arrive on different devices
  tcp: Update window clamping condition
  ...
Linus Torvalds 2024-08-15 10:35:20 -07:00
commit a4a35f6cbe
63 changed files with 797 additions and 268 deletions


@ -38,6 +38,10 @@ properties:
managed: true
phys:
description: A reference to the SerDes lane(s)
maxItems: 1
required:
- reg


@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
rpp->len += skb->len;
if (stat & SAR_RSQE_EPDU) {
unsigned int len, truesize;
unsigned char *l1l2;
unsigned int len;
l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
ATM_SKB(skb)->vcc = vcc;
__net_timestamp(skb);
truesize = skb->truesize;
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
if (skb->truesize > SAR_FB_SIZE_3)
if (truesize > SAR_FB_SIZE_3)
add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
else if (skb->truesize > SAR_FB_SIZE_2)
else if (truesize > SAR_FB_SIZE_2)
add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
else if (skb->truesize > SAR_FB_SIZE_1)
else if (truesize > SAR_FB_SIZE_1)
add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
else
add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);


@ -40,6 +40,10 @@
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
/* MII Block subblock */
#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */
#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
#define CPU_PORT 6 /* CPU port */
/* MAC Block registers */
@ -225,6 +229,8 @@
#define VSC73XX_MII_CMD 0x1
#define VSC73XX_MII_DATA 0x2
#define VSC73XX_MII_STAT_BUSY BIT(3)
/* Arbiter block 5 registers */
#define VSC73XX_ARBEMPTY 0x0c
#define VSC73XX_ARBDISC 0x0e
@ -299,6 +305,7 @@
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
#define VSC73XX_POLL_SLEEP_US 1000
#define VSC73XX_MDIO_POLL_SLEEP_US 5
#define VSC73XX_POLL_TIMEOUT_US 10000
struct vsc73xx_counter {
@ -527,6 +534,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
return 0;
}
static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
{
int ret, err;
u32 val;
ret = read_poll_timeout(vsc73xx_read, err,
err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
VSC73XX_MDIO_POLL_SLEEP_US,
VSC73XX_POLL_TIMEOUT_US, false, vsc,
VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
VSC73XX_MII_STAT, &val);
if (ret)
return ret;
return err;
}
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct vsc73xx *vsc = ds->priv;
@ -534,12 +557,20 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
u32 val;
int ret;
ret = vsc73xx_mdio_busy_check(vsc);
if (ret)
return ret;
/* Setting bit 26 means "read" */
cmd = BIT(26) | (phy << 21) | (regnum << 16);
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
msleep(2);
ret = vsc73xx_mdio_busy_check(vsc);
if (ret)
return ret;
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
if (ret)
return ret;
@ -563,18 +594,11 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
u32 cmd;
int ret;
/* It was found through tedious experiments that this router
* chip really hates to have it's PHYs reset. They
* never recover if that happens: autonegotiation stops
* working after a reset. Just filter out this command.
* (Resetting the whole chip is OK.)
*/
if (regnum == 0 && (val & BIT(15))) {
dev_info(vsc->dev, "reset PHY - disallowed\n");
return 0;
}
ret = vsc73xx_mdio_busy_check(vsc);
if (ret)
return ret;
cmd = (phy << 21) | (regnum << 16);
cmd = (phy << 21) | (regnum << 16) | val;
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
@ -957,6 +981,11 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
if (duplex == DUPLEX_FULL)
val |= VSC73XX_MAC_CFG_FDX;
else
/* In datasheet description ("Port Mode Procedure" in 5.6.2)
* this bit is configured only for half duplex.
*/
val |= VSC73XX_MAC_CFG_WEXC_DIS;
/* This routine is described in the datasheet (below ARBDISC register
* description)
@ -967,7 +996,6 @@ static void vsc73xx_mac_link_up(struct phylink_config *config,
get_random_bytes(&seed, 1);
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
val |= VSC73XX_MAC_CFG_WEXC_DIS;
/* Those bits are responsible for MTU only. Kernel takes care about MTU,
* let's enable +8 bytes frame length unconditionally.


@ -5250,8 +5250,8 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (bp->wol & MACB_WOL_ENABLED) {
/* Check for IP address in WOL ARP mode */
idev = __in_dev_get_rcu(bp->dev);
if (idev && idev->ifa_list)
ifa = rcu_access_pointer(idev->ifa_list);
if (idev)
ifa = rcu_dereference(idev->ifa_list);
if ((bp->wolopts & WAKE_ARP) && !ifa) {
netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
return -EOPNOTSUPP;


@ -1054,18 +1054,12 @@ static int phy_interface_mode(u8 lmac_type)
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac, **priv;
struct lmac *lmac;
u64 cfg;
lmac = &bgx->lmac[lmacid];
lmac->bgx = bgx;
lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
if (!lmac->netdev)
return -ENOMEM;
priv = netdev_priv(lmac->netdev);
*priv = lmac;
if ((lmac->lmac_type == BGX_MODE_SGMII) ||
(lmac->lmac_type == BGX_MODE_QSGMII) ||
(lmac->lmac_type == BGX_MODE_RGMII)) {
@ -1191,7 +1185,6 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
(lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
phy_disconnect(lmac->phydev);
free_netdev(lmac->netdev);
lmac->phydev = NULL;
}
@ -1653,6 +1646,23 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_get_qlm_mode(bgx);
for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
struct lmac *lmacp, **priv;
lmacp = &bgx->lmac[lmac];
lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
if (!lmacp->netdev) {
for (int i = 0; i < lmac; i++)
free_netdev(bgx->lmac[i].netdev);
err = -ENOMEM;
goto err_enable;
}
priv = netdev_priv(lmacp->netdev);
*priv = lmacp;
}
err = bgx_init_phy(bgx);
if (err)
goto err_enable;
@ -1692,8 +1702,10 @@ static void bgx_remove(struct pci_dev *pdev)
u8 lmac;
/* Disable all LMACs */
for (lmac = 0; lmac < bgx->lmac_count; lmac++)
for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
bgx_lmac_disable(bgx, lmac);
free_netdev(bgx->lmac[lmac].netdev);
}
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);


@ -5724,6 +5724,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
hns3_nic_net_stop(netdev);
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;


@ -1598,8 +1598,7 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
{
u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
struct hclge_mod_reg_common_msg msg;
u8 i, j, num;
u32 loop_time;
u8 i, j, num, loop_time;
num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
for (i = 0; i < num; i++) {
@ -1609,7 +1608,8 @@ static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
loop_time = 1;
loop_para[0] = 0;
if (msg.need_para) {
loop_time = hdev->ae_dev->dev_specs.tnl_num;
loop_time = min(hdev->ae_dev->dev_specs.tnl_num,
HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE);
for (j = 0; j < loop_time; j++)
loop_para[j] = j + 1;
}


@ -2653,8 +2653,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int ret;
return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.req_speed = speed;
hdev->hw.mac.req_duplex = duplex;
return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@ -2956,17 +2965,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
if (ret)
return ret;
}
if (!hdev->hw.mac.autoneg) {
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
hdev->hw.mac.req_duplex,
hdev->hw.mac.lane_num);
if (ret)
return ret;
}
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@ -11444,7 +11456,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
pci_release_mem_regions(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
@ -11516,8 +11528,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
hdev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)


@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
hdev->hw.mac.req_speed = (u32)speed;
hdev->hw.mac.req_duplex = (u8)duplex;
ret = hclge_cfg_flowctrl(hdev);
if (ret)
netdev_err(netdev, "failed to configure flow control.\n");


@ -1747,8 +1747,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
ret);
hdev->reset_type = HNAE3_NONE_RESET;
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
up(&hdev->reset_sem);
if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
up(&hdev->reset_sem);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)


@ -404,6 +404,12 @@
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
/* Retry Buffer Control */
#define IGC_RETX_CTL 0x041C
#define IGC_RETX_CTL_WATERMARK_MASK 0xF
#define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */
#define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */
/* Transmit Scheduling Latency */
/* Latency between transmission scheduling (LaunchTime) and the time
* the packet is transmitted to the network in nanosecond.


@ -6315,12 +6315,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
igc_ptp_read(adapter, &now);
if (igc_tsn_is_taprio_activated_by_user(adapter) &&
is_base_time_past(qopt->base_time, &now))
adapter->qbv_config_change_errors++;
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
adapter->taprio_offload_enable = true;
igc_ptp_read(adapter, &now);
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];


@ -49,12 +49,19 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
return new_flags;
}
static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
}
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u16 txoffset;
if (!is_any_launchtime(adapter))
if (!igc_tsn_is_tx_mode_in_tsn(adapter))
return;
switch (adapter->link_speed) {
@ -78,6 +85,23 @@ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
wr32(IGC_GTXOFFSET, txoffset);
}
static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 retxctl;
retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
wr32(IGC_RETX_CTL, retxctl);
}
bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
adapter->taprio_offload_enable;
}
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@ -91,6 +115,9 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
if (igc_is_device_id_i226(hw))
igc_tsn_restore_retx_default(adapter);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
@ -111,6 +138,25 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
return 0;
}
/* To partially fix i226 HW errata, reduce MAC internal buffering from 192 Bytes
* to 88 Bytes by setting RETX_CTL register using the recommendation from:
* a) Ethernet Controller I225/I226 Specification Update Rev 2.1
* Item 9: TSN: Packet Transmission Might Cross the Qbv Window
* b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
*/
static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u32 retxctl, watermark;
retxctl = rd32(IGC_RETX_CTL);
watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
/* Set QBVFULLTH value using watermark and set QBVFULLEN */
retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
IGC_RETX_CTL_QBVFULLEN;
wr32(IGC_RETX_CTL, retxctl);
}
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@ -123,6 +169,9 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
@ -262,14 +311,6 @@ skip_cbs:
s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
/* Increase the counter if scheduling into the past while
* Gate Control List (GCL) is running.
*/
if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
(adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
(adapter->qbv_count > 1))
adapter->qbv_config_change_errors++;
} else {
if (igc_is_device_id_i226(hw)) {
ktime_t adjust_time, expires_time;
@ -331,15 +372,22 @@ int igc_tsn_reset(struct igc_adapter *adapter)
return err;
}
static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
{
bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
IGC_FLAG_TSN_ANY_ENABLED);
return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
(!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
}
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
/* Per I225/6 HW Design Section 7.5.2.1, transmit mode
* cannot be changed dynamically. Require reset the adapter.
/* Per I225/6 HW Design Section 7.5.2.1 guideline, if tx mode change
* from legacy->tsn or tsn->legacy, then reset adapter is needed.
*/
if (netif_running(adapter->netdev) &&
(igc_is_device_id_i225(hw) || !adapter->qbv_count)) {
igc_tsn_will_tx_mode_change(adapter)) {
schedule_work(&adapter->reset_task);
return 0;
}


@ -7,5 +7,6 @@
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
#endif /* _IGC_BASE_H */


@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IP))
return csum;
skb_set_network_header(skb, ETH_HLEN);
if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
(skb->len < (ETH_HLEN +
(ip_hdr(skb)->ihl << 2) +
sizeof(struct udphdr)))) {
if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
skb_reset_network_header(skb);
return csum;
}
skb_set_transport_header(skb,
ETH_HLEN + (ip_hdr(skb)->ihl << 2));
skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
csum = udp_hdr(skb)->check;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);


@ -2666,14 +2666,15 @@ mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
{
struct mtk_wed_flow_block_priv *priv = cb_priv;
struct flow_cls_offload *cls = type_data;
struct mtk_wed_hw *hw = priv->hw;
struct mtk_wed_hw *hw = NULL;
if (!tc_can_offload(priv->dev))
if (!priv || !tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
hw = priv->hw;
return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}
@ -2729,6 +2730,7 @@ mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
kfree(block_cb->cb_priv);
block_cb->cb_priv = NULL;
}
return 0;
default:


@ -130,7 +130,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
#define MLX5E_DEFAULT_SHAMPO_TIMEOUT 1024
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3


@ -928,7 +928,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_headers_entry_size,
mlx5e_shampo_get_log_hd_entry_size(mdev, params));
MLX5_SET(rqc, rqc, reservation_timeout,
params->packet_merge.timeout);
mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
MLX5_SET(rqc, rqc, shampo_match_criteria_type,
params->packet_merge.shampo.match_criteria_type);
MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
@ -1087,6 +1087,20 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
return wqebbs;
}
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
/* The supported periods are organized in ascending order */
for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
break;
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)


@ -108,6 +108,7 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);


@ -146,7 +146,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
mutex_lock(&priv->state_lock);
err = mlx5e_safe_reopen_channels(priv);
mutex_unlock(&priv->state_lock);
if (!err) {
to_ctx->status = 1; /* all channels recovered */
return err;


@ -734,7 +734,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
__func__, num_tuples);
return num_tuples;
return num_tuples < 0 ? num_tuples : -EINVAL;
}
eth_ft = get_flow_table(priv, fs, num_tuples);


@ -5167,18 +5167,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
#endif
};
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
/* The supported periods are organized in ascending order */
for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
break;
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
struct mlx5e_params *params = &priv->channels.params;
@ -5308,7 +5296,7 @@ static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
struct mlx5e_rq_stats *rq_stats;
ASSERT_RTNL();
if (mlx5e_is_uplink_rep(priv))
if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
return;
channel_stats = priv->channel_stats[i];
@ -5328,6 +5316,9 @@ static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
struct mlx5e_sq_stats *sq_stats;
ASSERT_RTNL();
if (!priv->stats_nch)
return;
/* no special case needed for ptp htb etc since txq2sq_stats is kept up
* to date for active sq_stats, otherwise get_base_stats takes care of
* inactive sqs.


@ -126,7 +126,7 @@ static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
}
static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
u8 *host_buses, u8 *sd_group)
u8 *host_buses)
{
u32 out[MLX5_ST_SZ_DW(mpir_reg)];
int err;
@ -135,10 +135,6 @@ static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
if (err)
return err;
err = mlx5_query_nic_vport_sd_group(dev, sd_group);
if (err)
return err;
*sdm = MLX5_GET(mpir_reg, out, sdm);
*host_buses = MLX5_GET(mpir_reg, out, host_buses);
@ -166,19 +162,23 @@ static int sd_init(struct mlx5_core_dev *dev)
if (mlx5_core_is_ecpf(dev))
return 0;
err = mlx5_query_nic_vport_sd_group(dev, &sd_group);
if (err)
return err;
if (!sd_group)
return 0;
if (!MLX5_CAP_MCAM_REG(dev, mpir))
return 0;
err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
err = mlx5_query_sd(dev, &sdm, &host_buses);
if (err)
return err;
if (!sdm)
return 0;
if (!sd_group)
return 0;
group_id = mlx5_sd_group_id(dev, sd_group);
if (!mlx5_sd_is_supported(dev, host_buses)) {


@ -40,6 +40,7 @@
*/
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
#define MLXBF_GIGE_MAX_FILTER_IDX 3
/* Define for broadcast MAC literal */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
@ -175,6 +176,13 @@ enum mlxbf_gige_res {
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index);
void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index);
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,


@ -168,6 +168,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
if (err)
goto napi_deinit;
mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
mlxbf_gige_enable_multicast_rx(priv);
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@ -379,6 +383,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
unsigned int i;
int err;
base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
@ -423,6 +428,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++)
mlxbf_gige_disable_mac_rx_filter(priv, i);
mlxbf_gige_disable_multicast_rx(priv);
mlxbf_gige_disable_promisc(priv);
/* Write initial MAC address to hardware */
mlxbf_gige_initial_mac(priv);


@ -62,6 +62,8 @@
#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528
#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530
#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548


@ -11,22 +11,60 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac)
void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
u64 data;
data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
}
void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
u64 data;
data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
}
void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index)
{
void __iomem *base = priv->base;
u64 control;
/* Write destination MAC to specified MAC RX filter */
writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
(index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
/* Enable MAC receive filter mask for specified index */
control = readq(base + MLXBF_GIGE_CONTROL);
control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
writeq(control, base + MLXBF_GIGE_CONTROL);
}
void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index)
{
void __iomem *base = priv->base;
u64 control;
/* Disable MAC receive filter mask for specified index */
control = readq(base + MLXBF_GIGE_CONTROL);
control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
writeq(control, base + MLXBF_GIGE_CONTROL);
}
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac)
{
void __iomem *base = priv->base;
/* Write destination MAC to specified MAC RX filter */
writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
(index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
}
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 *dmac)
{


@ -599,7 +599,11 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
else
*headroom = XDP_PACKET_HEADROOM;
*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
/* Using page pool in this case, so alloc_size is PAGE_SIZE */
if (*alloc_size < PAGE_SIZE)
*alloc_size = PAGE_SIZE;
*datasize = mtu + ETH_HLEN;
}
@ -1788,7 +1792,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;
u8 arm_bit;
int w;
WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@ -1799,16 +1802,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_poll_tx_cq(cq);
w = cq->work_done;
cq->work_done_since_doorbell += w;
if (w < cq->budget &&
napi_complete_done(&cq->napi, w)) {
arm_bit = SET_ARM_BIT;
} else {
arm_bit = 0;
if (w < cq->budget) {
mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
cq->work_done_since_doorbell = 0;
napi_complete_done(&cq->napi, w);
} else if (cq->work_done_since_doorbell >
cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
/* MANA hardware requires at least one doorbell ring every 8
* wraparounds of CQ even if there is no need to arm the CQ.
* This driver rings the doorbell as soon as we have exceeded
* 4 wraparounds.
*/
mana_gd_ring_cq(gdma_queue, 0);
cq->work_done_since_doorbell = 0;
}
mana_gd_ring_cq(gdma_queue, arm_bit);
return w;
}


@ -160,16 +160,16 @@
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
#define XAE_FMI_OFFSET 0x00000708 /* Frame Filter Control */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
@ -308,7 +308,7 @@
*/
#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet FMI register */
/* Bit masks for Axi Ethernet FMC register */
#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */


@ -1269,6 +1269,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_cow_head(skb, dev->needed_headroom))
goto tx_err;
if (!pskb_inet_may_pull(skb))
goto tx_err;
skb_reset_inner_headers(skb);
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */


@ -237,16 +237,6 @@ static int vsc739x_config_init(struct phy_device *phydev)
return 0;
}
static int vsc73xx_config_aneg(struct phy_device *phydev)
{
/* The VSC73xx switches does not like to be instructed to
* do autonegotiation in any way, it prefers that you just go
* with the power-on/reset defaults. Writing some registers will
* just make autonegotiation permanently fail.
*/
return 0;
}
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
@ -444,7 +434,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@ -453,7 +442,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@ -462,7 +450,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@ -471,7 +458,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {


@ -401,9 +401,14 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
rdesc->ops = &pse_pi_ops;
rdesc->owner = pcdev->owner;
rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS |
REGULATOR_CHANGE_CURRENT;
rinit_data->constraints.max_uA = MAX_PI_CURRENT;
rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
if (pcdev->ops->pi_set_current_limit) {
rinit_data->constraints.valid_ops_mask |=
REGULATOR_CHANGE_CURRENT;
rinit_data->constraints.max_uA = MAX_PI_CURRENT;
}
rinit_data->supply_regulator = "vpwr";
rconfig.dev = pcdev->dev;


@ -286,10 +286,11 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
return;
}
if (urb->actual_length <= IPHETH_IP_ALIGN) {
dev->net->stats.rx_length_errors++;
return;
}
/* iPhone may periodically send URBs with no payload
* on the "bulk in" endpoint. It is safe to ignore them.
*/
if (urb->actual_length == 0)
goto rx_submit;
/* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames,
* but rather are control frames. Their purpose is not documented, and
@ -298,7 +299,8 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
* URB received from the bulk IN endpoint.
*/
if (unlikely
(((char *)urb->transfer_buffer)[0] == 0 &&
(urb->actual_length == 4 &&
((char *)urb->transfer_buffer)[0] == 0 &&
((char *)urb->transfer_buffer)[1] == 1))
goto rx_submit;
@ -306,7 +308,6 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
if (retval != 0) {
dev_err(&dev->intf->dev, "%s: callback retval: %d\n",
__func__, retval);
return;
}
rx_submit:
@ -354,13 +355,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
0x02, /* index */
dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
IPHETH_CTRL_TIMEOUT);
if (retval < 0) {
if (retval <= 0) {
dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n",
__func__, retval);
return retval;
}
if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) ||
(retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) {
netif_carrier_on(dev->net);
if (dev->tx_urb->status != -EINPROGRESS)
netif_wake_queue(dev->net);
@ -475,8 +477,8 @@ static int ipheth_close(struct net_device *net)
{
struct ipheth_device *dev = netdev_priv(net);
cancel_delayed_work_sync(&dev->carrier_work);
netif_stop_queue(net);
cancel_delayed_work_sync(&dev->carrier_work);
return 0;
}


@ -162,6 +162,60 @@ static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
return 0;
}
static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
unsigned long delta,
bool head)
{
unsigned long len = skb->len;
if (head) {
skb_push(skb, delta);
memmove(skb->data, skb->data + delta, len);
skb_trim(skb, len);
} else {
skb_put(skb, delta);
memmove(skb->data + delta, skb->data, len);
skb_pull(skb, delta);
}
}
static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
struct sk_buff **pskb)
{
u32 iova_mask = ab->hw_params->iova_mask;
unsigned long offset, delta1, delta2;
struct sk_buff *skb2, *skb = *pskb;
unsigned int headroom = skb_headroom(skb);
int tailroom = skb_tailroom(skb);
int ret = 0;
offset = (unsigned long)skb->data & iova_mask;
delta1 = offset;
delta2 = iova_mask - offset + 1;
if (headroom >= delta1) {
ath12k_dp_tx_move_payload(skb, delta1, true);
} else if (tailroom >= delta2) {
ath12k_dp_tx_move_payload(skb, delta2, false);
} else {
skb2 = skb_realloc_headroom(skb, iova_mask);
if (!skb2) {
ret = -ENOMEM;
goto out;
}
dev_kfree_skb_any(skb);
offset = (unsigned long)skb2->data & iova_mask;
if (offset)
ath12k_dp_tx_move_payload(skb2, offset, true);
*pskb = skb2;
}
out:
return ret;
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
@ -184,6 +238,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
bool tcl_ring_retry;
bool msdu_ext_desc = false;
bool add_htt_metadata = false;
u32 iova_mask = ab->hw_params->iova_mask;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@ -279,6 +334,23 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
if (iova_mask &&
(unsigned long)skb->data & iova_mask) {
ret = ath12k_dp_tx_align_payload(ab, &skb);
if (ret) {
ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
/* don't bail out, give original buffer
* a chance even unaligned.
*/
goto map;
}
/* hdr is pointing to a wrong place after alignment,
* so refresh it for later use.
*/
hdr = (void *)skb->data;
}
map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);


@ -924,6 +924,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = NULL,
.supports_dynamic_smps_6ghz = true,
.iova_mask = 0,
},
{
.name = "wcn7850 hw2.0",
@ -1000,6 +1002,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = &wcn7850_uuid,
.supports_dynamic_smps_6ghz = false,
.iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
},
{
.name = "qcn9274 hw2.0",
@ -1072,6 +1076,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.acpi_guid = NULL,
.supports_dynamic_smps_6ghz = true,
.iova_mask = 0,
},
};


@ -96,6 +96,8 @@
#define ATH12K_M3_FILE "m3.bin"
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
enum ath12k_hw_rate_cck {
ATH12K_HW_RATE_CCK_LP_11M = 0,
ATH12K_HW_RATE_CCK_LP_5_5M,
@ -215,6 +217,8 @@ struct ath12k_hw_params {
const guid_t *acpi_guid;
bool supports_dynamic_smps_6ghz;
u32 iova_mask;
};
struct ath12k_hw_ops {


@ -9193,6 +9193,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
hw->extra_tx_headroom = ab->hw_params->iova_mask;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);


@ -4320,9 +4320,16 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
/* Single PMK operation */
pmk_op->count = cpu_to_le16(1);
length += sizeof(struct brcmf_pmksa_v3);
memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
if (pmksa->bssid)
memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
if (pmksa->pmkid) {
memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
}
if (pmksa->ssid && pmksa->ssid_len) {
memcpy(pmk_op->pmk[0].ssid.SSID, pmksa->ssid, pmksa->ssid_len);
pmk_op->pmk[0].ssid.SSID_len = pmksa->ssid_len;
}
pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0);
}


@ -639,7 +639,8 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue);
dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr);
dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
u8 **hdr, unsigned int hdr_room);


@ -168,6 +168,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
unsigned int data_offset = 0;
dma_addr_t start_hdr_phys;
u16 length, amsdu_pad;
u8 *start_hdr;
@ -260,7 +261,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
int ret;
tb_len = min_t(unsigned int, tso.size, data_left);
tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
tb_len);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
goto out_err;
@ -272,6 +274,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
data_left -= tb_len;
data_offset += tb_len;
tso_build_data(skb, &tso, tb_len);
}
}


@ -1814,23 +1814,31 @@ out:
/**
* iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
* @sgt: scatter gather table
* @addr: Virtual address
* @offset: Offset into the mapped memory (i.e. SKB payload data)
* @len: Length of the area
*
* Find the entry that includes the address for the given address and return
* correct physical address for the TB entry.
* Find the DMA address that corresponds to the SKB payload data at the
* position given by @offset.
*
* Returns: Address for TB entry
*/
dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr)
dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
unsigned int len)
{
struct scatterlist *sg;
unsigned int sg_offset = 0;
int i;
/*
* Search the mapped DMA areas in the SG for the area that contains the
* data at offset with the given length.
*/
for_each_sgtable_dma_sg(sgt, sg, i) {
if (addr >= sg_virt(sg) &&
(u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg))
return sg_dma_address(sg) +
((unsigned long)addr - (unsigned long)sg_virt(sg));
if (offset >= sg_offset &&
offset + len <= sg_offset + sg_dma_len(sg))
return sg_dma_address(sg) + offset - sg_offset;
sg_offset += sg_dma_len(sg);
}
WARN_ON_ONCE(1);
@ -1875,7 +1883,9 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
/* Only map the data, not the header (it is copied to the TSO page) */
sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
skb->data_len);
if (WARN_ON_ONCE(sgt->orig_nents <= 0))
return NULL;
@ -1900,6 +1910,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
unsigned int data_offset = 0;
u16 length, iv_len, amsdu_pad;
dma_addr_t start_hdr_phys;
u8 *start_hdr, *pos_hdr;
@ -2000,7 +2011,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left);
dma_addr_t tb_phys;
tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data);
tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
/* Not a real mapping error, use direct comparison */
if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL;
@ -2011,6 +2022,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys, size);
data_left -= size;
data_offset += size;
tso_build_data(skb, &tso, size);
}
}


@ -1183,7 +1183,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
struct inet6_dev *idev)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mvif->phy->dev;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct inet6_ifaddr *ifa;
struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
struct sk_buff *skb;


@ -181,11 +181,11 @@ static void _rtl92du_init_queue_reserved_page(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u32 txqpagenum, txqpageunit;
u32 txqremainingpage;
u32 value32 = 0;
u32 numhq = 0;
u32 numlq = 0;
u32 numnq = 0;
u32 numpubq;
u32 value32;
if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY) {
numpubq = NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC;


@ -230,8 +230,12 @@ struct vsock_tap {
int vsock_add_tap(struct vsock_tap *vt);
int vsock_remove_tap(struct vsock_tap *vt);
void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);


@ -275,6 +275,7 @@ struct mana_cq {
/* NAPI data */
struct napi_struct napi;
int work_done;
int work_done_since_doorbell;
int budget;
};


@ -622,8 +622,12 @@ static unsigned int br_nf_local_in(void *priv,
if (likely(nf_ct_is_confirmed(ct)))
return NF_ACCEPT;
if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
nf_reset_ct(skb);
return NF_ACCEPT;
}
WARN_ON_ONCE(skb_shared(skb));
WARN_ON_ONCE(refcount_read(&nfct->use) != 1);
/* We can't call nf_confirm here, it would create a dependency
* on nf_conntrack module.


@ -9912,6 +9912,15 @@ static void netdev_sync_lower_features(struct net_device *upper,
}
}
static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
{
netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
bool hw_csum = features & NETIF_F_HW_CSUM;
return ip_csum || hw_csum;
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
netdev_features_t features)
{
@ -9993,15 +10002,9 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_HW_TLS_TX) {
bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
bool hw_csum = features & NETIF_F_HW_CSUM;
if (!ip_csum && !hw_csum) {
netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
features &= ~NETIF_F_HW_TLS_TX;
}
if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
features &= ~NETIF_F_HW_TLS_TX;
}
if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
@ -10009,6 +10012,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
features &= ~NETIF_F_HW_TLS_RX;
}
if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
features &= ~NETIF_F_GSO_UDP_L4;
}
return features;
}


@ -35,7 +35,10 @@ struct cmis_cdb_fw_mng_features_rpl {
__be16 resv7;
};
#define CMIS_CDB_FW_WRITE_MECHANISM_LPL 0x01
enum cmis_cdb_fw_write_mechanism {
CMIS_CDB_FW_WRITE_MECHANISM_LPL = 0x01,
CMIS_CDB_FW_WRITE_MECHANISM_BOTH = 0x11,
};
static int
cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb,
@ -64,7 +67,8 @@ cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb,
}
rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload;
if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL)) {
if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL ||
rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_BOTH)) {
ethnl_module_fw_flash_ntf_err(dev, ntf_params,
"Write LPL is not supported",
NULL);


@ -238,9 +238,14 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
*/
if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
u8 old_ratio = tcp_sk(sk)->scaling_ratio;
do_div(val, skb->truesize);
tcp_sk(sk)->scaling_ratio = val ? val : 1;
if (old_ratio != tcp_sk(sk)->scaling_ratio)
WRITE_ONCE(tcp_sk(sk)->window_clamp,
tcp_win_from_space(sk, sk->sk_rcvbuf));
}
icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
tcp_sk(sk)->advmss);
@ -754,7 +759,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
* <prev RTT . ><current RTT .. ><next RTT .... >
*/
if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) {
if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
u64 rcvwin, grow;
int rcvbuf;
@ -770,22 +776,12 @@ void tcp_rcv_space_adjust(struct sock *sk)
rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
if (rcvbuf > sk->sk_rcvbuf) {
WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
if (rcvbuf > sk->sk_rcvbuf) {
WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
/* Make the window clamp follow along. */
WRITE_ONCE(tp->window_clamp,
tcp_win_from_space(sk, rcvbuf));
}
} else {
/* Make the window clamp follow along while being bounded
* by SO_RCVBUF.
*/
int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));
if (clamp > tp->window_clamp)
WRITE_ONCE(tp->window_clamp, clamp);
/* Make the window clamp follow along. */
WRITE_ONCE(tp->window_clamp,
tcp_win_from_space(sk, rcvbuf));
}
}
tp->rcvq_space.space = copied;


@ -282,6 +282,12 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
skb_transport_header(gso_skb)))
return ERR_PTR(-EINVAL);
/* We don't know if egress device can segment and checksum the packet
* when IPv6 extension headers are present. Fall back to software GSO.
*/
if (gso_skb->ip_summed != CHECKSUM_PARTIAL)
features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK);
if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),


@ -154,6 +154,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
};
struct inet_frag_queue *q;
if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
IPV6_ADDR_LINKLOCAL)))
key.iif = 0;
q = inet_frag_find(nf_frag->fqdir, &key);
if (!q)
return NULL;


@ -94,7 +94,7 @@ static size_t subflow_get_info_size(const struct sock *sk)
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */
nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */
nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */
nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */
nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */
nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */


@ -841,8 +841,8 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
struct list_head *block_cb_list)
{
struct flow_cls_offload cls_flow = {};
struct netlink_ext_ack extack = {};
struct flow_block_cb *block_cb;
struct netlink_ext_ack extack;
__be16 proto = ETH_P_ALL;
int err, i = 0;


@ -8020,6 +8020,19 @@ cont:
return skb->len;
}
static int nf_tables_dumpreset_obj(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct nftables_pernet *nft_net = nft_pernet(sock_net(skb->sk));
int ret;
mutex_lock(&nft_net->commit_mutex);
ret = nf_tables_dump_obj(skb, cb);
mutex_unlock(&nft_net->commit_mutex);
return ret;
}
static int nf_tables_dump_obj_start(struct netlink_callback *cb)
{
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
@ -8036,12 +8049,18 @@ static int nf_tables_dump_obj_start(struct netlink_callback *cb)
if (nla[NFTA_OBJ_TYPE])
ctx->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
ctx->reset = true;
return 0;
}
static int nf_tables_dumpreset_obj_start(struct netlink_callback *cb)
{
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
ctx->reset = true;
return nf_tables_dump_obj_start(cb);
}
static int nf_tables_dump_obj_done(struct netlink_callback *cb)
{
struct nft_obj_dump_ctx *ctx = (void *)cb->ctx;
@ -8052,8 +8071,9 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb)
}
/* called with rcu_read_lock held */
static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
static struct sk_buff *
nf_tables_getobj_single(u32 portid, const struct nfnl_info *info,
const struct nlattr * const nla[], bool reset)
{
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
@ -8062,10 +8082,47 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
struct net *net = info->net;
struct nft_object *obj;
struct sk_buff *skb2;
bool reset = false;
u32 objtype;
int err;
if (!nla[NFTA_OBJ_NAME] ||
!nla[NFTA_OBJ_TYPE])
return ERR_PTR(-EINVAL);
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
return ERR_CAST(table);
}
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
return ERR_CAST(obj);
}
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
return ERR_PTR(-ENOMEM);
err = nf_tables_fill_obj_info(skb2, net, portid,
info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
family, table, obj, reset);
if (err < 0) {
kfree_skb(skb2);
return ERR_PTR(err);
}
return skb2;
}
static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
const struct nlattr * const nla[])
{
u32 portid = NETLINK_CB(skb).portid;
struct sk_buff *skb2;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nf_tables_dump_obj_start,
@ -8078,56 +8135,56 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
if (!nla[NFTA_OBJ_NAME] ||
!nla[NFTA_OBJ_TYPE])
skb2 = nf_tables_getobj_single(portid, info, nla, false);
if (IS_ERR(skb2))
return PTR_ERR(skb2);
return nfnetlink_unicast(skb2, info->net, portid);
}
static int nf_tables_getobj_reset(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const nla[])
{
struct nftables_pernet *nft_net = nft_pernet(info->net);
u32 portid = NETLINK_CB(skb).portid;
struct net *net = info->net;
struct sk_buff *skb2;
char *buf;
if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.start = nf_tables_dumpreset_obj_start,
.dump = nf_tables_dumpreset_obj,
.done = nf_tables_dump_obj_done,
.module = THIS_MODULE,
.data = (void *)nla,
};
return nft_netlink_dump_start_rcu(info->sk, skb, info->nlh, &c);
}
if (!try_module_get(THIS_MODULE))
return -EINVAL;
rcu_read_unlock();
mutex_lock(&nft_net->commit_mutex);
skb2 = nf_tables_getobj_single(portid, info, nla, true);
mutex_unlock(&nft_net->commit_mutex);
rcu_read_lock();
module_put(THIS_MODULE);
table = nft_table_lookup(net, nla[NFTA_OBJ_TABLE], family, genmask, 0);
if (IS_ERR(table)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_TABLE]);
return PTR_ERR(table);
}
if (IS_ERR(skb2))
return PTR_ERR(skb2);
objtype = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
obj = nft_obj_lookup(net, table, nla[NFTA_OBJ_NAME], objtype, genmask);
if (IS_ERR(obj)) {
NL_SET_BAD_ATTR(extack, nla[NFTA_OBJ_NAME]);
return PTR_ERR(obj);
}
buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
nla_len(nla[NFTA_OBJ_TABLE]),
(char *)nla_data(nla[NFTA_OBJ_TABLE]),
nft_net->base_seq);
audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
kfree(buf);
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb2)
return -ENOMEM;
if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
reset = true;
if (reset) {
const struct nftables_pernet *nft_net;
char *buf;
nft_net = nft_pernet(net);
buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, nft_net->base_seq);
audit_log_nfcfg(buf,
family,
1,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);
}
err = nf_tables_fill_obj_info(skb2, net, NETLINK_CB(skb).portid,
info->nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
family, table, obj, reset);
if (err < 0)
goto err_fill_obj_info;
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_obj_info:
kfree_skb(skb2);
return err;
return nfnetlink_unicast(skb2, net, portid);
}
static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
@@ -9410,7 +9467,7 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.policy = nft_obj_policy,
},
[NFT_MSG_GETOBJ_RESET] = {
.call = nf_tables_getobj,
.call = nf_tables_getobj_reset,
.type = NFNL_CB_RCU,
.attr_count = NFTA_OBJ_MAX,
.policy = nft_obj_policy,

@@ -427,8 +427,10 @@ replay_abort:
nfnl_unlock(subsys_id);
if (nlh->nlmsg_flags & NLM_F_ACK)
if (nlh->nlmsg_flags & NLM_F_ACK) {
memset(&extack, 0, sizeof(extack));
nfnl_err_add(&err_list, nlh, 0, &extack);
}
while (skb->len >= nlmsg_total_size(0)) {
int msglen, type;
@@ -577,6 +579,7 @@ done:
ss->abort(net, oskb, NFNL_ABORT_NONE);
netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
} else if (nlh->nlmsg_flags & NLM_F_ACK) {
memset(&extack, 0, sizeof(extack));
nfnl_err_add(&err_list, nlh, 0, &extack);
}
} else {

@@ -820,10 +820,41 @@ static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
const struct nf_conn *ct = (void *)skb_nfct(entry->skb);
struct nf_conn *ct = (void *)skb_nfct(entry->skb);
unsigned long status;
unsigned int use;
if (ct && ((ct->status & flags) == IPS_DYING))
if (!ct)
return false;
status = READ_ONCE(ct->status);
if ((status & flags) == IPS_DYING)
return true;
if (status & IPS_CONFIRMED)
return false;
/* in some cases skb_clone() can occur after initial conntrack
* pickup, but conntrack assumes exclusive skb->_nfct ownership for
* unconfirmed entries.
*
* This happens for br_netfilter and with ip multicast routing.
* This can't be solved with serialization here because one clone could
* have been queued for local delivery.
*/
use = refcount_read(&ct->ct_general.use);
if (likely(use == 1))
return false;
/* Can't decrement further? Exclusive ownership. */
if (!refcount_dec_not_one(&ct->ct_general.use))
return false;
skb_set_nfct(entry->skb, 0);
/* No nf_ct_put(): we already decremented .use and it cannot
* drop down to 0.
*/
return true;
#endif
return false;
}
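
For reference, the helper above acts as a drop predicate: it returns true when the queued skb must be dropped because its still-unconfirmed conntrack entry is also referenced by a clone. Below is a minimal sketch of how such a predicate is consulted on the enqueue path; the caller and backend names are hypothetical and not the exact upstream call site.

static int example_enqueue(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* Hypothetical caller: drop packets whose unconfirmed conntrack
         * is shared with another clone instead of queueing them to
         * userspace.
         */
        if (nf_ct_drop_unconfirmed(entry)) {
                nf_queue_entry_free(entry);
                return -ECANCELED;
        }

        return example_deliver(entry, queuenum);        /* hypothetical backend */
}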

@@ -1270,25 +1270,28 @@ out:
return err;
}
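/* Transport-level receive helper without the sockmap ->recvmsg()
 * redirect; the vsock BPF proto calls this directly so it cannot
 * recurse back into vsock_dgram_recvmsg().
 */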
int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
struct sock *sk = sock->sk;
struct vsock_sock *vsk = vsock_sk(sk);
return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
}
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
#ifdef CONFIG_BPF_SYSCALL
struct sock *sk = sock->sk;
const struct proto *prot;
#endif
struct vsock_sock *vsk;
struct sock *sk;
sk = sock->sk;
vsk = vsock_sk(sk);
#ifdef CONFIG_BPF_SYSCALL
prot = READ_ONCE(sk->sk_prot);
if (prot != &vsock_proto)
return prot->recvmsg(sk, msg, len, flags, NULL);
#endif
return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
return __vsock_dgram_recvmsg(sock, msg, len, flags);
}
EXPORT_SYMBOL_GPL(vsock_dgram_recvmsg);
@@ -2174,15 +2177,12 @@ out:
}
int
vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
__vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk;
struct vsock_sock *vsk;
const struct vsock_transport *transport;
#ifdef CONFIG_BPF_SYSCALL
const struct proto *prot;
#endif
int err;
sk = sock->sk;
@@ -2233,14 +2233,6 @@ vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto out;
}
#ifdef CONFIG_BPF_SYSCALL
prot = READ_ONCE(sk->sk_prot);
if (prot != &vsock_proto) {
release_sock(sk);
return prot->recvmsg(sk, msg, len, flags, NULL);
}
#endif
if (sk->sk_type == SOCK_STREAM)
err = __vsock_stream_recvmsg(sk, msg, len, flags);
else
@@ -2250,6 +2242,22 @@ out:
release_sock(sk);
return err;
}
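/* Public entry point: hand off to a sockmap-installed ->recvmsg()
 * before taking the socket lock; the BPF proto in turn uses the
 * __-prefixed helper above, avoiding recursive ->recvmsg() calls.
 */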
int
vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
#ifdef CONFIG_BPF_SYSCALL
struct sock *sk = sock->sk;
const struct proto *prot;
prot = READ_ONCE(sk->sk_prot);
if (prot != &vsock_proto)
return prot->recvmsg(sk, msg, len, flags, NULL);
#endif
return __vsock_connectible_recvmsg(sock, msg, len, flags);
}
EXPORT_SYMBOL_GPL(vsock_connectible_recvmsg);
static int vsock_set_rcvlowat(struct sock *sk, int val)

@@ -64,9 +64,9 @@ static int __vsock_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int
int err;
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)
err = vsock_connectible_recvmsg(sock, msg, len, flags);
err = __vsock_connectible_recvmsg(sock, msg, len, flags);
else if (sk->sk_type == SOCK_DGRAM)
err = vsock_dgram_recvmsg(sock, msg, len, flags);
err = __vsock_dgram_recvmsg(sock, msg, len, flags);
else
err = -EPROTOTYPE;

@@ -209,7 +209,7 @@ static void __sendpair(struct __test_metadata *_metadata,
static void __recvpair(struct __test_metadata *_metadata,
FIXTURE_DATA(msg_oob) *self,
const void *expected_buf, int expected_len,
const char *expected_buf, int expected_len,
int buf_len, int flags)
{
int i, ret[2], recv_errno[2], expected_errno = 0;

@@ -146,6 +146,7 @@ cleanup_ns()
for ns in "$@"; do
[ -z "${ns}" ] && continue
ip netns pids "${ns}" 2> /dev/null | xargs -r kill || true
ip netns delete "${ns}" &> /dev/null || true
if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
echo "Warn: Failed to remove namespace $ns"

@@ -7,6 +7,7 @@ MNL_CFLAGS := $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
MNL_LDLIBS := $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
TEST_PROGS := br_netfilter.sh bridge_brouter.sh
TEST_PROGS += br_netfilter_queue.sh
TEST_PROGS += conntrack_icmp_related.sh
TEST_PROGS += conntrack_ipip_mtu.sh
TEST_PROGS += conntrack_tcp_unreplied.sh

@@ -0,0 +1,78 @@
#!/bin/bash
source lib.sh
checktool "nft --version" "run test without nft tool"
cleanup() {
cleanup_all_ns
}
setup_ns c1 c2 c3 sender
trap cleanup EXIT
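# busywait helper: succeeds once the queue is registered in
# /proc/self/net/netfilter/nfnetlink_queue, i.e. the nf_queue listener is ready.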
nf_queue_wait()
{
grep -q "^ *$1 " "/proc/self/net/netfilter/nfnetlink_queue"
}
port_add() {
ns="$1"
dev="$2"
a="$3"
ip link add name "$dev" type veth peer name "$dev" netns "$ns"
ip -net "$ns" addr add 192.168.1."$a"/24 dev "$dev"
ip -net "$ns" link set "$dev" up
ip link set "$dev" master br0
ip link set "$dev" up
}
[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
ip link add br0 type bridge
ip addr add 192.168.1.254/24 dev br0
port_add "$c1" "c1" 1
port_add "$c2" "c2" 2
port_add "$c3" "c3" 3
port_add "$sender" "sender" 253
ip link set br0 up
modprobe -q br_netfilter
sysctl net.bridge.bridge-nf-call-iptables=1 || exit 1
ip netns exec "$sender" ping -I sender -c1 192.168.1.1 || exit 1
ip netns exec "$sender" ping -I sender -c1 192.168.1.2 || exit 2
ip netns exec "$sender" ping -I sender -c1 192.168.1.3 || exit 3
nft -f /dev/stdin <<EOF
table ip filter {
chain forward {
type filter hook forward priority 0; policy accept;
ct state new counter
ip protocol icmp counter queue num 0 bypass
}
}
EOF
./nf_queue -t 5 > /dev/null &
busywait 5000 nf_queue_wait
for i in $(seq 1 5); do conntrack -F > /dev/null 2> /dev/null; sleep 0.1 ; done &
ip netns exec "$sender" ping -I sender -f -c 50 -b 192.168.1.255
read t < /proc/sys/kernel/tainted
if [ "$t" -eq 0 ];then
echo PASS: kernel not tainted
else
echo ERROR: kernel is tainted
exit 1
fi
exit 0

@@ -67,6 +67,7 @@ struct testcase {
int gso_len; /* mss after applying gso */
int r_num_mss; /* recv(): number of calls of full mss */
int r_len_last; /* recv(): size of last non-mss dgram, if any */
bool v6_ext_hdr; /* send() dgrams with IPv6 extension headers */
};
const struct in6_addr addr6 = {
@@ -77,6 +78,8 @@ const struct in_addr addr4 = {
__constant_htonl(0x0a000001), /* 10.0.0.1 */
};
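/* Minimal 8-byte all-zero Hop-by-Hop options header (hdr len 0, six
 * Pad1 options); attached via IPV6_HOPOPTS to force an extension header.
 */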
static const char ipv6_hopopts_pad1[8] = { 0 };
struct testcase testcases_v4[] = {
{
/* no GSO: send a single byte */
@@ -255,6 +258,13 @@ struct testcase testcases_v6[] = {
.gso_len = 1,
.r_num_mss = 2,
},
{
/* send 2 1B segments with extension headers */
.tlen = 2,
.gso_len = 1,
.r_num_mss = 2,
.v6_ext_hdr = true,
},
{
/* send 2B + 2B + 1B segments */
.tlen = 5,
@@ -396,11 +406,18 @@ static void run_one(struct testcase *test, int fdt, int fdr,
int i, ret, val, mss;
bool sent;
fprintf(stderr, "ipv%d tx:%d gso:%d %s\n",
fprintf(stderr, "ipv%d tx:%d gso:%d %s%s\n",
addr->sa_family == AF_INET ? 4 : 6,
test->tlen, test->gso_len,
test->v6_ext_hdr ? "ext-hdr " : "",
test->tfail ? "(fail)" : "");
if (test->v6_ext_hdr) {
if (setsockopt(fdt, IPPROTO_IPV6, IPV6_HOPOPTS,
ipv6_hopopts_pad1, sizeof(ipv6_hopopts_pad1)))
error(1, errno, "setsockopt ipv6 hopopts");
}
val = test->gso_len;
if (cfg_do_setsockopt) {
if (setsockopt(fdt, SOL_UDP, UDP_SEGMENT, &val, sizeof(val)))
@@ -412,6 +429,12 @@ static void run_one(struct testcase *test, int fdt, int fdr,
error(1, 0, "send succeeded while expecting failure");
if (!sent && !test->tfail)
error(1, 0, "send failed while expecting success");
if (test->v6_ext_hdr) {
if (setsockopt(fdt, IPPROTO_IPV6, IPV6_HOPOPTS, NULL, 0))
error(1, errno, "setsockopt ipv6 hopopts clear");
}
if (!sent)
return;