commit 183d46ff42

Merge tag 'net-6.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from wireless, bluetooth, BPF and netfilter.

Current release - regressions:

 - core: drop bad gso csum_start and offset in virtio_net_hdr

 - wifi: mt76: fix null pointer access in mt792x_mac_link_bss_remove

 - eth: tun: add missing bpf_net_ctx_clear() in do_xdp_generic()

 - phy: aquantia: only poll GLOBAL_CFG regs on aqr113, aqr113c and aqr115c

Current release - new code bugs:

 - smc: prevent UAF in inet_create()

 - bluetooth: btmtk: fix kernel crash when entering btmtk_usb_suspend

 - eth: bnxt: reject unsupported hash functions

Previous releases - regressions:

 - sched: act_ct: take care of padding in struct zones_ht_key

 - netfilter: fix null-ptr-deref in iptable_nat_table_init()

 - tcp: adjust clamping window for applications specifying SO_RCVBUF

Previous releases - always broken:

 - ethtool: rss: small fixes to spec and GET

 - mptcp:
    - fix signal endpoint re-add
    - pm: fix backup support in signal endpoints

 - wifi: ath12k: fix soft lockup on suspend

 - eth: bnxt_en: fix RSS logic in __bnxt_reserve_rings()

 - eth: ice: fix AF_XDP ZC timeout and concurrency issues

 - eth: mlx5:
    - fix missing lock on sync reset reload
    - fix error handling in irq_pool_request_irq"

* tag 'net-6.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (76 commits)
  mptcp: fix duplicate data handling
  mptcp: fix bad RCVPRUNED mib accounting
  ipv6: fix ndisc_is_useropt() handling for PIO
  igc: Fix double reset adapter triggered from a single taprio cmd
  net: MAINTAINERS: Demote Qualcomm IPA to "maintained"
  net: wan: fsl_qmc_hdlc: Discard received CRC
  net: wan: fsl_qmc_hdlc: Convert carrier_lock spinlock to a mutex
  net/mlx5e: Add a check for the return value from mlx5_port_set_eth_ptys
  net/mlx5e: Fix CT entry update leaks of modify header context
  net/mlx5e: Require mlx5 tc classifier action support for IPsec prio capability
  net/mlx5: Fix missing lock on sync reset reload
  net/mlx5: Lag, don't use the hardcoded value of the first port
  net/mlx5: DR, Fix 'stack guard page was hit' error in dr_rule
  net/mlx5: Fix error handling in irq_pool_request_irq
  net/mlx5: Always drain health in shutdown callback
  net: Add skbuff.h to MAINTAINERS
  r8169: don't increment tx_dropped in case of NETDEV_TX_BUSY
  netfilter: iptables: Fix potential null-ptr-deref in ip6table_nat_table_init().
  netfilter: iptables: Fix null-ptr-deref in iptable_nat_table_init().
  net: drop bad gso csum_start and offset in virtio_net_hdr
  ...
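One fix in the list above - act_ct taking care of padding in struct zones_ht_key - is an instance of a classic C pitfall worth spelling out: when a struct is hashed or compared byte-wise as a lookup key, compiler-inserted padding bytes are included, and field-by-field initialization leaves them indeterminate. A minimal userspace sketch (the struct layout here is illustrative, not the kernel's actual zones_ht_key):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct ht_key {
            uint16_t zone;          /* padding bytes follow on LP64... */
            void *net;              /* ...so that this pointer is aligned */
    };

    int main(void)
    {
            struct ht_key a, b;

            /* Field-wise init leaves the padding indeterminate, so a
             * byte-wise compare (what a hash table effectively does when
             * it hashes/memcmps the whole struct) may see two "equal"
             * keys as different. */
            a.zone = 1; a.net = NULL;
            b.zone = 1; b.net = NULL;
            printf("memcmp: %d (may be nonzero)\n", memcmp(&a, &b, sizeof(a)));

            /* The usual fix: zero the whole key before filling fields. */
            memset(&a, 0, sizeof(a)); a.zone = 1;
            memset(&b, 0, sizeof(b)); b.zone = 1;
            printf("memcmp after memset: %d\n", memcmp(&a, &b, sizeof(a)));
            return 0;
    }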
@@ -1753,6 +1753,7 @@ operations:
      request:
        attributes:
          - header
          - context
      reply:
        attributes:
          - header
@@ -1761,7 +1762,6 @@ operations:
          - indir
          - hkey
          - input_xfrm
      dump: *rss-get-op
    -
      name: plca-get-cfg
      doc: Get PLCA params.

@@ -1875,6 +1875,7 @@ Kernel response contents:

  ===================================== ====== ==========================
  ``ETHTOOL_A_RSS_HEADER``              nested reply header
  ``ETHTOOL_A_RSS_CONTEXT``             u32    context number
  ``ETHTOOL_A_RSS_HFUNC``              u32    RSS hash func
  ``ETHTOOL_A_RSS_INDIR``              binary Indir table bytes
  ``ETHTOOL_A_RSS_HKEY``               binary Hash key bytes

@@ -15936,6 +15936,7 @@ F: include/linux/in.h
F: include/linux/indirect_call_wrapper.h
F: include/linux/net.h
F: include/linux/netdevice.h
F: include/linux/skbuff.h
F: include/net/
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
@@ -18556,7 +18557,7 @@ F: drivers/usb/misc/qcom_eud.c
QCOM IPA DRIVER
M: Alex Elder <elder@kernel.org>
L: netdev@vger.kernel.org
S: Supported
S: Maintained
F: drivers/net/ipa/

QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT

@@ -413,6 +413,7 @@ config BT_ATH3K
config BT_MTKSDIO
    tristate "MediaTek HCI SDIO driver"
    depends on MMC
    depends on USB || !BT_HCIBTUSB_MTK
    select BT_MTK
    help
      MediaTek Bluetooth HCI SDIO driver.
@@ -425,6 +426,7 @@ config BT_MTKSDIO
config BT_MTKUART
    tristate "MediaTek HCI UART driver"
    depends on SERIAL_DEV_BUS
    depends on USB || !BT_HCIBTUSB_MTK
    select BT_MTK
    help
      MediaTek Bluetooth HCI UART driver.

@@ -3085,6 +3085,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
        btintel_set_dsm_reset_method(hdev, &ver_tlv);

        err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
        if (err)
            goto exit_error;

        btintel_register_devcoredump_support(hdev);
        btintel_print_fseq_info(hdev);
        break;

@@ -437,6 +437,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(btmtk_process_coredump);

#if IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK)
static void btmtk_usb_wmt_recv(struct urb *urb)
{
    struct hci_dev *hdev = urb->context;
@@ -1262,7 +1263,8 @@ int btmtk_usb_suspend(struct hci_dev *hdev)
    struct btmtk_data *btmtk_data = hci_get_priv(hdev);

    /* Stop urb anchor for iso data transmission */
    usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
    if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
        usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);

    return 0;
}
@@ -1487,6 +1489,7 @@ int btmtk_usb_shutdown(struct hci_dev *hdev)
    return 0;
}
EXPORT_SYMBOL_GPL(btmtk_usb_shutdown);
#endif

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");

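The btmtk_usb_suspend() hunk above guards the URB-anchor teardown behind the BTMTK_ISOPKT_RUNNING flag: the iso-packet anchor is only initialized once ISO traffic has actually been set up, so killing it unconditionally on suspend touches uninitialized memory. A hedged sketch of the pair-init-with-a-flag pattern (types and names are illustrative, not the driver's exact code):

    #include <linux/usb.h>

    enum { MY_ISOPKT_RUNNING };             /* illustrative flag bit */

    struct my_priv {
            unsigned long flags;
            struct usb_anchor isopkt_anchor;
    };

    static void my_isopkt_start(struct my_priv *priv)
    {
            init_usb_anchor(&priv->isopkt_anchor); /* anchor valid from here */
            set_bit(MY_ISOPKT_RUNNING, &priv->flags);
    }

    static int my_suspend(struct my_priv *priv)
    {
            /* Teardown must mirror setup: before my_isopkt_start() has
             * run, the anchor is uninitialized memory and killing it
             * crashes. */
            if (test_bit(MY_ISOPKT_RUNNING, &priv->flags))
                    usb_kill_anchored_urbs(&priv->isopkt_anchor);
            return 0;
    }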
@@ -7649,8 +7649,8 @@ static int bnxt_get_avail_msix(struct bnxt *bp, int num);
static int __bnxt_reserve_rings(struct bnxt *bp)
{
    struct bnxt_hw_rings hwr = {0};
    int rx_rings, old_rx_rings, rc;
    int cp = bp->cp_nr_rings;
    int rx_rings, rc;
    int ulp_msix = 0;
    bool sh = false;
    int tx_cp;
@@ -7684,6 +7684,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
    hwr.grp = bp->rx_nr_rings;
    hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
    hwr.stat = bnxt_get_func_stat_ctxs(bp);
    old_rx_rings = bp->hw_resc.resv_rx_rings;

    rc = bnxt_hwrm_reserve_rings(bp, &hwr);
    if (rc)
@@ -7738,7 +7739,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
    if (!bnxt_rings_ok(bp, &hwr))
        return -ENOMEM;

    if (!netif_is_rxfh_configured(bp->dev))
    if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
        !netif_is_rxfh_configured(bp->dev))
        bnxt_set_dflt_rss_indir_tbl(bp, NULL);

    if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {

@@ -1863,8 +1863,14 @@ static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
}

static int bnxt_rxfh_context_check(struct bnxt *bp,
                                   const struct ethtool_rxfh_param *rxfh,
                                   struct netlink_ext_ack *extack)
{
    if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
        NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
        return -EOPNOTSUPP;
    }

    if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
        NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
        return -EOPNOTSUPP;
@@ -1888,7 +1894,7 @@ static int bnxt_create_rxfh_context(struct net_device *dev,
    struct bnxt_vnic_info *vnic;
    int rc;

    rc = bnxt_rxfh_context_check(bp, extack);
    rc = bnxt_rxfh_context_check(bp, rxfh, extack);
    if (rc)
        return rc;

@@ -1915,8 +1921,12 @@ static int bnxt_create_rxfh_context(struct net_device *dev,
    if (rc)
        goto out;

    /* Populate defaults in the context */
    bnxt_set_dflt_rss_indir_tbl(bp, ctx);
    ctx->hfunc = ETH_RSS_HASH_TOP;
    memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
    memcpy(ethtool_rxfh_context_key(ctx),
           bp->rss_hash_key, HW_HASH_KEY_SIZE);

    rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
    if (rc) {
@@ -1953,7 +1963,7 @@ static int bnxt_modify_rxfh_context(struct net_device *dev,
    struct bnxt_rss_ctx *rss_ctx;
    int rc;

    rc = bnxt_rxfh_context_check(bp, extack);
    rc = bnxt_rxfh_context_check(bp, rxfh, extack);
    if (rc)
        return rc;

@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
}

/**
 * ice_xsk_pool - get XSK buffer pool bound to a ring
 * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
 * @ring: Rx ring to use
 *
 * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
 * present, NULL otherwise.
 * Sets XSK buff pool pointer on Rx ring.
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{
    struct ice_vsi *vsi = ring->vsi;
    u16 qid = ring->q_index;

    return ice_get_xp_from_qid(vsi, qid);
    WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}

/**
@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
    if (!ring)
        return;

    ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
    WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}

/**

@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
        return err;
    }

    ring->xsk_pool = ice_xsk_pool(ring);
    ice_rx_xsk_pool(ring);
    if (ring->xsk_pool) {
        xdp_rxq_info_unreg(&ring->xdp_rxq);

@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
        return 0;
    }

    ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
    ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
    if (!ok) {
        u16 pf_q = ring->vsi->rxq_map[ring->q_index];

@@ -2948,7 +2948,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
    ice_for_each_rxq(vsi, i) {
        struct ice_rx_ring *rx_ring = vsi->rx_rings[i];

        if (rx_ring->xsk_pool)
        if (READ_ONCE(rx_ring->xsk_pool))
            napi_schedule(&rx_ring->q_vector->napi);
    }
}

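The WRITE_ONCE()/READ_ONCE() conversions across these ice hunks implement a lockless publish/consume scheme for ring->xsk_pool: the configuration path publishes or clears the pool pointer while NAPI may be polling concurrently, so both sides need single-copy-atomic accesses, and the poll path must sample the pointer once and pass the local copy to every helper. A condensed sketch of the pattern, with illustrative ring and helper names:

    struct my_ring {
            struct xsk_buff_pool *xsk_pool;
            /* ... */
    };

    /* Writer (configuration path): publish or retract the pool pointer. */
    static void ring_publish_pool(struct my_ring *ring,
                                  struct xsk_buff_pool *pool)
    {
            WRITE_ONCE(ring->xsk_pool, pool);
            synchronize_net();  /* let in-flight NAPI finish with the old value */
    }

    /* Reader (NAPI): sample once, then use the local copy for the whole
     * poll so every helper sees the same pool. */
    static int ring_poll(struct my_ring *ring, int budget)
    {
            struct xsk_buff_pool *pool = READ_ONCE(ring->xsk_pool);

            return pool ? clean_zc(ring, pool, budget)
                        : clean_copy(ring, budget);
    }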
@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
    if (rx_ring->vsi->type == ICE_VSI_PF)
    if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
    rx_ring->xdp_prog = NULL;
    WRITE_ONCE(rx_ring->xdp_prog, NULL);
    if (rx_ring->xsk_pool) {
        kfree(rx_ring->xdp_buf);
        rx_ring->xdp_buf = NULL;
@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
     * budget and be more aggressive about cleaning up the Tx descriptors.
     */
    ice_for_each_tx_ring(tx_ring, q_vector->tx) {
        struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
        bool wd;

        if (tx_ring->xsk_pool)
            wd = ice_xmit_zc(tx_ring);
        if (xsk_pool)
            wd = ice_xmit_zc(tx_ring, xsk_pool);
        else if (ice_ring_is_xdp(tx_ring))
            wd = true;
        else
@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
        budget_per_ring = budget;

    ice_for_each_rx_ring(rx_ring, q_vector->rx) {
        struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
        int cleaned;

        /* A dedicated path for zero-copy allows making a single
@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
         * ice_clean_rx_irq function and makes the codebase cleaner.
         */
        cleaned = rx_ring->xsk_pool ?
                  ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
                  ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
                  ice_clean_rx_irq(rx_ring, budget_per_ring);
        work_done += cleaned;
        /* if we clean as many as budgeted, we must not be done */

@@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
    ice_clean_tx_ring(vsi->tx_rings[q_idx]);
    if (ice_is_xdp_ena_vsi(vsi)) {
        synchronize_rcu();
    if (ice_is_xdp_ena_vsi(vsi))
        ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
    }
    ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

@@ -112,25 +110,29 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 * @qid: queue index
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
    u16 reg_idx = q_vector->reg_idx;
    struct ice_pf *pf = vsi->back;
    struct ice_hw *hw = &pf->hw;
    struct ice_tx_ring *tx_ring;
    struct ice_rx_ring *rx_ring;
    int q, _qid = qid;

    ice_cfg_itr(hw, q_vector);

    ice_for_each_tx_ring(tx_ring, q_vector->tx)
        ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
                              q_vector->tx.itr_idx);
    for (q = 0; q < q_vector->num_ring_tx; q++) {
        ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
        _qid++;
    }

    ice_for_each_rx_ring(rx_ring, q_vector->rx)
        ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
                              q_vector->rx.itr_idx);
    _qid = qid;

    for (q = 0; q < q_vector->num_ring_rx; q++) {
        ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
        _qid++;
    }

    ice_flush(hw);
}
@@ -164,6 +166,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
    struct ice_tx_ring *tx_ring;
    struct ice_rx_ring *rx_ring;
    int timeout = 50;
    int fail = 0;
    int err;

    if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
@@ -180,15 +183,17 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
        usleep_range(1000, 2000);
    }

    synchronize_net();
    netif_carrier_off(vsi->netdev);
    netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

    ice_qvec_dis_irq(vsi, rx_ring, q_vector);
    ice_qvec_toggle_napi(vsi, q_vector, false);

    netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

    ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
    err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
    if (err)
        return err;
    if (!fail)
        fail = err;
    if (ice_is_xdp_ena_vsi(vsi)) {
        struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

@@ -196,17 +201,15 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
        ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
        err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
                                   &txq_meta);
        if (err)
            return err;
        if (!fail)
            fail = err;
    }
    err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
    if (err)
        return err;

    ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
    ice_qp_clean_rings(vsi, q_idx);
    ice_qp_reset_stats(vsi, q_idx);

    return 0;
    return fail;
}

/**
@@ -219,40 +222,48 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
    struct ice_q_vector *q_vector;
    int fail = 0;
    bool link_up;
    int err;

    err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
    if (err)
        return err;
    if (!fail)
        fail = err;

    if (ice_is_xdp_ena_vsi(vsi)) {
        struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

        err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
        if (err)
            return err;
        if (!fail)
            fail = err;
        ice_set_ring_xdp(xdp_ring);
        ice_tx_xsk_pool(vsi, q_idx);
    }

    err = ice_vsi_cfg_single_rxq(vsi, q_idx);
    if (err)
        return err;
    if (!fail)
        fail = err;

    q_vector = vsi->rx_rings[q_idx]->q_vector;
    ice_qvec_cfg_msix(vsi, q_vector);
    ice_qvec_cfg_msix(vsi, q_vector, q_idx);

    err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
    if (err)
        return err;
    if (!fail)
        fail = err;

    ice_qvec_toggle_napi(vsi, q_vector, true);
    ice_qvec_ena_irq(vsi, q_vector);

    netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
    /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
    synchronize_net();
    ice_get_link_status(vsi->port_info, &link_up);
    if (link_up) {
        netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
        netif_carrier_on(vsi->netdev);
    }
    clear_bit(ICE_CFG_BUSY, vsi->state);

    return 0;
    return fail;
}

/**
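The fail/err rework in ice_qp_dis() and ice_qp_ena() above replaces return-on-first-error with best-effort completion: every step runs regardless of earlier failures, only the first error is remembered and reported at the end, so the queue pair is never left half torn down. A simplified sketch of that shape, with illustrative helpers:

    /* Run every step; remember only the first error so the caller still
     * learns something went wrong. */
    static int qp_reconfig(struct my_vsi *vsi, u16 qid)
    {
            int fail = 0;
            int err;

            err = stop_tx_ring(vsi, qid);
            if (!fail)
                    fail = err;

            err = stop_rx_ring(vsi, qid);
            if (!fail)
                    fail = err;

            clean_rings(vsi, qid);  /* unconditional: always leave clean state */

            return fail;
    }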
@@ -459,6 +470,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Place the @count of descriptors onto Rx ring. Handle the ring wrap
@@ -467,7 +479,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
                                   struct xsk_buff_pool *xsk_pool, u16 count)
{
    u32 nb_buffs_extra = 0, nb_buffs = 0;
    union ice_32b_rx_flex_desc *rx_desc;
@@ -479,8 +492,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
    xdp = ice_xdp_buf(rx_ring, ntu);

    if (ntu + count >= rx_ring->count) {
        nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
                                           rx_desc,
        nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
                                           rx_ring->count - ntu);
        if (nb_buffs_extra != rx_ring->count - ntu) {
            ntu += nb_buffs_extra;
@@ -493,7 +505,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        ice_release_rx_desc(rx_ring, 0);
    }

    nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
    nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);

    ntu += nb_buffs;
    if (ntu == rx_ring->count)
@@ -509,6 +521,7 @@ exit:
/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
@@ -516,7 +529,8 @@ exit:
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
                          struct xsk_buff_pool *xsk_pool, u16 count)
{
    u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
    u16 leftover, i, tail_bumps;
@@ -525,9 +539,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
    leftover = count - (tail_bumps * rx_thresh);

    for (i = 0; i < tail_bumps; i++)
        if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
        if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
            return false;
    return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
    return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}

/**
@@ -596,8 +610,10 @@ out:
/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 * @xdp_ring: XDP Tx ring
 * @xsk_pool: AF_XDP buffer pool pointer
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
                                struct xsk_buff_pool *xsk_pool)
{
    u16 ntc = xdp_ring->next_to_clean;
    struct ice_tx_desc *tx_desc;
@@ -648,7 +664,7 @@ skip:
    if (xdp_ring->next_to_clean >= cnt)
        xdp_ring->next_to_clean -= cnt;
    if (xsk_frames)
        xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
        xsk_tx_completed(xsk_pool, xsk_frames);

    return completed_frames;
}
@@ -657,6 +673,7 @@ skip:
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * note that this function works directly on xdp_buff, no need to convert
 * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
@@ -666,7 +683,8 @@ skip:
 * was not enough space on XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
                              struct ice_tx_ring *xdp_ring)
                              struct ice_tx_ring *xdp_ring,
                              struct xsk_buff_pool *xsk_pool)
{
    struct skb_shared_info *sinfo = NULL;
    u32 size = xdp->data_end - xdp->data;
@@ -680,7 +698,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,

    free_space = ICE_DESC_UNUSED(xdp_ring);
    if (free_space < ICE_RING_QUARTER(xdp_ring))
        free_space += ice_clean_xdp_irq_zc(xdp_ring);
        free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);

    if (unlikely(!free_space))
        goto busy;
@@ -700,7 +718,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
        dma_addr_t dma;

        dma = xsk_buff_xdp_get_dma(xdp);
        xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
        xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);

        tx_buf->xdp = xdp;
        tx_buf->type = ICE_TX_BUF_XSK_TX;
@@ -742,12 +760,14 @@ busy:
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
               struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
               struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
               struct xsk_buff_pool *xsk_pool)
{
    int err, result = ICE_XDP_PASS;
    u32 act;
@@ -758,7 +778,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
        err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
        if (!err)
            return ICE_XDP_REDIR;
        if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
        if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
            result = ICE_XDP_EXIT;
        else
            result = ICE_XDP_CONSUMED;
@@ -769,7 +789,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
    case XDP_PASS:
        break;
    case XDP_TX:
        result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
        result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
        if (result == ICE_XDP_CONSUMED)
            goto out_failure;
        break;
@@ -821,14 +841,16 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @xsk_pool: AF_XDP buffer pool pointer
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
                        struct xsk_buff_pool *xsk_pool,
                        int budget)
{
    unsigned int total_rx_bytes = 0, total_rx_packets = 0;
    struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
    u32 ntc = rx_ring->next_to_clean;
    u32 ntu = rx_ring->next_to_use;
    struct xdp_buff *first = NULL;
@@ -891,7 +913,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
        if (ice_is_non_eop(rx_ring, rx_desc))
            continue;

        xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
        xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
                                 xsk_pool);
        if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
            xdp_xmit |= xdp_res;
        } else if (xdp_res == ICE_XDP_EXIT) {
@@ -940,7 +963,8 @@ construct_skb:
    rx_ring->next_to_clean = ntc;
    entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
    if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
        failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
        failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
                                         entries_to_alloc);

    ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
    ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -963,17 +987,19 @@ construct_skb:
/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
                         struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
                         unsigned int *total_bytes)
{
    struct ice_tx_desc *tx_desc;
    dma_addr_t dma;

    dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
    xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
    dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
    xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);

    tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
    tx_desc->buf_addr = cpu_to_le64(dma);
@@ -986,10 +1012,13 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
                               struct xsk_buff_pool *xsk_pool,
                               struct xdp_desc *descs,
                               unsigned int *total_bytes)
{
    u16 ntu = xdp_ring->next_to_use;
@@ -999,8 +1028,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
    loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
        dma_addr_t dma;

        dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
        xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
        dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
        xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);

        tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
        tx_desc->buf_addr = cpu_to_le64(dma);
@@ -1016,60 +1045,69 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be send
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
                                u32 nb_pkts, unsigned int *total_bytes)
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
                                struct xsk_buff_pool *xsk_pool,
                                struct xdp_desc *descs, u32 nb_pkts,
                                unsigned int *total_bytes)
{
    u32 batched, leftover, i;

    batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
    leftover = nb_pkts & (PKTS_PER_BATCH - 1);
    for (i = 0; i < batched; i += PKTS_PER_BATCH)
        ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
        ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
    for (; i < batched + leftover; i++)
        ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
        ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
    struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
    struct xdp_desc *descs = xsk_pool->tx_descs;
    u32 nb_pkts, nb_processed = 0;
    unsigned int total_bytes = 0;
    int budget;

    ice_clean_xdp_irq_zc(xdp_ring);
    ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);

    if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
        !netif_running(xdp_ring->vsi->netdev))
        return true;

    budget = ICE_DESC_UNUSED(xdp_ring);
    budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

    nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
    nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
    if (!nb_pkts)
        return true;

    if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
        nb_processed = xdp_ring->count - xdp_ring->next_to_use;
        ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
        ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
                            &total_bytes);
        xdp_ring->next_to_use = 0;
    }

    ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
                        &total_bytes);
    ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
                        nb_pkts - nb_processed, &total_bytes);

    ice_set_rs_bit(xdp_ring);
    ice_xdp_ring_update_tail(xdp_ring);
    ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

    if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
        xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
    if (xsk_uses_need_wakeup(xsk_pool))
        xsk_set_tx_need_wakeup(xsk_pool);

    return nb_pkts < budget;
}
@@ -1091,7 +1129,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
    struct ice_vsi *vsi = np->vsi;
    struct ice_tx_ring *ring;

    if (test_bit(ICE_VSI_DOWN, vsi->state))
    if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
        return -ENETDOWN;

    if (!ice_is_xdp_ena_vsi(vsi))
@@ -1102,7 +1140,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,

    ring = vsi->rx_rings[queue_id]->xdp_ring;

    if (!ring->xsk_pool)
    if (!READ_ONCE(ring->xsk_pool))
        return -EINVAL;

    /* The idea here is that if NAPI is running, mark a miss, so

@@ -20,16 +20,20 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
                       u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
                        struct xsk_buff_pool *xsk_pool,
                        int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
                          struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
                               struct xsk_buff_pool __always_unused *xsk_pool)
{
    return false;
}
@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,

static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
                    struct xsk_buff_pool __always_unused *xsk_pool,
                    int __always_unused budget)
{
    return 0;
@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,

static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
                     struct xsk_buff_pool __always_unused *xsk_pool,
                     u16 __always_unused count)
{
    return false;

@@ -6306,21 +6306,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
    size_t n;
    int i;

    switch (qopt->cmd) {
    case TAPRIO_CMD_REPLACE:
        break;
    case TAPRIO_CMD_DESTROY:
        return igc_tsn_clear_schedule(adapter);
    case TAPRIO_CMD_STATS:
        igc_taprio_stats(adapter->netdev, &qopt->stats);
        return 0;
    case TAPRIO_CMD_QUEUE_STATS:
        igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
        return 0;
    default:
        return -EOPNOTSUPP;
    }

    if (qopt->base_time < 0)
        return -ERANGE;

@@ -6429,7 +6414,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
    if (hw->mac.type != igc_i225)
        return -EOPNOTSUPP;

    err = igc_save_qbv_schedule(adapter, qopt);
    switch (qopt->cmd) {
    case TAPRIO_CMD_REPLACE:
        err = igc_save_qbv_schedule(adapter, qopt);
        break;
    case TAPRIO_CMD_DESTROY:
        err = igc_tsn_clear_schedule(adapter);
        break;
    case TAPRIO_CMD_STATS:
        igc_taprio_stats(adapter->netdev, &qopt->stats);
        return 0;
    case TAPRIO_CMD_QUEUE_STATS:
        igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
        return 0;
    default:
        return -EOPNOTSUPP;
    }

    if (err)
        return err;

@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
    struct mvpp2_port *port;
    int i;
    int i, j;

    for (i = 0; i < priv->port_count; i++) {
        port = priv->port_list[i];
        if (port->priv->percpu_pools) {
            for (i = 0; i < port->nrxqs; i++)
                mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
            for (j = 0; j < port->nrxqs; j++)
                mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
                                        port->tx_fc & en);
        } else {
            mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);

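The mvpp2 hunk is the classic reused-loop-counter bug: the inner per-queue loop recycled i, the outer per-port index, so the outer loop's state was clobbered after the first port. A standalone reproduction:

    #include <stdio.h>

    int main(void)
    {
            int visited = 0;
            int i;

            for (i = 0; i < 4; i++) {       /* outer: 4 "ports" */
                    /* BUG: inner loop reuses 'i' as its counter */
                    for (i = 0; i < 8; i++)
                            ;               /* per-queue work */
                    visited++;
            }
            /* The inner loop leaves i == 8, so the outer condition fails
             * after a single pass: this prints "visited 1 of 4". Giving
             * the inner loop its own counter ('j') restores the full
             * walk, which is exactly what the fix above does. */
            printf("visited %d of 4\n", visited);
            return 0;
    }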
@@ -932,6 +932,7 @@ err_rule:
    mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
    mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
    *attr = *old_attr;
    kfree(old_attr);
err_attr:
    kvfree(spec);

@@ -51,9 +51,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
        MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
        caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

    if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
         MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
        MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
    if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
        ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
          MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
         MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
        caps |= MLX5_IPSEC_CAP_PRIO;

    if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,

@@ -1409,7 +1409,12 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
    if (!an_changes && link_modes == eproto.admin)
        goto out;

    mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
    err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
    if (err) {
        netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
        goto out;
    }

    mlx5_toggle_port_link(mdev);

out:

@@ -207,6 +207,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
{
    struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
    struct devlink *devlink = priv_to_devlink(dev);

    /* if this is the driver that initiated the fw reset, devlink completed the reload */
    if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
@@ -218,9 +219,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
            mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
        else
            mlx5_load_one(dev, true);
        devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
        devl_lock(devlink);
        devlink_remote_reload_actions_performed(devlink, 0,
                                                BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
                                                BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
        devl_unlock(devlink);
    }
}

@@ -48,6 +48,7 @@ static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
    struct irq_affinity_desc auto_desc = {};
    struct mlx5_irq *irq;
    u32 irq_index;
    int err;

@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
        else
            cpu_get(pool, cpumask_first(&af_desc->mask));
    }
    return mlx5_irq_alloc(pool, irq_index,
                          cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
                          NULL);
    irq = mlx5_irq_alloc(pool, irq_index,
                         cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
                         NULL);
    if (IS_ERR(irq))
        xa_erase(&pool->irqs, irq_index);
    return irq;
}

/* Looking for the IRQ with the smallest refcount that fits req_mask.

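The irq_pool_request_irq() fix above releases the reserved xarray slot when the subsequent allocation fails, so the pool no longer leaks indices. A minimal sketch of that reserve-then-undo shape, assuming illustrative pool/irq types (the real function's reservation step differs in detail):

    static struct my_irq *pool_request_irq(struct my_pool *pool)
    {
            struct my_irq *irq;
            u32 index;
            int err;

            /* Reserve a slot first so the index is stable... */
            err = xa_alloc(&pool->irqs, &index, NULL, xa_limit_32b, GFP_KERNEL);
            if (err)
                    return ERR_PTR(err);

            irq = my_irq_alloc(pool, index);
            if (IS_ERR(irq))
                    xa_erase(&pool->irqs, index);  /* ...and release it on failure */
            return irq;
    }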
@@ -1538,7 +1538,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
        goto unlock;

    for (i = 0; i < ldev->ports; i++) {
        if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
        if (ldev->pf[i].netdev == slave) {
            port = i;
            break;
        }

@@ -2142,7 +2142,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
    /* Panic tear down fw command will stop the PCI bus communication
     * with the HCA, so the health poll is no longer needed.
     */
    mlx5_drain_health_wq(dev);
    mlx5_stop_health_poll(dev, false);

    ret = mlx5_cmd_fast_teardown_hca(dev);
@@ -2177,6 +2176,7 @@ static void shutdown(struct pci_dev *pdev)

    mlx5_core_info(dev, "Shutdown was called\n");
    set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
    mlx5_drain_health_wq(dev);
    err = mlx5_try_fast_unload(dev);
    if (err)
        mlx5_unload_one(dev, false);

@@ -112,6 +112,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
    struct mlx5_core_dev *mdev = sf_dev->mdev;

    set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
    mlx5_drain_health_wq(mdev);
    mlx5_unload_one(mdev, false);
}

@@ -7,7 +7,7 @@
/* don't try to optimize STE allocation if the stack is too constaraining */
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
#define DR_RULE_MAX_STES_OPTIMIZED 5
#define DR_RULE_MAX_STES_OPTIMIZED 2
#endif
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)

@@ -20,7 +20,7 @@ if NET_VENDOR_META
config FBNIC
    tristate "Meta Platforms Host Network Interface"
    depends on X86_64 || COMPILE_TEST
    depends on S390=n
    depends on !S390
    depends on MAX_SKB_FRAGS < 22
    depends on PCI_MSI
    select PHYLINK

@@ -4349,7 +4349,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
    if (unlikely(!rtl_tx_slots_avail(tp))) {
        if (net_ratelimit())
            netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
        goto err_stop_0;
        netif_stop_queue(dev);
        return NETDEV_TX_BUSY;
    }

    opts[1] = rtl8169_tx_vlan_tag(skb);
@@ -4405,11 +4406,6 @@ err_dma_0:
    dev_kfree_skb_any(skb);
    dev->stats.tx_dropped++;
    return NETDEV_TX_OK;

err_stop_0:
    netif_stop_queue(dev);
    dev->stats.tx_dropped++;
    return NETDEV_TX_BUSY;
}

static unsigned int rtl_last_frag_len(struct sk_buff *skb)

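The r8169 change follows from the ndo_start_xmit() contract: returning NETDEV_TX_BUSY hands the same skb back to the core for requeuing, so nothing is lost and tx_dropped must not be bumped; only paths that actually consume the skb may count a drop. A condensed sketch of the two legitimate outcomes, with illustrative helpers:

    static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (unlikely(!tx_slots_avail(dev))) {
                    /* Ring full: stop the queue and push back. The core
                     * requeues the skb, so this is NOT a drop. */
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;
            }

            if (map_and_queue(dev, skb) < 0) {
                    /* Real failure: the skb is consumed here, so account it. */
                    dev_kfree_skb_any(skb);
                    dev->stats.tx_dropped++;
            }
            return NETDEV_TX_OK;
    }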
@@ -2219,9 +2219,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
             ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
    axienet_set_mac_address(ndev, NULL);
    axienet_set_multicast_list(ndev);
    axienet_setoptions(ndev, lp->options);
    napi_enable(&lp->napi_rx);
    napi_enable(&lp->napi_tx);
    axienet_setoptions(ndev, lp->options);
}

/**

@@ -653,13 +653,7 @@ static int aqr107_fill_interface_modes(struct phy_device *phydev)
    unsigned long *possible = phydev->possible_interfaces;
    unsigned int serdes_mode, rate_adapt;
    phy_interface_t interface;
    int i, val, ret;

    ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
                                    VEND1_GLOBAL_CFG_10M, val, val != 0,
                                    1000, 100000, false);
    if (ret)
        return ret;
    int i, val;

    /* Walk the media-speed configuration registers to determine which
     * host-side serdes modes may be used by the PHY depending on the
@@ -708,6 +702,25 @@ static int aqr107_fill_interface_modes(struct phy_device *phydev)
    return 0;
}

static int aqr113c_fill_interface_modes(struct phy_device *phydev)
{
    int val, ret;

    /* It's been observed on some models that - when coming out of suspend
     * - the FW signals that the PHY is ready but the GLOBAL_CFG registers
     * continue on returning zeroes for some time. Let's poll the 100M
     * register until it returns a real value as both 113c and 115c support
     * this mode.
     */
    ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
                                    VEND1_GLOBAL_CFG_100M, val, val != 0,
                                    1000, 100000, false);
    if (ret)
        return ret;

    return aqr107_fill_interface_modes(phydev);
}

static int aqr113c_config_init(struct phy_device *phydev)
{
    int ret;
@@ -725,7 +738,7 @@ static int aqr113c_config_init(struct phy_device *phydev)
    if (ret)
        return ret;

    return aqr107_fill_interface_modes(phydev);
    return aqr113c_fill_interface_modes(phydev);
}

static int aqr107_probe(struct phy_device *phydev)

@@ -1389,6 +1389,8 @@ static int ksz9131_config_init(struct phy_device *phydev)
    const struct device *dev_walker;
    int ret;

    phydev->mdix_ctrl = ETH_TP_MDI_AUTO;

    dev_walker = &phydev->mdio.dev;
    do {
        of_node = dev_walker->of_node;
@@ -1438,28 +1440,30 @@ static int ksz9131_config_init(struct phy_device *phydev)
#define MII_KSZ9131_AUTO_MDIX 0x1C
#define MII_KSZ9131_AUTO_MDI_SET BIT(7)
#define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
#define MII_KSZ9131_DIG_AXAN_STS 0x14
#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14)
#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12)

static int ksz9131_mdix_update(struct phy_device *phydev)
{
    int ret;

    ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
    if (ret < 0)
        return ret;

    if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
        if (ret & MII_KSZ9131_AUTO_MDI_SET)
            phydev->mdix_ctrl = ETH_TP_MDI;
        else
            phydev->mdix_ctrl = ETH_TP_MDI_X;
    if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) {
        phydev->mdix = phydev->mdix_ctrl;
    } else {
        phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
    }
        ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS);
        if (ret < 0)
            return ret;

    if (ret & MII_KSZ9131_AUTO_MDI_SET)
        phydev->mdix = ETH_TP_MDI;
    else
        phydev->mdix = ETH_TP_MDI_X;
        if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) {
            if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT)
                phydev->mdix = ETH_TP_MDI;
            else
                phydev->mdix = ETH_TP_MDI_X;
        } else {
            phydev->mdix = ETH_TP_MDI_INVALID;
        }
    }

    return 0;
}

@@ -1465,6 +1465,13 @@ static struct phy_driver realtek_drvs[] = {
        .handle_interrupt = genphy_handle_interrupt_no_ack,
        .suspend = genphy_suspend,
        .resume = genphy_resume,
    }, {
        PHY_ID_MATCH_EXACT(0x001cc960),
        .name = "RTL8366S Gigabit Ethernet",
        .suspend = genphy_suspend,
        .resume = genphy_resume,
        .read_mmd = genphy_read_mmd_unsupported,
        .write_mmd = genphy_write_mmd_unsupported,
    },
};

@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
    struct usbnet *dev = netdev_priv(netdev);
    __le16 res;
    int rc = 0;
    int err;

    if (phy_id) {
        netdev_dbg(netdev, "Only internal phy supported\n");
@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
    if (loc == MII_BMSR) {
        u8 value;

        sr_read_reg(dev, SR_NSR, &value);
        err = sr_read_reg(dev, SR_NSR, &value);
        if (err < 0)
            return err;

        if (value & NSR_LINKST)
            rc = 1;
    }
    sr_share_read_word(dev, 1, loc, &res);
    err = sr_share_read_word(dev, 1, loc, &res);
    if (err < 0)
        return err;

    if (rc == 1)
        res = le16_to_cpu(res) | BMSR_LSTATUS;
    else

@@ -18,6 +18,7 @@
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -37,7 +38,7 @@ struct qmc_hdlc {
    struct qmc_chan *qmc_chan;
    struct net_device *netdev;
    struct framer *framer;
    spinlock_t carrier_lock; /* Protect carrier detection */
    struct mutex carrier_lock; /* Protect carrier detection */
    struct notifier_block nb;
    bool is_crc32;
    spinlock_t tx_lock; /* Protect tx descriptors */
@@ -60,7 +61,7 @@ static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
    if (!qmc_hdlc->framer)
        return 0;

    guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
    guard(mutex)(&qmc_hdlc->carrier_lock);

    ret = framer_get_status(qmc_hdlc->framer, &framer_status);
    if (ret) {
@@ -249,6 +250,7 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
    struct qmc_hdlc_desc *desc = context;
    struct net_device *netdev;
    struct qmc_hdlc *qmc_hdlc;
    size_t crc_size;
    int ret;

    netdev = desc->netdev;
@@ -267,15 +269,26 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
        if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
            netdev->stats.rx_crc_errors++;
        kfree_skb(desc->skb);
    } else {
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += length;

        skb_put(desc->skb, length);
        desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
        netif_rx(desc->skb);
        goto re_queue;
    }

    /* Discard the CRC */
    crc_size = qmc_hdlc->is_crc32 ? 4 : 2;
    if (length < crc_size) {
        netdev->stats.rx_length_errors++;
        kfree_skb(desc->skb);
        goto re_queue;
    }
    length -= crc_size;

    netdev->stats.rx_packets++;
    netdev->stats.rx_bytes += length;

    skb_put(desc->skb, length);
    desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
    netif_rx(desc->skb);

re_queue:
    /* Re-queue a transfer using the same descriptor */
    ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
    if (ret) {
@@ -706,7 +719,7 @@ static int qmc_hdlc_probe(struct platform_device *pdev)

    qmc_hdlc->dev = dev;
    spin_lock_init(&qmc_hdlc->tx_lock);
    spin_lock_init(&qmc_hdlc->carrier_lock);
    mutex_init(&qmc_hdlc->carrier_lock);

    qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
    if (IS_ERR(qmc_hdlc->qmc_chan))

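The carrier_lock conversion above is about context rules: framer_get_status() can sleep, and sleeping inside a spinlock_irqsave section is invalid, whereas a mutex allows it. The scope-based guard() from <linux/cleanup.h> then releases the mutex automatically on every return path. A sketch of the pattern, with an illustrative sleeping callee:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    struct my_dev {
            struct mutex carrier_lock;      /* protects carrier detection */
    };

    static int my_update_carrier(struct my_dev *mdev)
    {
            /* Held until the end of scope; released on any return. */
            guard(mutex)(&mdev->carrier_lock);

            /* Safe here: a mutex holder may sleep, unlike inside a
             * spinlock_irqsave section. */
            return query_hw_status_may_sleep(mdev);
    }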
@@ -473,7 +473,8 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
    int i;

    clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
    if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
        return;

    for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
        struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

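Switching clear_bit() to test_and_clear_bit() makes the ext-IRQ disable path idempotent: the atomic read-and-clear lets exactly one caller observe the enabled state and perform the teardown, so a second disable (for instance suspend racing another teardown path) returns immediately instead of re-running it. The shape, with an illustrative helper:

    static void ext_irq_disable(struct my_ab *ab)
    {
            /* Atomically claim the enabled -> disabled transition; any
             * later caller bails out immediately. */
            if (!test_and_clear_bit(MY_EXT_IRQ_ENABLED, &ab->dev_flags))
                    return;

            disable_and_sync_irqs(ab);      /* illustrative helper */
    }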
@@ -361,7 +361,7 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
    struct ath12k *ar = arvif->ar;
    unsigned long wow_mask = 0;
    int pattern_id = 0;
    int ret, i;
    int ret, i, j;

    /* Setup requested WOW features */
    switch (arvif->vdev_type) {
@@ -431,9 +431,9 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
               eth_pattern->pattern_len);

        /* convert bitmask to bytemask */
        for (i = 0; i < eth_pattern->pattern_len; i++)
            if (eth_pattern->mask[i / 8] & BIT(i % 8))
                new_pattern.bytemask[i] = 0xff;
        for (j = 0; j < eth_pattern->pattern_len; j++)
            if (eth_pattern->mask[j / 8] & BIT(j % 8))
                new_pattern.bytemask[j] = 0xff;

        new_pattern.pattern_len = eth_pattern->pattern_len;
        new_pattern.pkt_offset = eth_pattern->pkt_offset;

@@ -303,6 +303,7 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)

    mvif->bss_conf.mt76.omac_idx = mvif->bss_conf.mt76.idx;
    mvif->phy = phy;
    mvif->bss_conf.vif = mvif;
    mvif->bss_conf.mt76.band_idx = 0;
    mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;

@@ -56,7 +56,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
    unsigned int thlen = 0;
    unsigned int p_off = 0;
    unsigned int ip_proto;
    u64 ret, remainder, gso_size;

    if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
        switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -99,16 +98,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
        u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
        u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));

        if (hdr->gso_size) {
            gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
            ret = div64_u64_rem(skb->len, gso_size, &remainder);
            if (!(ret && (hdr->gso_size > needed) &&
                  ((remainder > needed) || (remainder == 0)))) {
                return -EINVAL;
            }
            skb_shinfo(skb)->tx_flags |= SKBFL_SHARED_FRAG;
        }

        if (!pskb_may_pull(skb, needed))
            return -EINVAL;

@@ -182,6 +171,11 @@ retry:
        if (gso_type != SKB_GSO_UDP_L4)
            return -EINVAL;
        break;
    case SKB_GSO_TCPV4:
    case SKB_GSO_TCPV6:
        if (skb->csum_offset != offsetof(struct tcphdr, check))
            return -EINVAL;
        break;
    }

    /* Kernel has a special handling for GSO_BY_FRAGS. */

@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
        struct sock *ssk;

        __entry->active = mptcp_subflow_active(subflow);
        __entry->backup = subflow->backup;
        __entry->backup = subflow->backup || subflow->request_bkup;

        if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
            __entry->free = sk_stream_memory_free(subflow->tcp_sock);

@@ -119,13 +119,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
    case DISCOVERY_STARTING:
        break;
    case DISCOVERY_FINDING:
        /* If discovery was not started then it was initiated by the
         * MGMT interface so no MGMT event shall be generated either
         */
        if (old_state != DISCOVERY_STARTING) {
            hdev->discovery.state = old_state;
            return;
        }
        mgmt_discovering(hdev, 1);
        break;
    case DISCOVERY_RESOLVING:

@@ -1721,9 +1721,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
    switch (enable) {
    case LE_SCAN_ENABLE:
        hci_dev_set_flag(hdev, HCI_LE_SCAN);
        if (hdev->le_scan_type == LE_SCAN_ACTIVE)
        if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
            clear_pending_adv_report(hdev);
            hci_discovery_set_state(hdev, DISCOVERY_FINDING);
        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
        }
        break;

    case LE_SCAN_DISABLE:

@@ -2976,6 +2976,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
 	 */
 	filter_policy = hci_update_accept_list_sync(hdev);
 
+	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
+	 * passive scanning cannot be started since that would require the host
+	 * to be woken up to process the reports.
+	 */
+	if (hdev->suspended && !filter_policy) {
+		/* Check if accept list is empty then there is no need to scan
+		 * while suspended.
+		 */
+		if (list_empty(&hdev->le_accept_list))
+			return 0;
+
+		/* If there are devices is the accept_list that means some
+		 * devices could not be programmed which in non-suspended case
+		 * means filter_policy needs to be set to 0x00 so the host needs
+		 * to filter, but since this is treating suspended case we
+		 * can ignore device needing host to filter to allow devices in
+		 * the acceptlist to be able to wakeup the system.
+		 */
+		filter_policy = 0x01;
+	}
+
 	/* When the controller is using random resolvable addresses and
 	 * with that having LE privacy enabled, then controllers with
 	 * Extended Scanner Filter Policies support can now enable support
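Aside: the suspended-scan comments above encode a small decision table. As a rough sketch (function and parameter names invented here, not Bluetooth-subsystem API), the policy choice is:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: 0x00 = host filters, 0x01 = accept list filters,
 * -1 = don't scan at all.
 */
static int choose_filter_policy(bool suspended, bool acceptlist_empty,
				int programmed_policy)
{
	if (!suspended || programmed_policy)
		return programmed_policy;
	if (acceptlist_empty)
		return -1;	/* nothing could wake the host: skip scanning */
	/* Suspended with a partially-programmed accept list: filter on it
	 * anyway so accept-list devices can still wake the system.
	 */
	return 0x01;
}

int main(void)
{
	printf("%d\n", choose_filter_policy(true, false, 0x00));	/* 1 */
	printf("%d\n", choose_filter_policy(true, true, 0x00));	/* -1 */
	return 0;
}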
@@ -5150,6 +5150,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
+			bpf_net_ctx_clear(bpf_net_ctx);
 			return XDP_DROP;
 		}
 		bpf_net_ctx_clear(bpf_net_ctx);
 	}
 	return XDP_PASS;
 out_redir:

@@ -3288,7 +3288,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (ifm->ifi_index > 0)
 		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
 	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
-		dev = rtnl_dev_get(net, tb);
+		dev = rtnl_dev_get(tgt_net, tb);
 	else if (tb[IFLA_GROUP])
 		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
 	else
@@ -1331,13 +1331,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
 	u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
 	const struct ethtool_ops *ops = dev->ethtool_ops;
 	u32 dev_indir_size = 0, dev_key_size = 0, i;
+	u32 user_indir_len = 0, indir_bytes = 0;
 	struct ethtool_rxfh_param rxfh_dev = {};
 	struct ethtool_rxfh_context *ctx = NULL;
 	struct netlink_ext_ack *extack = NULL;
 	struct ethtool_rxnfc rx_rings;
 	struct ethtool_rxfh rxfh;
 	bool locked = false; /* dev->ethtool->rss_lock taken */
-	u32 indir_bytes = 0;
 	bool create = false;
 	u8 *rss_config;
 	int ret;
@@ -1382,10 +1382,9 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
 	     rxfh.input_xfrm == RXH_XFRM_NO_CHANGE))
 		return -EINVAL;
 
-	if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE)
-		indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
+	indir_bytes = dev_indir_size * sizeof(rxfh_dev.indir[0]);
 
-	rss_config = kzalloc(indir_bytes + rxfh.key_size, GFP_USER);
+	rss_config = kzalloc(indir_bytes + dev_key_size, GFP_USER);
 	if (!rss_config)
 		return -ENOMEM;
 
@@ -1400,6 +1399,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
 	 */
 	if (rxfh.indir_size &&
 	    rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
+		user_indir_len = indir_bytes;
 		rxfh_dev.indir = (u32 *)rss_config;
 		rxfh_dev.indir_size = dev_indir_size;
 		ret = ethtool_copy_validate_indir(rxfh_dev.indir,
@@ -1426,7 +1426,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
 		rxfh_dev.key_size = dev_key_size;
 		rxfh_dev.key = rss_config + indir_bytes;
 		if (copy_from_user(rxfh_dev.key,
-				   useraddr + rss_cfg_offset + indir_bytes,
+				   useraddr + rss_cfg_offset + user_indir_len,
 				   rxfh.key_size)) {
 			ret = -EFAULT;
 			goto out;
@@ -1474,16 +1474,21 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
 	rxfh_dev.input_xfrm = rxfh.input_xfrm;
 
 	if (rxfh.rss_context && ops->create_rxfh_context) {
-		if (create)
+		if (create) {
 			ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev,
 						       extack);
-		else if (rxfh_dev.rss_delete)
+			/* Make sure driver populates defaults */
+			WARN_ON_ONCE(!ret && !rxfh_dev.key &&
+				     !memchr_inv(ethtool_rxfh_context_key(ctx),
+						 0, ctx->key_size));
+		} else if (rxfh_dev.rss_delete) {
 			ret = ops->remove_rxfh_context(dev, ctx,
 						       rxfh.rss_context,
 						       extack);
-		else
+		} else {
 			ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev,
 						       extack);
+		}
 	} else {
 		ret = ops->set_rxfh(dev, &rxfh_dev, extack);
 	}
@@ -1522,6 +1527,22 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
 			kfree(ctx);
 			goto out;
 		}
+
+		/* Fetch the defaults for the old API, in the new API drivers
+		 * should write defaults into ctx themselves.
+		 */
+		rxfh_dev.indir = (u32 *)rss_config;
+		rxfh_dev.indir_size = dev_indir_size;
+
+		rxfh_dev.key = rss_config + indir_bytes;
+		rxfh_dev.key_size = dev_key_size;
+
+		ret = ops->get_rxfh(dev, &rxfh_dev);
+		if (WARN_ON(ret)) {
+			xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
+			kfree(ctx);
+			goto out;
+		}
 	}
 	if (rxfh_dev.rss_delete) {
 		WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx);
@@ -1530,12 +1551,14 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
 	if (rxfh_dev.indir) {
 		for (i = 0; i < dev_indir_size; i++)
 			ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i];
-		ctx->indir_configured = 1;
+		ctx->indir_configured =
+			rxfh.indir_size &&
+			rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE;
 	}
 	if (rxfh_dev.key) {
 		memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key,
 		       dev_key_size);
-		ctx->key_configured = 1;
+		ctx->key_configured = !!rxfh.key_size;
 	}
 	if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE)
 		ctx->hfunc = rxfh_dev.hfunc;
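Aside: throughout the function above, rss_config is a single allocation holding the device-sized indirection table followed by the hash key; indir_bytes is the full table length while user_indir_len is how much of it the user actually supplied (0 on "no change"), which is why the key must be copied from offset user_indir_len. A toy layout calculation with assumed sizes (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical device geometry. */
	uint32_t dev_indir_size = 128;	/* table entries */
	uint32_t dev_key_size = 40;	/* key bytes */
	uint32_t indir_bytes = dev_indir_size * sizeof(uint32_t);
	uint32_t user_indir_len = 0;	/* user asked for "no change" */

	uint8_t *rss_config = calloc(indir_bytes + dev_key_size, 1);
	if (!rss_config)
		return 1;

	uint32_t *indir = (uint32_t *)rss_config;	/* table first */
	uint8_t *key = rss_config + indir_bytes;	/* key after it */

	/* In the user's buffer the key follows only the bytes the user
	 * sent, so it sits at user_indir_len, not at indir_bytes.
	 */
	printf("table %p, key %p, user key offset %u\n",
	       (void *)indir, (void *)key, user_indir_len);
	free(rss_config);
	return 0;
}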
@@ -111,7 +111,8 @@ rss_reply_size(const struct ethnl_req_info *req_base,
 	const struct rss_reply_data *data = RSS_REPDATA(reply_base);
 	int len;
 
-	len = nla_total_size(sizeof(u32)) +	/* _RSS_HFUNC */
+	len = nla_total_size(sizeof(u32)) +	/* _RSS_CONTEXT */
+	      nla_total_size(sizeof(u32)) +	/* _RSS_HFUNC */
 	      nla_total_size(sizeof(u32)) +	/* _RSS_INPUT_XFRM */
 	      nla_total_size(sizeof(u32) * data->indir_size) +	/* _RSS_INDIR */
 	      nla_total_size(data->hkey_size);	/* _RSS_HKEY */

@@ -124,6 +125,11 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
 	       const struct ethnl_reply_data *reply_base)
 {
 	const struct rss_reply_data *data = RSS_REPDATA(reply_base);
+	struct rss_req_info *request = RSS_REQINFO(req_base);
+
+	if (request->rss_context &&
+	    nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context))
+		return -EMSGSIZE;
 
 	if ((data->hfunc &&
 	     nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
 
 static int __init iptable_nat_init(void)
 {
-	int ret = xt_register_template(&nf_nat_ipv4_table,
-				       iptable_nat_table_init);
+	int ret;
 
+	/* net->gen->ptr[iptable_nat_net_id] must be allocated
+	 * before calling iptable_nat_table_init().
+	 */
+	ret = register_pernet_subsys(&iptable_nat_net_ops);
 	if (ret < 0)
 		return ret;
 
-	ret = register_pernet_subsys(&iptable_nat_net_ops);
-	if (ret < 0) {
-		xt_unregister_template(&nf_nat_ipv4_table);
-		return ret;
-	}
+	ret = xt_register_template(&nf_nat_ipv4_table,
+				   iptable_nat_table_init);
+	if (ret < 0)
+		unregister_pernet_subsys(&iptable_nat_net_ops);
 
 	return ret;
 }
 
 static void __exit iptable_nat_exit(void)
 {
-	unregister_pernet_subsys(&iptable_nat_net_ops);
 	xt_unregister_template(&nf_nat_ipv4_table);
+	unregister_pernet_subsys(&iptable_nat_net_ops);
 }
 
 module_init(iptable_nat_init);
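Aside: the fix reorders registration so the per-net storage exists before the template callback can run, and tears down in reverse order on exit. The general shape of that pattern, sketched outside netfilter with placeholder functions (the names below are invented, not kernel API):

#include <stdio.h>

/* Placeholders standing in for the storage provider and its consumer. */
static int register_storage(void)	{ puts("storage up");	return 0; }
static int register_consumer(void)	{ puts("consumer up");	return 0; }
static void unregister_storage(void)	{ puts("storage down"); }
static void unregister_consumer(void)	{ puts("consumer down"); }

static int init(void)
{
	int ret;

	/* The consumer may call back into the storage as soon as it is
	 * registered, so the storage must come up first ...
	 */
	ret = register_storage();
	if (ret < 0)
		return ret;

	ret = register_consumer();
	if (ret < 0)
		unregister_storage();	/* unwind on failure */
	return ret;
}

static void fini(void)
{
	/* ... and go down last, mirroring init in reverse. */
	unregister_consumer();
	unregister_storage();
}

int main(void)
{
	if (!init())
		fini();
	return 0;
}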
@@ -754,8 +754,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 	 * <prev RTT . ><current RTT .. ><next RTT .... >
 	 */
 
-	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
-	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) {
 		u64 rcvwin, grow;
 		int rcvbuf;
 
@@ -771,12 +770,22 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 		rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
 			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
-		if (rcvbuf > sk->sk_rcvbuf) {
-			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+		if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+			if (rcvbuf > sk->sk_rcvbuf) {
+				WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
-			/* Make the window clamp follow along. */
-			WRITE_ONCE(tp->window_clamp,
-				   tcp_win_from_space(sk, rcvbuf));
+				/* Make the window clamp follow along. */
+				WRITE_ONCE(tp->window_clamp,
+					   tcp_win_from_space(sk, rcvbuf));
+			}
+		} else {
+			/* Make the window clamp follow along while being bounded
+			 * by SO_RCVBUF.
+			 */
+			int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));
+
+			if (clamp > tp->window_clamp)
+				WRITE_ONCE(tp->window_clamp, clamp);
 		}
 	}
 	tp->rcvq_space.space = copied;
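Aside: when SO_RCVBUF is locked the fix still lets the clamp grow, but bounds it by the application-chosen buffer; the arithmetic reduces to a min() before the compare. A standalone sketch with assumed numbers and a simplified stand-in for tcp_win_from_space():

#include <stdio.h>

/* Stand-in for tcp_win_from_space(): assume half the buffer is window. */
static int win_from_space(int space)
{
	return space / 2;
}

int main(void)
{
	int rcvbuf = 262144;		/* what auto-tuning would pick */
	int sk_rcvbuf = 65536;		/* fixed by SO_RCVBUF */
	int window_clamp = 20000;	/* current clamp */

	int bounded = rcvbuf < sk_rcvbuf ? rcvbuf : sk_rcvbuf;
	int clamp = win_from_space(bounded);

	if (clamp > window_clamp)
		window_clamp = clamp;	/* grow, but never past SO_RCVBUF */

	printf("window_clamp = %d\n", window_clamp);
	return 0;
}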
@@ -140,6 +140,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	if (thlen < sizeof(*th))
 		goto out;
 
+	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
+		goto out;
+
 	if (!pskb_may_pull(skb, thlen))
 		goto out;
 

@@ -278,6 +278,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	if (gso_skb->len <= sizeof(*uh) + mss)
 		return ERR_PTR(-EINVAL);
 
+	if (unlikely(skb_checksum_start(gso_skb) !=
+		     skb_transport_header(gso_skb)))
+		return ERR_PTR(-EINVAL);
+
 	if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
 		/* Packet is from an untrusted source, reset gso_segs. */
 		skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),
@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
 		return NULL;
 	memset(ndopts, 0, sizeof(*ndopts));
 	while (opt_len) {
+		bool unknown = false;
 		int l;
 		if (opt_len < sizeof(struct nd_opt_hdr))
 			return NULL;
@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
 			break;
 #endif
 		default:
-			if (ndisc_is_useropt(dev, nd_opt)) {
-				ndopts->nd_useropts_end = nd_opt;
-				if (!ndopts->nd_useropts)
-					ndopts->nd_useropts = nd_opt;
-			} else {
-				/*
-				 * Unknown options must be silently ignored,
-				 * to accommodate future extension to the
-				 * protocol.
-				 */
-				ND_PRINTK(2, notice,
-					  "%s: ignored unsupported option; type=%d, len=%d\n",
-					  __func__,
-					  nd_opt->nd_opt_type,
-					  nd_opt->nd_opt_len);
-			}
+			unknown = true;
 		}
+		if (ndisc_is_useropt(dev, nd_opt)) {
+			ndopts->nd_useropts_end = nd_opt;
+			if (!ndopts->nd_useropts)
+				ndopts->nd_useropts = nd_opt;
+		} else if (unknown) {
+			/*
+			 * Unknown options must be silently ignored,
+			 * to accommodate future extension to the
+			 * protocol.
+			 */
+			ND_PRINTK(2, notice,
+				  "%s: ignored unsupported option; type=%d, len=%d\n",
+				  __func__,
+				  nd_opt->nd_opt_type,
+				  nd_opt->nd_opt_len);
+		}
 next_opt:
 		opt_len -= l;
@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
 
 static int __init ip6table_nat_init(void)
 {
-	int ret = xt_register_template(&nf_nat_ipv6_table,
-				       ip6table_nat_table_init);
+	int ret;
 
+	/* net->gen->ptr[ip6table_nat_net_id] must be allocated
+	 * before calling ip6t_nat_register_lookups().
+	 */
+	ret = register_pernet_subsys(&ip6table_nat_net_ops);
 	if (ret < 0)
 		return ret;
 
-	ret = register_pernet_subsys(&ip6table_nat_net_ops);
+	ret = xt_register_template(&nf_nat_ipv6_table,
+				   ip6table_nat_table_init);
 	if (ret)
-		xt_unregister_template(&nf_nat_ipv6_table);
+		unregister_pernet_subsys(&ip6table_nat_net_ops);
 
 	return ret;
 }
 
 static void __exit ip6table_nat_exit(void)
 {
-	unregister_pernet_subsys(&ip6table_nat_net_ops);
 	xt_unregister_template(&nf_nat_ipv6_table);
+	unregister_pernet_subsys(&ip6table_nat_net_ops);
 }
 
 module_init(ip6table_nat_init);
@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
 	struct iucv_sock *iucv = iucv_sk(sk);
 	struct iucv_path *path = iucv->path;
 
-	if (iucv->path) {
-		iucv->path = NULL;
+	/* Whoever resets the path pointer, must sever and free it. */
+	if (xchg(&iucv->path, NULL)) {
 		if (with_user_data) {
 			low_nmcpy(user_data, iucv->src_name);
 			high_nmcpy(user_data, iucv->dst_name);
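Aside: the xchg() turns "test then clear" into an atomic claim, so exactly one caller observes the old pointer and owns the cleanup. The same idiom in a standalone form, using C11 atomics in place of the kernel's xchg() (illustrative sketch, not the socket code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(int *) path;

/* Whoever swaps the pointer out owns severing and freeing it. */
static void sever_path(void)
{
	int *p = atomic_exchange(&path, NULL);

	if (p) {
		printf("claimed %d, freeing\n", *p);
		free(p);
	}
}

int main(void)
{
	int *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	*p = 42;
	atomic_store(&path, p);

	sever_path();	/* claims the pointer and frees it */
	sever_path();	/* sees NULL, does nothing */
	return 0;
}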
@@ -114,7 +114,8 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
 
 	/* apply all changes now - no failures allowed */
 
-	if (monitor_sdata)
+	if (monitor_sdata &&
+	    ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
 		ieee80211_set_mu_mimo_follow(monitor_sdata, params);
 
 	if (params->flags) {
@@ -3053,6 +3053,9 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
 		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+				return -EOPNOTSUPP;
+
 			sdata = wiphy_dereference(local->hw.wiphy,
 						  local->monitor_sdata);
 			if (!sdata)
@@ -3115,7 +3118,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 	if (has_monitor) {
 		sdata = wiphy_dereference(local->hw.wiphy,
 					  local->monitor_sdata);
-		if (sdata) {
+		if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
 			sdata->deflink.user_power_level = local->user_power_level;
 			if (txp_type != sdata->vif.bss_conf.txpower_type)
 				update_txp_type = true;

@@ -1768,7 +1768,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
 			break;
 		}
 		sdata = rcu_dereference(local->monitor_sdata);
-		if (sdata) {
+		if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
 			vif = &sdata->vif;
 			info->hw_queue =
 				vif->hw_queue[skb_get_queue_mapping(skb)];
@@ -3957,7 +3957,8 @@ begin:
 			break;
 		}
 		tx.sdata = rcu_dereference(local->monitor_sdata);
-		if (tx.sdata) {
+		if (tx.sdata &&
+		    ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
 			vif = &tx.sdata->vif;
 			info->hw_queue =
 				vif->hw_queue[skb_get_queue_mapping(skb)];

@@ -776,7 +776,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
 	sdata = rcu_dereference_check(local->monitor_sdata,
 				      lockdep_is_held(&local->iflist_mtx) ||
 				      lockdep_is_held(&local->hw.wiphy->mtx));
-	if (sdata &&
+	if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
 	    (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
 	     sdata->flags & IEEE80211_SDATA_IN_DRIVER))
 		iterator(data, sdata->vif.addr, &sdata->vif);
@@ -19,7 +19,9 @@ static const struct snmp_mib mptcp_snmp_list[] = {
 	SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
 	SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
 	SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
+	SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
 	SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
+	SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
 	SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
 	SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
 	SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),

@@ -14,7 +14,9 @@ enum linux_mptcp_mib_field {
 	MPTCP_MIB_RETRANSSEGS,		/* Segments retransmitted at the MPTCP-level */
 	MPTCP_MIB_JOINNOTOKEN,		/* Received MP_JOIN but the token was not found */
 	MPTCP_MIB_JOINSYNRX,		/* Received a SYN + MP_JOIN */
+	MPTCP_MIB_JOINSYNBACKUPRX,	/* Received a SYN + MP_JOIN + backup flag */
 	MPTCP_MIB_JOINSYNACKRX,		/* Received a SYN/ACK + MP_JOIN */
+	MPTCP_MIB_JOINSYNACKBACKUPRX,	/* Received a SYN/ACK + MP_JOIN + backup flag */
 	MPTCP_MIB_JOINSYNACKMAC,	/* HMAC was wrong on SYN/ACK + MP_JOIN */
 	MPTCP_MIB_JOINACKRX,		/* Received an ACK + MP_JOIN */
 	MPTCP_MIB_JOINACKMAC,		/* HMAC was wrong on ACK + MP_JOIN */

@@ -909,7 +909,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
 		return true;
 	} else if (subflow_req->mp_join) {
 		opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
-		opts->backup = subflow_req->backup;
+		opts->backup = subflow_req->request_bkup;
 		opts->join_id = subflow_req->local_id;
 		opts->thmac = subflow_req->thmac;
 		opts->nonce = subflow_req->local_nonce;
@@ -426,6 +426,18 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
 	return mptcp_pm_nl_get_local_id(msk, &skc_local);
 }
 
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
+{
+	struct mptcp_addr_info skc_local;
+
+	mptcp_local_address((struct sock_common *)skc, &skc_local);
+
+	if (mptcp_pm_is_userspace(msk))
+		return mptcp_userspace_pm_is_backup(msk, &skc_local);
+
+	return mptcp_pm_nl_is_backup(msk, &skc_local);
+}
+
 int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
 					 u8 *flags, int *ifindex)
 {
@@ -471,7 +471,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
 	slow = lock_sock_fast(ssk);
 	if (prio) {
 		subflow->send_mp_prio = 1;
-		subflow->backup = backup;
 		subflow->request_bkup = backup;
 	}
 
@@ -1102,6 +1101,24 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
 	return ret;
 }
 
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
+{
+	struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+	struct mptcp_pm_addr_entry *entry;
+	bool backup = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+		if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
+			backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return backup;
+}
+
 #define MPTCP_PM_CMD_GRP_OFFSET       0
 #define MPTCP_PM_EV_GRP_OFFSET        1
 
@@ -1401,6 +1418,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
 	ret = remove_anno_list_by_saddr(msk, addr);
 	if (ret || force) {
 		spin_lock_bh(&msk->pm.lock);
+		msk->pm.add_addr_signaled -= ret;
 		mptcp_pm_remove_addr(msk, &list);
 		spin_unlock_bh(&msk->pm.lock);
 	}
@@ -1534,16 +1552,25 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
 {
 	struct mptcp_rm_list alist = { .nr = 0 };
 	struct mptcp_pm_addr_entry *entry;
+	int anno_nr = 0;
 
 	list_for_each_entry(entry, rm_list, list) {
-		if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
-		     lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
-		    alist.nr < MPTCP_RM_IDS_MAX)
-			alist.ids[alist.nr++] = entry->addr.id;
+		if (alist.nr >= MPTCP_RM_IDS_MAX)
+			break;
+
+		/* only delete if either announced or matching a subflow */
+		if (remove_anno_list_by_saddr(msk, &entry->addr))
+			anno_nr++;
+		else if (!lookup_subflow_by_saddr(&msk->conn_list,
+						  &entry->addr))
+			continue;
+
+		alist.ids[alist.nr++] = entry->addr.id;
 	}
 
 	if (alist.nr) {
 		spin_lock_bh(&msk->pm.lock);
+		msk->pm.add_addr_signaled -= anno_nr;
 		mptcp_pm_remove_addr(msk, &alist);
 		spin_unlock_bh(&msk->pm.lock);
 	}
@@ -1556,17 +1583,18 @@ static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
 	struct mptcp_pm_addr_entry *entry;
 
 	list_for_each_entry(entry, rm_list, list) {
-		if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
-		    slist.nr < MPTCP_RM_IDS_MAX)
+		if (slist.nr < MPTCP_RM_IDS_MAX &&
+		    lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
 			slist.ids[slist.nr++] = entry->addr.id;
 
-		if (remove_anno_list_by_saddr(msk, &entry->addr) &&
-		    alist.nr < MPTCP_RM_IDS_MAX)
+		if (alist.nr < MPTCP_RM_IDS_MAX &&
+		    remove_anno_list_by_saddr(msk, &entry->addr))
 			alist.ids[alist.nr++] = entry->addr.id;
 	}
 
 	if (alist.nr) {
 		spin_lock_bh(&msk->pm.lock);
+		msk->pm.add_addr_signaled -= alist.nr;
 		mptcp_pm_remove_addr(msk, &alist);
 		spin_unlock_bh(&msk->pm.lock);
 	}
@@ -165,6 +165,24 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
 	return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
 }
 
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
+				  struct mptcp_addr_info *skc)
+{
+	struct mptcp_pm_addr_entry *entry;
+	bool backup = false;
+
+	spin_lock_bh(&msk->pm.lock);
+	list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+		if (mptcp_addresses_equal(&entry->addr, skc, false)) {
+			backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+			break;
+		}
+	}
+	spin_unlock_bh(&msk->pm.lock);
+
+	return backup;
+}
+
 int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
@@ -350,8 +350,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
 	skb_orphan(skb);
 
 	/* try to fetch required memory from subflow */
-	if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
+	if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
+		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
 		goto drop;
+	}
 
 	has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
 
@@ -844,10 +846,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 		sk_rbuf = ssk_rbuf;
 
 	/* over limit? can't append more skbs to msk, Also, no need to wake-up*/
-	if (__mptcp_rmem(sk) > sk_rbuf) {
-		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+	if (__mptcp_rmem(sk) > sk_rbuf)
 		return;
-	}
 
 	/* Wake-up the reader only for in-sequence data */
 	mptcp_data_lock(sk);
@@ -1422,13 +1422,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	}
 
 	mptcp_for_each_subflow(msk, subflow) {
+		bool backup = subflow->backup || subflow->request_bkup;
+
 		trace_mptcp_subflow_get_send(subflow);
 		ssk =  mptcp_subflow_tcp_sock(subflow);
 		if (!mptcp_subflow_active(subflow))
 			continue;
 
 		tout = max(tout, mptcp_timeout_from_subflow(subflow));
-		nr_active += !subflow->backup;
+		nr_active += !backup;
 		pace = subflow->avg_pacing_rate;
 		if (unlikely(!pace)) {
 			/* init pacing rate from socket */
@@ -1439,9 +1441,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 		}
 
 		linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
-		if (linger_time < send_info[subflow->backup].linger_time) {
-			send_info[subflow->backup].ssk = ssk;
-			send_info[subflow->backup].linger_time = linger_time;
+		if (linger_time < send_info[backup].linger_time) {
+			send_info[backup].ssk = ssk;
+			send_info[backup].linger_time = linger_time;
 		}
 	}
 	__mptcp_set_timeout(sk, tout);
@@ -448,6 +448,7 @@ struct mptcp_subflow_request_sock {
 	u16	mp_capable : 1,
 		mp_join : 1,
 		backup : 1,
+		request_bkup : 1,
 		csum_reqd : 1,
 		allow_join_id0 : 1;
 	u8	local_id;

@@ -1108,6 +1109,9 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
 int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
 int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
 int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
 			  struct netlink_callback *cb);
@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
 		return NULL;
 	}
 	subflow_req->local_id = local_id;
+	subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
 
 	return msk;
 }
@@ -168,6 +169,9 @@ static int subflow_check_req(struct request_sock *req,
 		return 0;
 	} else if (opt_mp_join) {
 		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+
+		if (mp_opt.backup)
+			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
 	}
 
 	if (opt_mp_capable && listener->request_mptcp) {
@@ -577,6 +581,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 		subflow->mp_join = 1;
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
 
+		if (subflow->backup)
+			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+
 		if (subflow_use_different_dport(msk, sk)) {
 			pr_debug("synack inet_dport=%d %d",
 				 ntohs(inet_sk(sk)->inet_dport),
@@ -614,6 +621,8 @@ static int subflow_chk_local_id(struct sock *sk)
 		return err;
 
 	subflow_set_local_id(subflow, err);
+	subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
+
 	return 0;
 }
 
@@ -1221,14 +1230,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
-	u32 incr;
+	struct tcp_sock *tp = tcp_sk(ssk);
+	u32 offset, incr, avail_len;
 
-	incr = limit >= skb->len ? skb->len + fin : limit;
+	offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+	if (WARN_ON_ONCE(offset > skb->len))
+		goto out;
 
-	pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
-		 subflow->map_subflow_seq);
+	avail_len = skb->len - offset;
+	incr = limit >= avail_len ? avail_len + fin : limit;
+
+	pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
+		 offset, subflow->map_subflow_seq);
 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
 	tcp_sk(ssk)->copied_seq += incr;
+
+out:
 	if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
 		sk_eat_skb(ssk, skb);
 	if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
@@ -2005,6 +2022,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
 	new_ctx->fully_established = 1;
 	new_ctx->remote_key_valid = 1;
 	new_ctx->backup = subflow_req->backup;
+	new_ctx->request_bkup = subflow_req->request_bkup;
 	WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
 	new_ctx->token = subflow_req->token;
 	new_ctx->thmac = subflow_req->thmac;
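Aside: the duplicate-data fix keys the discard length off how far copied_seq has already advanced into the skb. The sequence arithmetic in isolation, with made-up sequence numbers and the FIN accounting omitted for brevity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical subflow state: the skb covers [1000, 1500) and
	 * the receiver already consumed up to 1200.
	 */
	uint32_t skb_seq = 1000, skb_len = 500;
	uint32_t copied_seq = 1200;
	uint64_t limit = 10000;

	uint32_t offset = copied_seq - skb_seq;	/* 200 bytes already eaten */
	if (offset > skb_len)
		return 1;			/* nothing sane to discard */

	uint32_t avail_len = skb_len - offset;	/* 300 bytes still in the skb */
	uint32_t incr = limit >= avail_len ? avail_len : (uint32_t)limit;

	/* copied_seq only advances by what is actually left. */
	printf("discarding %u, copied_seq -> %u\n", incr, copied_seq + incr);
	return 0;
}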
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zones_mutex);
 struct zones_ht_key {
 	struct net *net;
 	u16 zone;
+	/* Note : pad[] must be the last field. */
+	u8 pad[];
 };
 
 struct tcf_ct_flow_table {
@@ -60,7 +62,7 @@ struct tcf_ct_flow_table {
 static const struct rhashtable_params zones_params = {
 	.head_offset = offsetof(struct tcf_ct_flow_table, node),
 	.key_offset = offsetof(struct tcf_ct_flow_table, key),
-	.key_len = sizeof_field(struct tcf_ct_flow_table, key),
+	.key_len = offsetof(struct zones_ht_key, pad),
 	.automatic_shrinking = true,
 };
 
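Aside: rhashtable hashes and compares the key as raw bytes, so any compiler padding inside zones_ht_key would feed uninitialized bytes into the hash; ending the key length at a zero-length pad[] marker keeps only the named fields in play. A sketch of the size difference (the printed values assume a typical 64-bit ABI):

#include <stddef.h>
#include <stdio.h>

struct zones_ht_key {
	void *net;		/* stand-in for struct net * */
	unsigned short zone;
	/* Note : pad[] must be the last field. */
	unsigned char pad[];
};

int main(void)
{
	/* sizeof() rounds up to the struct's alignment (typically 16 here),
	 * pulling padding bytes into a byte-wise hash; offsetof(pad) stops
	 * right after the real fields (typically 10).
	 */
	printf("sizeof       = %zu\n", sizeof(struct zones_ht_key));
	printf("offsetof pad = %zu\n", offsetof(struct zones_ht_key, pad));
	return 0;
}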
@@ -3319,10 +3319,8 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family)
 
 	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
 			      &smc->clcsock);
-	if (rc) {
-		sk_common_release(sk);
+	if (rc)
 		return rc;
-	}
 
 	/* smc_clcsock_release() does not wait smc->clcsock->sk's
 	 * destruction; its sk_state might not be TCP_CLOSE after
@@ -3368,6 +3366,9 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
 		smc->clcsock = clcsock;
 	else
 		rc = smc_create_clcsk(net, sk, family);
+
+	if (rc)
+		sk_common_release(sk);
 out:
 	return rc;
 }
@@ -3178,8 +3178,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 			       struct ieee80211_mgmt *mgmt, size_t len,
 			       gfp_t gfp)
 {
-	size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
-				      u.probe_resp.variable);
+	size_t min_hdr_len;
 	struct ieee80211_ext *ext = NULL;
 	enum cfg80211_bss_frame_type ftype;
 	u16 beacon_interval;
@@ -3202,10 +3201,16 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 
 	if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
 		ext = (void *) mgmt;
-		min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon);
 		if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
 			min_hdr_len = offsetof(struct ieee80211_ext,
 					       u.s1g_short_beacon.variable);
+		else
+			min_hdr_len = offsetof(struct ieee80211_ext,
+					       u.s1g_beacon.variable);
+	} else {
+		/* same for beacons */
+		min_hdr_len = offsetof(struct ieee80211_mgmt,
+				       u.probe_resp.variable);
 	}
 
 	if (WARN_ON(len < min_hdr_len))

@@ -1045,6 +1045,7 @@ void cfg80211_connect_done(struct net_device *dev,
 				cfg80211_hold_bss(
 					bss_from_pub(params->links[link].bss));
 			ev->cr.links[link].bss = params->links[link].bss;
+			ev->cr.links[link].status = params->links[link].status;
 
 			if (params->links[link].addr) {
 				ev->cr.links[link].addr = next;
@@ -713,7 +713,7 @@ $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp
 # Make sure we are able to include and link libbpf against c++.
 $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
 	$(call msg,CXX,,$@)
-	$(Q)$(CXX) $(CFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
+	$(Q)$(CXX) $(subst -D_GNU_SOURCE=,,$(CFLAGS)) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@
 
 # Benchmark runner
 $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ)

@@ -253,7 +253,7 @@ static void test_uretprobe_syscall_call(void)
 	struct uprobe_syscall_executed *skel;
 	int pid, status, err, go[2], c;
 
-	if (ASSERT_OK(pipe(go), "pipe"))
+	if (!ASSERT_OK(pipe(go), "pipe"))
 		return;
 
 	skel = uprobe_syscall_executed__open_and_load();
@@ -19,6 +19,15 @@ def _rss_key_rand(length):
     return [random.randint(0, 255) for _ in range(length)]
 
 
+def _rss_key_check(cfg, data=None, context=0):
+    if data is None:
+        data = get_rss(cfg, context=context)
+    if 'rss-hash-key' not in data:
+        return
+    non_zero = [x for x in data['rss-hash-key'] if x != 0]
+    ksft_eq(bool(non_zero), True, comment=f"RSS key is all zero {data['rss-hash-key']}")
+
+
 def get_rss(cfg, context=0):
     return ethtool(f"-x {cfg.ifname} context {context}", json=True)[0]
 
@@ -90,8 +99,9 @@ def _send_traffic_check(cfg, port, name, params):
 def test_rss_key_indir(cfg):
     """Test basics like updating the main RSS key and indirection table."""
 
-    if len(_get_rx_cnts(cfg)) < 2:
-        KsftSkipEx("Device has only one queue (or doesn't support queue stats)")
+    qcnt = len(_get_rx_cnts(cfg))
+    if qcnt < 3:
+        KsftSkipEx("Device has fewer than 3 queues (or doesn't support queue stats)")
 
     data = get_rss(cfg)
     want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table']
@@ -101,6 +111,7 @@ def test_rss_key_indir(cfg):
         if not data[k]:
             raise KsftFailEx(f"ethtool results empty for '{k}': {data[k]}")
 
+    _rss_key_check(cfg, data=data)
     key_len = len(data['rss-hash-key'])
 
     # Set the key
@@ -110,9 +121,26 @@ def test_rss_key_indir(cfg):
     data = get_rss(cfg)
     ksft_eq(key, data['rss-hash-key'])
 
+    # Set the indirection table and the key together
+    key = _rss_key_rand(key_len)
+    ethtool(f"-X {cfg.ifname} equal 3 hkey " + _rss_key_str(key))
+    reset_indir = defer(ethtool, f"-X {cfg.ifname} default")
+
+    data = get_rss(cfg)
+    _rss_key_check(cfg, data=data)
+    ksft_eq(0, min(data['rss-indirection-table']))
+    ksft_eq(2, max(data['rss-indirection-table']))
+
+    # Reset indirection table and set the key
+    key = _rss_key_rand(key_len)
+    ethtool(f"-X {cfg.ifname} default hkey " + _rss_key_str(key))
+    data = get_rss(cfg)
+    _rss_key_check(cfg, data=data)
+    ksft_eq(0, min(data['rss-indirection-table']))
+    ksft_eq(qcnt - 1, max(data['rss-indirection-table']))
+
     # Set the indirection table
     ethtool(f"-X {cfg.ifname} equal 2")
     reset_indir = defer(ethtool, f"-X {cfg.ifname} default")
     data = get_rss(cfg)
     ksft_eq(0, min(data['rss-indirection-table']))
     ksft_eq(1, max(data['rss-indirection-table']))
@@ -317,8 +345,11 @@ def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
             ctx_cnt = i
             break
 
+        _rss_key_check(cfg, context=ctx_id)
+
         if not create_with_cfg:
             ethtool(f"-X {cfg.ifname} context {ctx_id} {want_cfg}")
+            _rss_key_check(cfg, context=ctx_id)
 
         # Sanity check the context we just created
         data = get_rss(cfg, ctx_id)
@@ -1115,11 +1115,11 @@ again:
 		return 1;
 	}
 
-	if (--cfg_repeat > 0) {
-		if (cfg_input)
-			close(fd);
+	if (cfg_input)
+		close(fd);
+
+	if (--cfg_repeat > 0)
 		goto again;
-	}
 
 	return 0;
 }
@@ -661,7 +661,7 @@ pm_nl_check_endpoint()
 	done
 
 	if [ -z "${id}" ]; then
-		test_fail "bad test - missing endpoint id"
+		fail_test "bad test - missing endpoint id"
 		return
 	fi
 
@@ -1634,6 +1634,8 @@ chk_prio_nr()
 {
 	local mp_prio_nr_tx=$1
 	local mp_prio_nr_rx=$2
+	local mpj_syn=$3
+	local mpj_syn_ack=$4
 	local count
 
 	print_check "ptx"
@@ -1655,6 +1657,26 @@ chk_prio_nr()
 	else
 		print_ok
 	fi
+
+	print_check "syn backup"
+	count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx")
+	if [ -z "$count" ]; then
+		print_skip
+	elif [ "$count" != "$mpj_syn" ]; then
+		fail_test "got $count JOIN[s] syn with Backup expected $mpj_syn"
+	else
+		print_ok
+	fi
+
+	print_check "synack backup"
+	count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx")
+	if [ -z "$count" ]; then
+		print_skip
+	elif [ "$count" != "$mpj_syn_ack" ]; then
+		fail_test "got $count JOIN[s] synack with Backup expected $mpj_syn_ack"
+	else
+		print_ok
+	fi
 }
 
 chk_subflow_nr()
@@ -2612,11 +2634,24 @@ backup_tests()
 		sflags=nobackup speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 1 1 1
-		chk_prio_nr 0 1
+		chk_prio_nr 0 1 1 0
 	fi
 
 	# single address, backup
 	if reset "single address, backup" &&
 	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
 		pm_nl_set_limits $ns1 0 1
+		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
+		pm_nl_set_limits $ns2 1 1
+		sflags=nobackup speed=slow \
+			run_tests $ns1 $ns2 10.0.1.1
+		chk_join_nr 1 1 1
+		chk_add_nr 1 1
+		chk_prio_nr 1 0 0 1
+	fi
+
+	# single address, switch to backup
+	if reset "single address, switch to backup" &&
+	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+		pm_nl_set_limits $ns1 0 1
 		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2625,20 +2660,20 @@ backup_tests()
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 1 1 1
 		chk_add_nr 1 1
-		chk_prio_nr 1 1
+		chk_prio_nr 1 1 0 0
 	fi
 
 	# single address with port, backup
 	if reset "single address with port, backup" &&
 	   continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
 		pm_nl_set_limits $ns1 0 1
-		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100
 		pm_nl_set_limits $ns2 1 1
-		sflags=backup speed=slow \
+		sflags=nobackup speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 1 1 1
 		chk_add_nr 1 1
-		chk_prio_nr 1 1
+		chk_prio_nr 1 0 0 1
 	fi
 
 	if reset "mpc backup" &&
@@ -2647,17 +2682,26 @@ backup_tests()
 		speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 0 0 0
-		chk_prio_nr 0 1
+		chk_prio_nr 0 1 0 0
 	fi
 
 	if reset "mpc backup both sides" &&
 	   continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
-		pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+		pm_nl_set_limits $ns1 0 2
+		pm_nl_set_limits $ns2 1 2
+		pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup
 		pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
 
+		# 10.0.2.2 (non-backup) -> 10.0.1.1 (backup)
+		pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+		# 10.0.1.2 (backup) -> 10.0.2.1 (non-backup)
+		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+		ip -net "$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path
+
 		speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
-		chk_join_nr 0 0 0
-		chk_prio_nr 1 1
+		chk_join_nr 2 2 2
+		chk_prio_nr 1 1 1 1
 	fi
 
 	if reset "mpc switch to backup" &&
@@ -2666,7 +2710,7 @@ backup_tests()
 		sflags=backup speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 0 0 0
-		chk_prio_nr 0 1
+		chk_prio_nr 0 1 0 0
 	fi
 
 	if reset "mpc switch to backup both sides" &&
@@ -2676,7 +2720,7 @@ backup_tests()
 		sflags=backup speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 0 0 0
-		chk_prio_nr 1 1
+		chk_prio_nr 1 1 0 0
 	fi
 }
 
@@ -3053,7 +3097,7 @@ fullmesh_tests()
 		addr_nr_ns2=1 sflags=backup,fullmesh speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 2 2 2
-		chk_prio_nr 0 1
+		chk_prio_nr 0 1 1 0
 		chk_rm_nr 0 1
 	fi
 
@@ -3066,7 +3110,7 @@ fullmesh_tests()
 		sflags=nobackup,nofullmesh speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 2 2 2
-		chk_prio_nr 0 1
+		chk_prio_nr 0 1 1 0
 		chk_rm_nr 0 1
 	fi
 }
@@ -3318,7 +3362,7 @@ userspace_tests()
 		sflags=backup speed=slow \
 			run_tests $ns1 $ns2 10.0.1.1
 		chk_join_nr 1 1 0
-		chk_prio_nr 0 0
+		chk_prio_nr 0 0 0 0
 	fi
 
 	# userspace pm type prevents rm_addr
@@ -3526,6 +3570,35 @@ endpoint_tests()
 		chk_mptcp_info subflows 1 subflows 1
 		mptcp_lib_kill_wait $tests_pid
 	fi
+
+	# remove and re-add
+	if reset "delete re-add signal" &&
+	   mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+		pm_nl_set_limits $ns1 1 1
+		pm_nl_set_limits $ns2 1 1
+		pm_nl_add_endpoint $ns1 10.0.2.1 id 1 flags signal
+		test_linkfail=4 speed=20 \
+			run_tests $ns1 $ns2 10.0.1.1 &
+		local tests_pid=$!
+
+		wait_mpj $ns2
+		pm_nl_check_endpoint "creation" \
+			$ns1 10.0.2.1 id 1 flags signal
+		chk_subflow_nr "before delete" 2
+		chk_mptcp_info subflows 1 subflows 1
+
+		pm_nl_del_endpoint $ns1 1 10.0.2.1
+		sleep 0.5
+		chk_subflow_nr "after delete" 1
+		chk_mptcp_info subflows 0 subflows 0
+
+		pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+		wait_mpj $ns2
+		chk_subflow_nr "after re-add" 2
+		chk_mptcp_info subflows 1 subflows 1
+		mptcp_lib_kill_wait $tests_pid
+	fi
 }
 
 # [$1: error message]