Merge tag 'net-6.7-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from BPF and netfilter.
  Current release - regressions:
   - core: fix undefined behavior in netdev name allocation
   - bpf: do not allocate percpu memory at init stage
   - netfilter: nf_tables: split async and sync catchall in two functions
   - mptcp: fix possible NULL pointer dereference on close

  Current release - new code bugs:
   - eth: ice: dpll: fix initial lock status of dpll

  Previous releases - regressions:
   - bpf: fix precision backtracking instruction iteration
   - af_unix: fix use-after-free in unix_stream_read_actor()
   - tipc: fix kernel-infoleak due to uninitialized TLV value
   - eth: bonding: stop the device in bond_setup_by_slave()
   - eth: mlx5:
      - fix double free of encap_header
      - avoid referencing skb after free-ing in drop path
   - eth: hns3: fix VF reset
   - eth: mvneta: fix calls to page_pool_get_stats

  Previous releases - always broken:
   - core: set SOCK_RCU_FREE before inserting socket into hashtable
   - bpf: fix control-flow graph checking in privileged mode
   - eth: ppp: limit MRU to 64K
   - eth: stmmac: avoid rx queue overrun
   - eth: icssg-prueth: fix error cleanup on failing initialization
   - eth: hns3: fix out-of-bounds access may occur when coalesce info is
     read via debugfs
   - eth: cortina: handle large frames

  Misc:
   - selftests: gso: support CONFIG_MAX_SKB_FRAGS up to 45"

* tag 'net-6.7-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (78 commits)
  macvlan: Don't propagate promisc change to lower dev in passthru
  net: sched: do not offload flows with a helper in act_ct
  net/mlx5e: Check return value of snprintf writing to fw_version buffer for representors
  net/mlx5e: Check return value of snprintf writing to fw_version buffer
  net/mlx5e: Reduce the size of icosq_str
  net/mlx5: Increase size of irq name buffer
  net/mlx5e: Update doorbell for port timestamping CQ before the software counter
  net/mlx5e: Track xmit submission to PTP WQ after populating metadata map
  net/mlx5e: Avoid referencing skb after free-ing in drop path of mlx5e_sq_xmit_wqe
  net/mlx5e: Don't modify the peer sent-to-vport rules for IPSec offload
  net/mlx5e: Fix pedit endianness
  net/mlx5e: fix double free of encap_header in update funcs
  net/mlx5e: fix double free of encap_header
  net/mlx5: Decouple PHC .adjtime and .adjphase implementations
  net/mlx5: DR, Allow old devices to use multi destination FTE
  net/mlx5: Free used cpus mask when an IRQ is released
  Revert "net/mlx5: DR, Supporting inline WQE when possible"
  bpf: Do not allocate percpu memory at init stage
  net: Fix undefined behavior in netdev name allocation
  dt-bindings: net: ethernet-controller: Fix formatting error
  ...
commit 7475e51b87
@@ -275,12 +275,12 @@ allOf:
       properties:
         rx-internal-delay-ps:
           description:
-            RGMII Receive Clock Delay defined in pico seconds.This is used for
+            RGMII Receive Clock Delay defined in pico seconds. This is used for
             controllers that have configurable RX internal delays. If this
             property is present then the MAC applies the RX delay.
         tx-internal-delay-ps:
           description:
-            RGMII Transmit Clock Delay defined in pico seconds.This is used for
+            RGMII Transmit Clock Delay defined in pico seconds. This is used for
             controllers that have configurable TX internal delays. If this
             property is present then the MAC applies the TX delay.
MAINTAINERS
@@ -21768,7 +21768,9 @@ F: Documentation/devicetree/bindings/counter/ti-eqep.yaml
 F:	drivers/counter/ti-eqep.c
 
 TI ETHERNET SWITCH DRIVER (CPSW)
 R:	Grygorii Strashko <grygorii.strashko@ti.com>
+R:	Siddharth Vadapalli <s-vadapalli@ti.com>
+R:	Ravi Gunasekaran <r-gunasekaran@ti.com>
 R:	Roger Quadros <rogerq@kernel.org>
 L:	linux-omap@vger.kernel.org
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -21792,6 +21794,15 @@ F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
 F:	drivers/media/i2c/ds90*
 F:	include/media/i2c/ds90*
 
+TI ICSSG ETHERNET DRIVER (ICSSG)
+R:	MD Danish Anwar <danishanwar@ti.com>
+R:	Roger Quadros <rogerq@kernel.org>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/net/ti,icss*.yaml
+F:	drivers/net/ethernet/ti/icssg/*
+
 TI J721E CSI2RX DRIVER
 M:	Jai Luthra <j-luthra@ti.com>
 L:	linux-media@vger.kernel.org
@@ -1500,6 +1500,10 @@ done:
 static void bond_setup_by_slave(struct net_device *bond_dev,
 				struct net_device *slave_dev)
 {
+	bool was_up = !!(bond_dev->flags & IFF_UP);
+
+	dev_close(bond_dev);
+
 	bond_dev->header_ops = slave_dev->header_ops;
 
 	bond_dev->type = slave_dev->type;
@@ -1514,6 +1518,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
 		bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
 		bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
 	}
+	if (was_up)
+		dev_open(bond_dev, NULL);
 }
 
 /* On bonding slaves other than the currently active slave, suppress
@@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
 	}
 
 	queue_work(pdsc->wq, &qcq->work);
-	pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
+	pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
 
 	return IRQ_HANDLED;
 }
@@ -15,7 +15,7 @@
 #define PDSC_DRV_DESCRIPTION	"AMD/Pensando Core Driver"
 
 #define PDSC_WATCHDOG_SECS	5
-#define PDSC_QUEUE_NAME_MAX_SZ  32
+#define PDSC_QUEUE_NAME_MAX_SZ  16
 #define PDSC_ADMINQ_MIN_LENGTH	16	/* must be a power of two */
 #define PDSC_NOTIFYQ_LENGTH	64	/* must be a power of two */
 #define PDSC_TEARDOWN_RECOVERY	false
@@ -261,10 +261,14 @@ static int pdsc_identify(struct pdsc *pdsc)
 	struct pds_core_drv_identity drv = {};
 	size_t sz;
 	int err;
+	int n;
 
 	drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
-	snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
-		 "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
+	/* Catching the return quiets a Wformat-truncation complaint */
+	n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+		     "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
+	if (n > sizeof(drv.driver_ver_str))
+		dev_dbg(pdsc->dev, "release name truncated, don't care\n");
 
 	/* Next let's get some info about the device
 	 * We use the devcmd_lock at this level in order to
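The pds_core hunk above (and the mlx5e drvinfo fixes later in this pull) relies on snprintf()'s return value to detect truncation. Below is a minimal, hedged userspace sketch of that pattern; the buffer size and strings are invented for illustration, and only the standard snprintf() semantics are assumed.

#include <stdio.h>

int main(void)
{
	char ver[16];	/* hypothetical small destination buffer */
	int n;

	/* snprintf() returns the length the full string *would* have had,
	 * so a return value >= sizeof(ver) means the output was truncated. */
	n = snprintf(ver, sizeof(ver), "%s %s", "pds_core", "6.7.0-rc2-very-long-tag");
	if (n >= (int)sizeof(ver))
		fprintf(stderr, "version truncated to \"%s\" (needed %d bytes)\n", ver, n + 1);
	else
		printf("version: %s\n", ver);

	return 0;
}

Note that the strict truncation test is "n >= sizeof(buf)"; the kernel hunk above uses ">", which is enough to silence the -Wformat-truncation warning and, per its own comment, treats the exact-fit boundary as "don't care".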
@@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
 	struct pds_core_fw_list_info fw_list;
 	struct pdsc *pdsc = devlink_priv(dl);
 	union pds_core_dev_comp comp;
-	char buf[16];
+	char buf[32];
 	int listlen;
 	int err;
 	int i;
@ -6889,7 +6889,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
|
||||
desc_idx, *post_ptr);
|
||||
drop_it_no_recycle:
|
||||
/* Other statistics kept track of by card. */
|
||||
tp->rx_dropped++;
|
||||
tnapi->rx_dropped++;
|
||||
goto next_pkt;
|
||||
}
|
||||
|
||||
@ -7918,8 +7918,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
|
||||
|
||||
segs = skb_gso_segment(skb, tp->dev->features &
|
||||
~(NETIF_F_TSO | NETIF_F_TSO6));
|
||||
if (IS_ERR(segs) || !segs)
|
||||
if (IS_ERR(segs) || !segs) {
|
||||
tnapi->tx_dropped++;
|
||||
goto tg3_tso_bug_end;
|
||||
}
|
||||
|
||||
skb_list_walk_safe(segs, seg, next) {
|
||||
skb_mark_not_on_list(seg);
|
||||
@ -8190,7 +8192,7 @@ dma_error:
|
||||
drop:
|
||||
dev_kfree_skb_any(skb);
|
||||
drop_nofree:
|
||||
tp->tx_dropped++;
|
||||
tnapi->tx_dropped++;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
@ -9405,7 +9407,7 @@ static void __tg3_set_rx_mode(struct net_device *);
|
||||
/* tp->lock is held. */
|
||||
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
|
||||
{
|
||||
int err;
|
||||
int err, i;
|
||||
|
||||
tg3_stop_fw(tp);
|
||||
|
||||
@ -9426,6 +9428,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
|
||||
|
||||
/* And make sure the next sample is new data */
|
||||
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
|
||||
|
||||
for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
|
||||
struct tg3_napi *tnapi = &tp->napi[i];
|
||||
|
||||
tnapi->rx_dropped = 0;
|
||||
tnapi->tx_dropped = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
@@ -11975,6 +11984,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
 {
 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+	unsigned long rx_dropped;
+	unsigned long tx_dropped;
+	int i;
 
 	stats->rx_packets = old_stats->rx_packets +
 		get_stat64(&hw_stats->rx_ucast_packets) +
@@ -12021,8 +12033,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
 	stats->rx_missed_errors = old_stats->rx_missed_errors +
 		get_stat64(&hw_stats->rx_discards);
 
-	stats->rx_dropped = tp->rx_dropped;
-	stats->tx_dropped = tp->tx_dropped;
+	/* Aggregate per-queue counters. The per-queue counters are updated
+	 * by a single writer, race-free. The result computed by this loop
+	 * might not be 100% accurate (counters can be updated in the middle of
+	 * the loop) but the next tg3_get_nstats() will recompute the current
+	 * value so it is acceptable.
+	 *
+	 * Note that these counters wrap around at 4G on 32bit machines.
+	 */
+	rx_dropped = (unsigned long)(old_stats->rx_dropped);
+	tx_dropped = (unsigned long)(old_stats->tx_dropped);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		rx_dropped += tnapi->rx_dropped;
+		tx_dropped += tnapi->tx_dropped;
+	}
+
+	stats->rx_dropped = rx_dropped;
+	stats->tx_dropped = tx_dropped;
 }
 
 static int tg3_get_regs_len(struct net_device *dev)
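The tg3 change (this hunk plus the tg3.h hunks that follow) moves the drop counters from the device struct into per-NAPI-queue fields that only each queue's own path increments, then sums them when statistics are read. A hedged, userspace-only sketch of that "one writer per counter, aggregate on read" layout; the queue count and field names are invented for illustration.

#include <stdio.h>

#define NUM_QUEUES 4	/* hypothetical number of queues */

struct queue_stats {
	unsigned long rx_dropped;	/* written only by this queue's rx path */
	unsigned long tx_dropped;	/* written only by this queue's tx path */
};

static struct queue_stats queues[NUM_QUEUES];

/* Reader side: sum the per-queue counters into one report.  The result may
 * lag a concurrent writer slightly, but no lock is needed because each
 * counter has exactly one writer. */
static void get_stats(unsigned long *rx, unsigned long *tx)
{
	int i;

	*rx = 0;
	*tx = 0;
	for (i = 0; i < NUM_QUEUES; i++) {
		*rx += queues[i].rx_dropped;
		*tx += queues[i].tx_dropped;
	}
}

int main(void)
{
	unsigned long rx, tx;

	queues[0].rx_dropped = 3;	/* simulate drops on different queues */
	queues[2].tx_dropped = 1;
	get_stats(&rx, &tx);
	printf("rx_dropped=%lu tx_dropped=%lu\n", rx, tx);
	return 0;
}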
@ -3018,6 +3018,7 @@ struct tg3_napi {
|
||||
u16 *rx_rcb_prod_idx;
|
||||
struct tg3_rx_prodring_set prodring;
|
||||
struct tg3_rx_buffer_desc *rx_rcb;
|
||||
unsigned long rx_dropped;
|
||||
|
||||
u32 tx_prod ____cacheline_aligned;
|
||||
u32 tx_cons;
|
||||
@ -3026,6 +3027,7 @@ struct tg3_napi {
|
||||
u32 prodmbox;
|
||||
struct tg3_tx_buffer_desc *tx_ring;
|
||||
struct tg3_tx_ring_info *tx_buffers;
|
||||
unsigned long tx_dropped;
|
||||
|
||||
dma_addr_t status_mapping;
|
||||
dma_addr_t rx_rcb_mapping;
|
||||
@ -3220,8 +3222,6 @@ struct tg3 {
|
||||
|
||||
|
||||
/* begin "everything else" cacheline(s) section */
|
||||
unsigned long rx_dropped;
|
||||
unsigned long tx_dropped;
|
||||
struct rtnl_link_stats64 net_stats_prev;
|
||||
struct tg3_ethtool_stats estats_prev;
|
||||
|
||||
|
@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
|
||||
.val = CONFIG0_MAXLEN_1536,
|
||||
},
|
||||
{
|
||||
.max_l3_len = 1542,
|
||||
.val = CONFIG0_MAXLEN_1542,
|
||||
.max_l3_len = 1548,
|
||||
.val = CONFIG0_MAXLEN_1548,
|
||||
},
|
||||
{
|
||||
.max_l3_len = 9212,
|
||||
@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
|
||||
dma_addr_t mapping;
|
||||
unsigned short mtu;
|
||||
void *buffer;
|
||||
int ret;
|
||||
|
||||
mtu = ETH_HLEN;
|
||||
mtu += netdev->mtu;
|
||||
@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
|
||||
word3 |= mtu;
|
||||
}
|
||||
|
||||
if (skb->ip_summed != CHECKSUM_NONE) {
|
||||
if (skb->len >= ETH_FRAME_LEN) {
|
||||
/* Hardware offloaded checksumming isn't working on frames
|
||||
* bigger than 1514 bytes. A hypothesis about this is that the
|
||||
* checksum buffer is only 1518 bytes, so when the frames get
|
||||
* bigger they get truncated, or the last few bytes get
|
||||
* overwritten by the FCS.
|
||||
*
|
||||
* Just use software checksumming and bypass on bigger frames.
|
||||
*/
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
ret = skb_checksum_help(skb);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
word1 |= TSS_BYPASS_BIT;
|
||||
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
||||
int tcp = 0;
|
||||
|
||||
/* We do not switch off the checksumming on non TCP/UDP
|
||||
* frames: as is shown from tests, the checksumming engine
|
||||
* is smart enough to see that a frame is not actually TCP
|
||||
* or UDP and then just pass it through without any changes
|
||||
* to the frame.
|
||||
*/
|
||||
if (skb->protocol == htons(ETH_P_IP)) {
|
||||
word1 |= TSS_IP_CHKSUM_BIT;
|
||||
tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
|
||||
@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static netdev_features_t gmac_fix_features(struct net_device *netdev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
|
||||
features &= ~GMAC_OFFLOAD_FEATURES;
|
||||
|
||||
return features;
|
||||
}
|
||||
|
||||
static int gmac_set_features(struct net_device *netdev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
|
||||
.ndo_set_mac_address = gmac_set_mac_address,
|
||||
.ndo_get_stats64 = gmac_get_stats64,
|
||||
.ndo_change_mtu = gmac_change_mtu,
|
||||
.ndo_fix_features = gmac_fix_features,
|
||||
.ndo_set_features = gmac_set_features,
|
||||
};
|
||||
|
||||
@@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
 
 	netdev->hw_features = GMAC_OFFLOAD_FEATURES;
 	netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
-	/* We can handle jumbo frames up to 10236 bytes so, let's accept
-	 * payloads of 10236 bytes minus VLAN and ethernet header
+	/* We can receive jumbo frames up to 10236 bytes but only
+	 * transmit 2047 bytes so, let's accept payloads of 2047
+	 * bytes minus VLAN and ethernet header
 	 */
 	netdev->min_mtu = ETH_MIN_MTU;
-	netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
+	netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
 
 	port->freeq_refill = 0;
 	netif_napi_add(netdev, &port->napi, gmac_napi_poll);
@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
 #define SOF_BIT		0x80000000
 #define EOF_BIT		0x40000000
 #define EOFIE_BIT	BIT(29)
-#define MTU_SIZE_BIT_MASK	0x1fff
+#define MTU_SIZE_BIT_MASK	0x7ff	/* Max MTU 2047 bytes */
 
 /* GMAC Tx Descriptor */
 struct gmac_txdesc {
@@ -787,7 +787,7 @@ union gmac_config0 {
 #define  CONFIG0_MAXLEN_1536	0
 #define  CONFIG0_MAXLEN_1518	1
 #define  CONFIG0_MAXLEN_1522	2
-#define  CONFIG0_MAXLEN_1542	3
+#define  CONFIG0_MAXLEN_1548	3
 #define  CONFIG0_MAXLEN_9k	4	/* 9212 */
 #define  CONFIG0_MAXLEN_10k	5	/* 10236 */
 #define  CONFIG0_MAXLEN_1518__6	6
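The gemini header hunks cap the TX descriptor length mask at 0x7ff, and the probe hunk above derives the advertised max_mtu from that mask. A hedged arithmetic sketch of the derivation; the header-size constants follow the usual Ethernet/VLAN values and this is not driver code.

#include <stdio.h>

#define MTU_SIZE_BIT_MASK 0x7ff	/* TX descriptor length field: max 2047 bytes */
#define ETH_HLEN          14
#define VLAN_HLEN         4
#define VLAN_ETH_HLEN     (ETH_HLEN + VLAN_HLEN)

int main(void)
{
	/* Largest payload the hardware can transmit once the Ethernet and
	 * VLAN headers are subtracted from the 2047-byte frame limit. */
	printf("max_mtu = %d - %d = %d\n",
	       MTU_SIZE_BIT_MASK, VLAN_ETH_HLEN,
	       MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN);
	return 0;
}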
@@ -254,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
 	if (block->tx) {
 		if (block->tx->q_num < priv->tx_cfg.num_queues)
 			reschedule |= gve_tx_poll(block, budget);
-		else
+		else if (budget)
 			reschedule |= gve_xdp_poll(block, budget);
 	}
 
+	if (!budget)
+		return 0;
+
 	if (block->rx) {
 		work_done = gve_rx_poll(block, budget);
 		reschedule |= work_done == budget;
@@ -298,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
 	if (block->tx)
 		reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
 
+	if (!budget)
+		return 0;
+
 	if (block->rx) {
 		work_done = gve_rx_poll_dqo(block, budget);
 		reschedule |= work_done == budget;
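The gve hunks above, together with the gve_rx.c/gve_tx.c hunks that follow (which drop the old "budget == 0 means do all the work" fallback), follow the NAPI contract that a zero budget, as used by netpoll, allows TX completion work only and no RX processing. A hedged sketch of that control flow with stand-in poll helpers; the names are invented and are not the gve API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for a driver's TX-completion and RX processing routines. */
static bool fake_tx_poll(void)       { puts("tx completions"); return false; }
static int  fake_rx_poll(int budget) { printf("rx, budget %d\n", budget); return 0; }

/* NAPI-style poll: with budget == 0 (netpoll), clean TX only and return 0
 * without doing any RX work. */
static int example_poll(int budget)
{
	bool reschedule = false;
	int work_done;

	reschedule |= fake_tx_poll();

	if (!budget)
		return 0;

	work_done = fake_rx_poll(budget);
	reschedule |= (work_done == budget);

	return reschedule ? budget : work_done;
}

int main(void)
{
	example_poll(0);	/* netpoll-style call: TX only */
	example_poll(64);	/* normal NAPI call */
	return 0;
}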
@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
|
||||
|
||||
feat = block->napi.dev->features;
|
||||
|
||||
/* If budget is 0, do all the work */
|
||||
if (budget == 0)
|
||||
budget = INT_MAX;
|
||||
|
||||
if (budget > 0)
|
||||
work_done = gve_clean_rx_done(rx, budget, feat);
|
||||
|
||||
|
@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
|
||||
bool repoll;
|
||||
u32 to_do;
|
||||
|
||||
/* If budget is 0, do all the work */
|
||||
if (budget == 0)
|
||||
budget = INT_MAX;
|
||||
|
||||
/* Find out how much work there is to be done */
|
||||
nic_done = gve_tx_load_event_counter(priv, tx);
|
||||
to_do = min_t(u32, (nic_done - tx->done), budget);
|
||||
|
@@ -503,11 +503,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
 	}
 
 	sprintf(result[j++], "%d", i);
-	sprintf(result[j++], "%s", dim_state_str[dim->state]);
+	sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
+		dim_state_str[dim->state] : "unknown");
 	sprintf(result[j++], "%u", dim->profile_ix);
-	sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
+	sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+		dim_cqe_mode_str[dim->mode] : "unknown");
 	sprintf(result[j++], "%s",
-		dim_tune_stat_str[dim->tune_state]);
+		dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+		dim_tune_stat_str[dim->tune_state] : "unknown");
 	sprintf(result[j++], "%u", dim->steps_left);
 	sprintf(result[j++], "%u", dim->steps_right);
 	sprintf(result[j++], "%u", dim->tired);
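The hns3 debugfs hunk above guards every enum-to-string table lookup with an ARRAY_SIZE() bounds check and an "unknown" fallback, since the index comes from state that may lie outside the table. A hedged generic sketch of that lookup helper; the table contents and helper name are invented for illustration.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const dim_state_str[] = { "start", "in_prog", "apply" };

/* Return the table entry when the index is in range, otherwise a fixed
 * fallback string; never reads past the end of the table. */
static const char *str_or_unknown(const char * const *tbl, size_t len, unsigned int idx)
{
	return idx < len ? tbl[idx] : "unknown";
}

int main(void)
{
	printf("%s\n", str_or_unknown(dim_state_str, ARRAY_SIZE(dim_state_str), 1)); /* in range */
	printf("%s\n", str_or_unknown(dim_state_str, ARRAY_SIZE(dim_state_str), 7)); /* "unknown" */
	return 0;
}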
@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
|
||||
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
||||
char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
|
||||
struct hnae3_handle *h = priv->ae_handle;
|
||||
u8 mac_addr_temp[ETH_ALEN];
|
||||
u8 mac_addr_temp[ETH_ALEN] = {0};
|
||||
int ret = 0;
|
||||
|
||||
if (h->ae_algo->ops->get_mac_addr)
|
||||
|
@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
|
||||
static void hclge_update_fec_stats(struct hclge_dev *hdev);
|
||||
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
|
||||
int wait_cnt);
|
||||
static int hclge_update_port_info(struct hclge_dev *hdev);
|
||||
|
||||
static struct hnae3_ae_algo ae_algo;
|
||||
|
||||
@ -3041,6 +3042,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
|
||||
|
||||
if (state != hdev->hw.mac.link) {
|
||||
hdev->hw.mac.link = state;
|
||||
if (state == HCLGE_LINK_STATUS_UP)
|
||||
hclge_update_port_info(hdev);
|
||||
|
||||
client->ops->link_status_change(handle, state);
|
||||
hclge_config_mac_tnl_int(hdev, state);
|
||||
if (rclient && rclient->ops->link_status_change)
|
||||
@ -10025,8 +10029,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
|
||||
struct hclge_vport_vlan_cfg *vlan, *tmp;
|
||||
struct hclge_dev *hdev = vport->back;
|
||||
|
||||
mutex_lock(&hdev->vport_lock);
|
||||
|
||||
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
|
||||
if (vlan->vlan_id == vlan_id) {
|
||||
if (is_write_tbl && vlan->hd_tbl_status)
|
||||
@ -10041,8 +10043,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
}
|
||||
|
||||
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
|
||||
@ -10451,11 +10451,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
|
||||
* handle mailbox. Just record the vlan id, and remove it after
|
||||
* reset finished.
|
||||
*/
|
||||
mutex_lock(&hdev->vport_lock);
|
||||
if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
|
||||
test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
|
||||
set_bit(vlan_id, vport->vlan_del_fail_bmap);
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
return -EBUSY;
|
||||
} else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
|
||||
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
|
||||
}
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
|
||||
/* when port base vlan enabled, we use port base vlan as the vlan
|
||||
* filter entry. In this case, we don't update vlan filter table
|
||||
@ -10470,17 +10475,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
if (!is_kill)
|
||||
if (!is_kill) {
|
||||
hclge_add_vport_vlan_table(vport, vlan_id,
|
||||
writen_to_tbl);
|
||||
else if (is_kill && vlan_id != 0)
|
||||
} else if (is_kill && vlan_id != 0) {
|
||||
mutex_lock(&hdev->vport_lock);
|
||||
hclge_rm_vport_vlan_table(vport, vlan_id, false);
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
}
|
||||
} else if (is_kill) {
|
||||
/* when remove hw vlan filter failed, record the vlan id,
|
||||
* and try to remove it from hw later, to be consistence
|
||||
* with stack
|
||||
*/
|
||||
mutex_lock(&hdev->vport_lock);
|
||||
set_bit(vlan_id, vport->vlan_del_fail_bmap);
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
}
|
||||
|
||||
hclge_set_vport_vlan_fltr_change(vport);
|
||||
@ -10520,6 +10530,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
|
||||
int i, ret, sync_cnt = 0;
|
||||
u16 vlan_id;
|
||||
|
||||
mutex_lock(&hdev->vport_lock);
|
||||
/* start from vport 1 for PF is always alive */
|
||||
for (i = 0; i < hdev->num_alloc_vport; i++) {
|
||||
struct hclge_vport *vport = &hdev->vport[i];
|
||||
@ -10530,21 +10541,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
|
||||
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
|
||||
vport->vport_id, vlan_id,
|
||||
true);
|
||||
if (ret && ret != -EINVAL)
|
||||
if (ret && ret != -EINVAL) {
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
|
||||
hclge_rm_vport_vlan_table(vport, vlan_id, false);
|
||||
hclge_set_vport_vlan_fltr_change(vport);
|
||||
|
||||
sync_cnt++;
|
||||
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
|
||||
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
|
||||
VLAN_N_VID);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&hdev->vport_lock);
|
||||
|
||||
hclge_sync_vlan_fltr_state(hdev);
|
||||
}
|
||||
@ -11651,6 +11667,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
|
||||
goto err_msi_irq_uninit;
|
||||
|
||||
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
|
||||
clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
|
||||
if (hnae3_dev_phy_imp_supported(hdev))
|
||||
ret = hclge_update_tp_port_info(hdev);
|
||||
else
|
||||
|
@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
|
||||
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
|
||||
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
|
||||
return -EBUSY;
|
||||
} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
|
||||
clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
|
||||
}
|
||||
|
||||
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
|
||||
@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
|
||||
int ret, sync_cnt = 0;
|
||||
u16 vlan_id;
|
||||
|
||||
if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
|
||||
return;
|
||||
|
||||
rtnl_lock();
|
||||
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
|
||||
while (vlan_id != VLAN_N_VID) {
|
||||
ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
|
||||
vlan_id, true);
|
||||
if (ret)
|
||||
return;
|
||||
break;
|
||||
|
||||
clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
|
||||
sync_cnt++;
|
||||
if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
|
||||
return;
|
||||
break;
|
||||
|
||||
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
|
||||
}
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
|
||||
@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
|
||||
return HCLGEVF_VECTOR0_EVENT_OTHER;
|
||||
}
|
||||
|
||||
static void hclgevf_reset_timer(struct timer_list *t)
|
||||
{
|
||||
struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
|
||||
|
||||
hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
|
||||
hclgevf_reset_task_schedule(hdev);
|
||||
}
|
||||
|
||||
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
|
||||
{
|
||||
#define HCLGEVF_RESET_DELAY 5
|
||||
|
||||
enum hclgevf_evt_cause event_cause;
|
||||
struct hclgevf_dev *hdev = data;
|
||||
u32 clearval;
|
||||
@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
|
||||
|
||||
switch (event_cause) {
|
||||
case HCLGEVF_VECTOR0_EVENT_RST:
|
||||
hclgevf_reset_task_schedule(hdev);
|
||||
mod_timer(&hdev->reset_timer,
|
||||
jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
|
||||
break;
|
||||
case HCLGEVF_VECTOR0_EVENT_MBX:
|
||||
hclgevf_mbx_handler(hdev);
|
||||
@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
|
||||
HCLGEVF_DRIVER_NAME);
|
||||
|
||||
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
|
||||
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -219,6 +219,7 @@ struct hclgevf_dev {
|
||||
enum hnae3_reset_type reset_level;
|
||||
unsigned long reset_pending;
|
||||
enum hnae3_reset_type reset_type;
|
||||
struct timer_list reset_timer;
|
||||
|
||||
#define HCLGEVF_RESET_REQUESTED 0
|
||||
#define HCLGEVF_RESET_PENDING 1
|
||||
|
@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
|
||||
i++;
|
||||
}
|
||||
|
||||
/* ensure additional_info will be seen after received_resp */
|
||||
smp_rmb();
|
||||
|
||||
if (i >= HCLGEVF_MAX_TRY_TIMES) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
|
||||
@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
|
||||
resp->resp_status = hclgevf_resp_to_errno(resp_status);
|
||||
memcpy(resp->additional_info, req->msg.resp_data,
|
||||
HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
|
||||
|
||||
/* ensure additional_info will be seen before setting received_resp */
|
||||
smp_wmb();
|
||||
|
||||
if (match_id) {
|
||||
/* If match_id is not zero, it means PF support match_id.
|
||||
* if the match_id is right, VF get the right response, or
|
||||
|
@ -1479,14 +1479,14 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_download_pkg
|
||||
* ice_download_pkg_with_sig_seg
|
||||
* @hw: pointer to the hardware structure
|
||||
* @pkg_hdr: pointer to package header
|
||||
*
|
||||
* Handles the download of a complete package.
|
||||
*/
|
||||
static enum ice_ddp_state
|
||||
ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
|
||||
ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
|
||||
{
|
||||
enum ice_aq_err aq_err = hw->adminq.sq_last_status;
|
||||
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
|
||||
@ -1519,6 +1519,103 @@ ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
|
||||
state = ice_post_dwnld_pkg_actions(hw);
|
||||
|
||||
ice_release_global_cfg_lock(hw);
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_dwnld_cfg_bufs
|
||||
* @hw: pointer to the hardware structure
|
||||
* @bufs: pointer to an array of buffers
|
||||
* @count: the number of buffers in the array
|
||||
*
|
||||
* Obtains global config lock and downloads the package configuration buffers
|
||||
* to the firmware.
|
||||
*/
|
||||
static enum ice_ddp_state
|
||||
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
|
||||
{
|
||||
enum ice_ddp_state state;
|
||||
struct ice_buf_hdr *bh;
|
||||
int status;
|
||||
|
||||
if (!bufs || !count)
|
||||
return ICE_DDP_PKG_ERR;
|
||||
|
||||
/* If the first buffer's first section has its metadata bit set
|
||||
* then there are no buffers to be downloaded, and the operation is
|
||||
* considered a success.
|
||||
*/
|
||||
bh = (struct ice_buf_hdr *)bufs;
|
||||
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
|
||||
return ICE_DDP_PKG_SUCCESS;
|
||||
|
||||
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
|
||||
if (status) {
|
||||
if (status == -EALREADY)
|
||||
return ICE_DDP_PKG_ALREADY_LOADED;
|
||||
return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
|
||||
}
|
||||
|
||||
state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
|
||||
if (!state)
|
||||
state = ice_post_dwnld_pkg_actions(hw);
|
||||
|
||||
ice_release_global_cfg_lock(hw);
|
||||
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_download_pkg_without_sig_seg
|
||||
* @hw: pointer to the hardware structure
|
||||
* @ice_seg: pointer to the segment of the package to be downloaded
|
||||
*
|
||||
* Handles the download of a complete package without signature segment.
|
||||
*/
|
||||
static enum ice_ddp_state
|
||||
ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
|
||||
{
|
||||
struct ice_buf_table *ice_buf_tbl;
|
||||
|
||||
ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
|
||||
ice_seg->hdr.seg_format_ver.major,
|
||||
ice_seg->hdr.seg_format_ver.minor,
|
||||
ice_seg->hdr.seg_format_ver.update,
|
||||
ice_seg->hdr.seg_format_ver.draft);
|
||||
|
||||
ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
|
||||
le32_to_cpu(ice_seg->hdr.seg_type),
|
||||
le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
|
||||
|
||||
ice_buf_tbl = ice_find_buf_table(ice_seg);
|
||||
|
||||
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
|
||||
le32_to_cpu(ice_buf_tbl->buf_count));
|
||||
|
||||
return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
|
||||
le32_to_cpu(ice_buf_tbl->buf_count));
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_download_pkg
|
||||
* @hw: pointer to the hardware structure
|
||||
* @pkg_hdr: pointer to package header
|
||||
* @ice_seg: pointer to the segment of the package to be downloaded
|
||||
*
|
||||
* Handles the download of a complete package.
|
||||
*/
|
||||
static enum ice_ddp_state
|
||||
ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
|
||||
struct ice_seg *ice_seg)
|
||||
{
|
||||
enum ice_ddp_state state;
|
||||
|
||||
if (hw->pkg_has_signing_seg)
|
||||
state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
|
||||
else
|
||||
state = ice_download_pkg_without_sig_seg(hw, ice_seg);
|
||||
|
||||
ice_post_pkg_dwnld_vlan_mode_cfg(hw);
|
||||
|
||||
return state;
|
||||
@ -2083,7 +2180,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
|
||||
|
||||
/* initialize package hints and then download package */
|
||||
ice_init_pkg_hints(hw, seg);
|
||||
state = ice_download_pkg(hw, pkg);
|
||||
state = ice_download_pkg(hw, pkg, seg);
|
||||
if (state == ICE_DDP_PKG_ALREADY_LOADED) {
|
||||
ice_debug(hw, ICE_DBG_INIT,
|
||||
"package previously loaded - no work.\n");
|
||||
|
@ -815,12 +815,6 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
|
||||
struct ice_pf *pf = d->pf;
|
||||
int ret;
|
||||
|
||||
if (prio > ICE_DPLL_PRIO_MAX) {
|
||||
NL_SET_ERR_MSG_FMT(extack, "prio out of supported range 0-%d",
|
||||
ICE_DPLL_PRIO_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&pf->dplls.lock);
|
||||
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
|
||||
mutex_unlock(&pf->dplls.lock);
|
||||
@ -1756,6 +1750,7 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
|
||||
}
|
||||
d->pf = pf;
|
||||
if (cgu) {
|
||||
ice_dpll_update_state(pf, d, true);
|
||||
ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
|
||||
if (ret) {
|
||||
dpll_device_put(d->dpll);
|
||||
@ -1796,8 +1791,6 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
|
||||
struct ice_dplls *d = &pf->dplls;
|
||||
struct kthread_worker *kworker;
|
||||
|
||||
ice_dpll_update_state(pf, &d->eec, true);
|
||||
ice_dpll_update_state(pf, &d->pps, true);
|
||||
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
|
||||
kworker = kthread_create_worker(0, "ice-dplls-%s",
|
||||
dev_name(ice_pf_to_dev(pf)));
|
||||
@ -1830,6 +1823,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
|
||||
int num_pins, i, ret = -EINVAL;
|
||||
struct ice_hw *hw = &pf->hw;
|
||||
struct ice_dpll_pin *pins;
|
||||
unsigned long caps;
|
||||
u8 freq_supp_num;
|
||||
bool input;
|
||||
|
||||
@ -1849,6 +1843,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
|
||||
}
|
||||
|
||||
for (i = 0; i < num_pins; i++) {
|
||||
caps = 0;
|
||||
pins[i].idx = i;
|
||||
pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input);
|
||||
pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input);
|
||||
@ -1861,8 +1856,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
|
||||
&dp->input_prio[i]);
|
||||
if (ret)
|
||||
return ret;
|
||||
pins[i].prop.capabilities |=
|
||||
DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
|
||||
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
|
||||
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
|
||||
pins[i].prop.phase_range.min =
|
||||
pf->dplls.input_phase_adj_max;
|
||||
pins[i].prop.phase_range.max =
|
||||
@ -1872,9 +1867,11 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
|
||||
pf->dplls.output_phase_adj_max;
|
||||
pins[i].prop.phase_range.max =
|
||||
-pf->dplls.output_phase_adj_max;
|
||||
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
pins[i].prop.capabilities |=
|
||||
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
|
||||
pins[i].prop.capabilities = caps;
|
||||
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -6,7 +6,6 @@
|
||||
|
||||
#include "ice.h"
|
||||
|
||||
#define ICE_DPLL_PRIO_MAX 0xF
|
||||
#define ICE_DPLL_RCLK_NUM_MAX 4
|
||||
|
||||
/** ice_dpll_pin - store info about pins
|
||||
|
@ -3961,3 +3961,57 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ice_cgu_get_output_pin_state_caps - get output pin state capabilities
|
||||
* @hw: pointer to the hw struct
|
||||
* @pin_id: id of a pin
|
||||
* @caps: capabilities to modify
|
||||
*
|
||||
* Return:
|
||||
* * 0 - success, state capabilities were modified
|
||||
* * negative - failure, capabilities were not modified
|
||||
*/
|
||||
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
|
||||
unsigned long *caps)
|
||||
{
|
||||
bool can_change = true;
|
||||
|
||||
switch (hw->device_id) {
|
||||
case ICE_DEV_ID_E810C_SFP:
|
||||
if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
|
||||
can_change = false;
|
||||
break;
|
||||
case ICE_DEV_ID_E810C_QSFP:
|
||||
if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
|
||||
can_change = false;
|
||||
break;
|
||||
case ICE_DEV_ID_E823L_10G_BASE_T:
|
||||
case ICE_DEV_ID_E823L_1GBE:
|
||||
case ICE_DEV_ID_E823L_BACKPLANE:
|
||||
case ICE_DEV_ID_E823L_QSFP:
|
||||
case ICE_DEV_ID_E823L_SFP:
|
||||
case ICE_DEV_ID_E823C_10G_BASE_T:
|
||||
case ICE_DEV_ID_E823C_BACKPLANE:
|
||||
case ICE_DEV_ID_E823C_QSFP:
|
||||
case ICE_DEV_ID_E823C_SFP:
|
||||
case ICE_DEV_ID_E823C_SGMII:
|
||||
if (hw->cgu_part_number ==
|
||||
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
|
||||
pin_id == ZL_OUT2)
|
||||
can_change = false;
|
||||
else if (hw->cgu_part_number ==
|
||||
ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
|
||||
pin_id == SI_OUT1)
|
||||
can_change = false;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (can_change)
|
||||
*caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
|
||||
else
|
||||
*caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -282,6 +282,8 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
|
||||
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
|
||||
|
||||
void ice_ptp_init_phy_model(struct ice_hw *hw);
|
||||
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
|
||||
unsigned long *caps);
|
||||
|
||||
#define PFTSYN_SEM_BYTES 4
|
||||
|
||||
|
@ -4790,14 +4790,17 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
|
||||
u8 *data)
|
||||
{
|
||||
if (sset == ETH_SS_STATS) {
|
||||
struct mvneta_port *pp = netdev_priv(netdev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
|
||||
memcpy(data + i * ETH_GSTRING_LEN,
|
||||
mvneta_statistics[i].name, ETH_GSTRING_LEN);
|
||||
|
||||
data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
|
||||
page_pool_ethtool_stats_get_strings(data);
|
||||
if (!pp->bm_priv) {
|
||||
data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
|
||||
page_pool_ethtool_stats_get_strings(data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
|
||||
struct page_pool_stats stats = {};
|
||||
int i;
|
||||
|
||||
for (i = 0; i < rxq_number; i++)
|
||||
page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
|
||||
for (i = 0; i < rxq_number; i++) {
|
||||
if (pp->rxqs[i].page_pool)
|
||||
page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
|
||||
}
|
||||
|
||||
page_pool_ethtool_stats_get(data, &stats);
|
||||
}
|
||||
@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
|
||||
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
|
||||
*data++ = pp->ethtool_stats[i];
|
||||
|
||||
mvneta_ethtool_pp_stats(pp, data);
|
||||
if (!pp->bm_priv)
|
||||
mvneta_ethtool_pp_stats(pp, data);
|
||||
}
|
||||
|
||||
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
|
||||
{
|
||||
if (sset == ETH_SS_STATS)
|
||||
return ARRAY_SIZE(mvneta_statistics) +
|
||||
page_pool_ethtool_stats_get_count();
|
||||
if (sset == ETH_SS_STATS) {
|
||||
int count = ARRAY_SIZE(mvneta_statistics);
|
||||
struct mvneta_port *pp = netdev_priv(dev);
|
||||
|
||||
if (!pp->bm_priv)
|
||||
count += page_pool_ethtool_stats_get_count();
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
@ -177,6 +177,8 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
|
||||
|
||||
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
|
||||
struct mlx5_cqe64 *cqe,
|
||||
u8 *md_buff,
|
||||
u8 *md_buff_sz,
|
||||
int budget)
|
||||
{
|
||||
struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
|
||||
@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
|
||||
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
|
||||
out:
|
||||
napi_consume_skb(skb, budget);
|
||||
mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
|
||||
md_buff[*md_buff_sz++] = metadata_id;
|
||||
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
|
||||
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
|
||||
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
|
||||
}
|
||||
|
||||
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
|
||||
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
|
||||
{
|
||||
struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
|
||||
struct mlx5_cqwq *cqwq = &cq->wq;
|
||||
int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
|
||||
u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
|
||||
u8 metadata_buff_sz = 0;
|
||||
struct mlx5_cqwq *cqwq;
|
||||
struct mlx5_cqe64 *cqe;
|
||||
int work_done = 0;
|
||||
|
||||
cqwq = &cq->wq;
|
||||
|
||||
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
|
||||
return false;
|
||||
|
||||
@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
|
||||
do {
|
||||
mlx5_cqwq_pop(cqwq);
|
||||
|
||||
mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
|
||||
mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
|
||||
metadata_buff, &metadata_buff_sz, napi_budget);
|
||||
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
|
||||
|
||||
mlx5_cqwq_update_db_record(cqwq);
|
||||
@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
|
||||
/* ensure cq space is freed before enabling more cqes */
|
||||
wmb();
|
||||
|
||||
while (metadata_buff_sz > 0)
|
||||
mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
|
||||
metadata_buff[--metadata_buff_sz]);
|
||||
|
||||
mlx5e_txqsq_wake(&ptpsq->txqsq);
|
||||
|
||||
return work_done == budget;
|
||||
|
@ -492,11 +492,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
|
||||
|
||||
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
|
||||
{
|
||||
char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
|
||||
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
|
||||
struct mlx5e_icosq *icosq = rq->icosq;
|
||||
struct mlx5e_priv *priv = rq->priv;
|
||||
struct mlx5e_err_ctx err_ctx = {};
|
||||
char icosq_str[32] = {};
|
||||
|
||||
err_ctx.ctx = rq;
|
||||
err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
|
||||
@ -505,7 +505,7 @@ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
|
||||
if (icosq)
|
||||
snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
|
||||
snprintf(err_str, sizeof(err_str),
|
||||
"RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x",
|
||||
"RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
|
||||
rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
|
||||
|
||||
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
|
||||
|
@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto destroy_neigh_entry;
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
|
||||
goto destroy_neigh_entry;
|
||||
}
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv4_put(&attr);
|
||||
@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto free_encap;
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
* and not used before that.
|
||||
*/
|
||||
goto release_neigh;
|
||||
goto free_encap;
|
||||
}
|
||||
|
||||
memset(&reformat_params, 0, sizeof(reformat_params));
|
||||
@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
|
||||
goto free_encap;
|
||||
}
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv4_put(&attr);
|
||||
@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto destroy_neigh_entry;
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
|
||||
goto destroy_neigh_entry;
|
||||
}
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv6_put(&attr);
|
||||
@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto free_encap;
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
* and not used before that.
|
||||
*/
|
||||
goto release_neigh;
|
||||
goto free_encap;
|
||||
}
|
||||
|
||||
memset(&reformat_params, 0, sizeof(reformat_params));
|
||||
@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
|
||||
goto free_encap;
|
||||
}
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv6_put(&attr);
|
||||
|
@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
|
||||
struct ethtool_drvinfo *drvinfo)
|
||||
{
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
int count;
|
||||
|
||||
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
|
||||
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d.%04d (%.16s)",
|
||||
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
|
||||
mdev->board_id);
|
||||
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
|
||||
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
|
||||
if (count == sizeof(drvinfo->fw_version))
|
||||
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d.%04d", fw_rev_maj(mdev),
|
||||
fw_rev_min(mdev), fw_rev_sub(mdev));
|
||||
|
||||
strscpy(drvinfo->bus_info, dev_name(mdev->device),
|
||||
sizeof(drvinfo->bus_info));
|
||||
}
|
||||
|
@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(dev);
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
int count;
|
||||
|
||||
strscpy(drvinfo->driver, mlx5e_rep_driver_name,
|
||||
sizeof(drvinfo->driver));
|
||||
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d.%04d (%.16s)",
|
||||
fw_rev_maj(mdev), fw_rev_min(mdev),
|
||||
fw_rev_sub(mdev), mdev->board_id);
|
||||
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
|
||||
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
|
||||
if (count == sizeof(drvinfo->fw_version))
|
||||
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
|
||||
"%d.%d.%04d", fw_rev_maj(mdev),
|
||||
fw_rev_min(mdev), fw_rev_sub(mdev));
|
||||
}
|
||||
|
||||
static const struct counter_desc sw_rep_stats_desc[] = {
|
||||
|
@ -3147,7 +3147,7 @@ static struct mlx5_fields fields[] = {
|
||||
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
|
||||
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
|
||||
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
|
||||
OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
|
||||
OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
|
||||
|
||||
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
|
||||
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
|
||||
@ -3158,21 +3158,31 @@ static struct mlx5_fields fields[] = {
|
||||
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
|
||||
};
|
||||
|
||||
static unsigned long mask_to_le(unsigned long mask, int size)
|
||||
static u32 mask_field_get(void *mask, struct mlx5_fields *f)
|
||||
{
|
||||
__be32 mask_be32;
|
||||
__be16 mask_be16;
|
||||
|
||||
if (size == 32) {
|
||||
mask_be32 = (__force __be32)(mask);
|
||||
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
|
||||
} else if (size == 16) {
|
||||
mask_be32 = (__force __be32)(mask);
|
||||
mask_be16 = *(__be16 *)&mask_be32;
|
||||
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
|
||||
switch (f->field_bsize) {
|
||||
case 32:
|
||||
return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
|
||||
case 16:
|
||||
return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
|
||||
default:
|
||||
return *(u8 *)mask & (u8)f->field_mask;
|
||||
}
|
||||
}
|
||||
|
||||
return mask;
|
||||
static void mask_field_clear(void *mask, struct mlx5_fields *f)
|
||||
{
|
||||
switch (f->field_bsize) {
|
||||
case 32:
|
||||
*(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
|
||||
break;
|
||||
case 16:
|
||||
*(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
|
||||
break;
|
||||
default:
|
||||
*(u8 *)mask &= ~(u8)f->field_mask;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int offload_pedit_fields(struct mlx5e_priv *priv,
|
||||
@ -3184,11 +3194,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
|
||||
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
|
||||
struct pedit_headers_action *hdrs = parse_attr->hdrs;
|
||||
void *headers_c, *headers_v, *action, *vals_p;
|
||||
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
|
||||
struct mlx5e_tc_mod_hdr_acts *mod_acts;
|
||||
unsigned long mask, field_mask;
|
||||
void *s_masks_p, *a_masks_p;
|
||||
int i, first, last, next_z;
|
||||
struct mlx5_fields *f;
|
||||
unsigned long mask;
|
||||
u32 s_mask, a_mask;
|
||||
u8 cmd;
|
||||
|
||||
mod_acts = &parse_attr->mod_hdr_acts;
|
||||
@ -3204,15 +3215,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
|
||||
bool skip;
|
||||
|
||||
f = &fields[i];
|
||||
/* avoid seeing bits set from previous iterations */
|
||||
s_mask = 0;
|
||||
a_mask = 0;
|
||||
|
||||
s_masks_p = (void *)set_masks + f->offset;
|
||||
a_masks_p = (void *)add_masks + f->offset;
|
||||
|
||||
s_mask = *s_masks_p & f->field_mask;
|
||||
a_mask = *a_masks_p & f->field_mask;
|
||||
s_mask = mask_field_get(s_masks_p, f);
|
||||
a_mask = mask_field_get(a_masks_p, f);
|
||||
|
||||
if (!s_mask && !a_mask) /* nothing to offload here */
|
||||
continue;
|
||||
@ -3239,22 +3246,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
|
||||
match_mask, f->field_bsize))
|
||||
skip = true;
|
||||
/* clear to denote we consumed this field */
|
||||
*s_masks_p &= ~f->field_mask;
|
||||
mask_field_clear(s_masks_p, f);
|
||||
} else {
|
||||
cmd = MLX5_ACTION_TYPE_ADD;
|
||||
mask = a_mask;
|
||||
vals_p = (void *)add_vals + f->offset;
|
||||
/* add 0 is no change */
|
||||
if ((*(u32 *)vals_p & f->field_mask) == 0)
|
||||
if (!mask_field_get(vals_p, f))
|
||||
skip = true;
|
||||
/* clear to denote we consumed this field */
|
||||
*a_masks_p &= ~f->field_mask;
|
||||
mask_field_clear(a_masks_p, f);
|
||||
}
|
||||
if (skip)
|
||||
continue;
|
||||
|
||||
mask = mask_to_le(mask, f->field_bsize);
|
||||
|
||||
first = find_first_bit(&mask, f->field_bsize);
|
||||
next_z = find_next_zero_bit(&mask, f->field_bsize, first);
|
||||
last = find_last_bit(&mask, f->field_bsize);
|
||||
@ -3281,10 +3286,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
|
||||
MLX5_SET(set_action_in, action, field, f->field);
|
||||
|
||||
if (cmd == MLX5_ACTION_TYPE_SET) {
|
||||
unsigned long field_mask = f->field_mask;
|
||||
int start;
|
||||
|
||||
field_mask = mask_to_le(f->field_mask, f->field_bsize);
|
||||
|
||||
/* if field is bit sized it can start not from first bit */
|
||||
start = find_first_bit(&field_mask, f->field_bsize);
|
||||
|
||||
|
@ -399,9 +399,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
|
||||
|
||||
mlx5e_skb_cb_hwtstamp_init(skb);
|
||||
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
|
||||
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
|
||||
metadata_index);
|
||||
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
|
||||
if (!netif_tx_queue_stopped(sq->txq) &&
|
||||
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
|
||||
netif_tx_stop_queue(sq->txq);
|
||||
@ -494,10 +494,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
|
||||
|
||||
err_drop:
|
||||
stats->dropped++;
|
||||
dev_kfree_skb_any(skb);
|
||||
if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
|
||||
mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
|
||||
be32_to_cpu(eseg->flow_table_metadata));
|
||||
dev_kfree_skb_any(skb);
|
||||
mlx5e_tx_flush(sq);
|
||||
}
|
||||
|
||||
|
[comp_irq_release_sf() and comp_irq_request_sf(): used_cpus bookkeeping and SF IRQ pool fallback to PF IRQs]
[mlx5_eswitch_add_send_to_vport_rule(): IPSec TX policy table used as destination only when on_esw == from_esw]
[mlx5_irq_affinity_irq_request_auto() and its kernel-doc removed from the IRQ affinity code]
[mlx5_ptp_adjphase(): resolves the mdev and calls mlx5_ptp_adjtime_real_time() instead of mlx5_ptp_adjtime()]
[struct mlx5_irq, mlx5_irq_alloc(), MLX5_IRQ_NAME_FORMAT_STR and MLX5_MAX_IRQ_FORMATTED_NAME: IRQ name buffer sizing]
[mlx5dr_action_supp_fwd_fdb_multi_ft(): steering_format_version < MLX5_STEERING_FORMAT_CONNECTX_6DX check added]
[dr_qp_init_attr, dr_create_rc_qp(), dr_set_data_inl_seg(), dr_rdma_handle_icm_write_segments(), dr_fill_write_args_segs(), dr_fill_write_icm_segs(), mlx5dr_send_ring_alloc(): inline-WQE send path removed]
[struct rtl8169_private, rtl_dash_is_enabled(), rtl_get_dash_type(), __rtl8169_set_wol(), rtl_prepare_power_down(), rtl8169_down(), rtl8169_up(), rtl8169_runtime_idle(), rtl_shutdown(), rtl_init_one(): dash_enabled flag]
[stmmac_rx(): rx ring limit clamped to dma_rx_size - 1 and the count check moved before the read_again label]
[prueth_probe() error unwinding: put_iep0, put_mem, put_pruss and put_cores labels]
[ipvlan_process_v4_outbound(), ipvlan_route_v6_outbound() and ipvlan_process_v6_outbound(): noinline_for_stack split of the IPv6 route lookup]
[macvlan_change_rx_flags(): promiscuity change not propagated to the lower device for passthru ports]
[ppp_sync_ioctl() PPPIOCSMRU bounded to U16_MAX and ppp_sync_input() length check before stripping the address/control field]
[ptp_read(), enqueue_external_timestamp(), queue_cnt() and extts_fifo_show(): WRITE_ONCE()/READ_ONCE() pairing on the timestamp queue head and tail]
[bpf_global_ma/bpf_global_percpu_ma declarations and the bpf_is_ldimm64()/bpf_pseudo_func() helpers]
[mii_eee_cap1_mod_linkmode_t() kernel-doc: the "EEE link partner ability 1" register comment]
[nft_reg_store64() signature and the tcf_ct_helper() inline helpers for the CONFIG_NF_CONNTRACK and fallback cases]
[bpf_global_percpu_ma moved out of the core allocator init; verifier hunks in check_kfunc_call() (lazy bpf_global_percpu_ma init under bpf_percpu_ma_lock), get_prev_insn_idx(), push_insn(), visit_func_call_insn(), visit_insn() and the check_cfg() walk with the "jump into the middle of ldimm64" check]
[nf_br_ip_fragment(): err initialised to 0]
[__dev_alloc_name(): name copied into an intermediate buf with strscpy() before the snprintf() into res]
[gso_test_func(): one page allocated per frag skb and the alloc_size accounting dropped]
[__inet_hash(): SOCK_RCU_FREE set before the socket is added to the hash list]
[mptcp_pm_remove_addrs(), MPTCP_MAX_GSO_SIZE clamp in mptcp_sendmsg_frag(), mptcp_release_cb() and mptcp_setsockopt_v4_set_tos() subflow locking]
[ncsi_aen_handler_lsc(): unconditional netif_carrier_off()/netif_carrier_on() calls removed]
[ip_set_dereference_nfnl() macro, ip_set_rcu_get() and the synchronize_rcu() added to ip_set_swap()]
[nf_tables_delsetelem() error return and the split of nft_trans_gc_catchall() into nft_trans_gc_catchall_async() and nft_trans_gc_catchall_sync()]
[nft_byteorder_eval() 64-bit register stores through a u64 destination and the NFT_META_TIME_NS cast in nft_meta_get_eval_time()]
[nft_rbtree_gc(): unused nftables_pernet and net locals dropped]
[tcf_ct_offload_act_setup(): offload rejected with -EOPNOTSUPP when tcf_ct_helper(act) is set]
[tipc_add_tlv(): memset of the TLV space before it is filled]
[unix_stream_recv_urg(): oob_skb pinned with skb_get() and released with consume_skb()]
[BPF selftest programs: "conditional loop (2)", "unconditional loop after conditional jump" (including never_ending_subprog), and updated expectations for "bounded loop, start in the middle" and "bounded recursion"]
[verifier_precision selftest: state_loop_first_last_equal with its expected mark_precise log messages]
[verifier C tests: errstr updated to "the call stack of 9 frames is too deep" and "jump into the middle of ldimm64 insn 1"]
[xskxceiver selftest: printf arguments cast to unsigned long long in is_metadata_correct(), is_frag_valid(), complete_pkts() and validate_tx_invalid_descs()]
[mptcp_join.sh fastclose_tests(): chk_join_nr argument list]