mlx5-fixes-2022-11-09
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmNr8dQACgkQSD+KveBX
+j4cCwf+O51qKPCU5XmpHUU21QmX36oEA0wJw4Y3uvTJqpbmWxKMI8pNUPFNhzl/
0APMXm7uuD8o5Ehtq/rRzK0nCCTrN3OgkJYgaKnuUfr2NbBYCjHau1xKyIgPLj2m
uSIxqlTblT3hBwaJjzqBIsFyhpT0x8ZS2lEd2tuoQw4uyrEv2sjceLRzdj21R5by
HVtBECRI5wHXSVuZ31XjUGPbVXr6d42H5lz7465eae+FxavX0+XpzbFJLJdwOlyZ
pynvEaqLwmpfXBpc0I+oYR5EJwm/HIMjZGDJRImdV29zC20ttX1tiJuT0Wr40yjZ
1Ws3pf89GmkLB36SzPiEkp3o6HuB3A==
=ccW3
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-11-09

This series provides bug fixes to mlx5 driver.

* tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: TC, Fix slab-out-of-bounds in parse_tc_actions
  net/mlx5e: E-Switch, Fix comparing termination table instance
  net/mlx5e: TC, Fix wrong rejection of packet-per-second policing
  net/mlx5e: Fix tc acts array not to be dependent on enum order
  net/mlx5e: Fix usage of DMA sync API
  net/mlx5e: Add missing sanity checks for max TX WQE size
  net/mlx5: fw_reset: Don't try to load device in case PCI isn't working
  net/mlx5: E-switch, Set to legacy mode if failed to change switchdev mode
  net/mlx5: Allow async trigger completion execution on single CPU systems
  net/mlx5: Bridge, verify LAG state when adding bond to bridge
====================

Link: https://lore.kernel.org/r/20221109184050.108379-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit abd5ac18ae
@@ -1770,12 +1770,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
 	struct mlx5_cmd *cmd = &dev->cmd;
 	int i;
 
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		while (down_trylock(&cmd->sem))
+	for (i = 0; i < cmd->max_reg_cmds; i++) {
+		while (down_trylock(&cmd->sem)) {
 			mlx5_cmd_trigger_completions(dev);
+			cond_resched();
+		}
+	}
 
-	while (down_trylock(&cmd->pages_sem))
+	while (down_trylock(&cmd->pages_sem)) {
 		mlx5_cmd_trigger_completions(dev);
+		cond_resched();
+	}
 
 	/* Unlock cmdif */
 	up(&cmd->pages_sem);
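The hunk above is the single-CPU fix: without the cond_resched() calls, the trylock loops in mlx5_cmd_flush() could spin forever on a uniprocessor system, because the context that would eventually release the semaphore never gets a chance to run. The following is a minimal userspace analogue of the same poll-and-yield pattern, using POSIX semaphores rather than the kernel primitives; every name in it is made up for illustration (build with -pthread).

/* Illustrative userspace analogue of the flush loop above: poll a semaphore
 * with trywait and yield the CPU between attempts so the thread that will
 * eventually post it can run, even on a single-CPU machine.
 */
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t done;

static void *completer(void *arg)
{
	(void)arg;
	usleep(1000);        /* pretend to finish outstanding commands */
	sem_post(&done);     /* equivalent of the triggered completion */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&done, 0, 0);
	pthread_create(&t, NULL, completer, NULL);

	/* Busy-poll like the flush loop, but yield between attempts so the
	 * completer thread gets CPU time on a uniprocessor system. */
	while (sem_trywait(&done) != 0)
		sched_yield();

	pthread_join(&t, NULL);
	puts("flushed");
	return 0;
}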
@@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
 	return err;
 }
 
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct net_device *upper = info->upper_dev;
+	struct net_device *lower;
+	struct list_head *iter;
+
+	if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+		return 0;
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		struct mlx5_core_dev *mdev;
+		struct mlx5e_priv *priv;
+
+		if (!mlx5e_eswitch_rep(lower))
+			continue;
+
+		priv = netdev_priv(lower);
+		mdev = priv->mdev;
+		if (!mlx5_lag_is_active(mdev))
+			return -EAGAIN;
+		if (!mlx5_lag_is_shared_fdb(mdev))
+			return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 						unsigned long event, void *ptr)
 {
@@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
+		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
 		break;
 
 	case NETDEV_CHANGEUPPER:
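Together, the two bridge hunks above hook NETDEV_PRECHANGEUPPER so that adding a bond to an offloaded bridge is refused up front unless LAG is active with a shared FDB. The sketch below is a generic userspace illustration of that two-phase idea: a "pre" callback can veto a topology change before it is committed. It does not use the kernel notifier API, and every name and value in it is hypothetical.

/* Two-phase change: validate first, commit only if validation agreed. */
#include <stdio.h>

struct change { const char *lower; const char *upper; };

/* Pretend LAG state; in the real driver this is queried from the device. */
static int lag_active = 1;
static int lag_shared_fdb = 0;

static int pre_changeupper(const struct change *c)
{
	(void)c;
	if (!lag_active)
		return -11;   /* like -EAGAIN: try again once LAG is up */
	if (!lag_shared_fdb)
		return -95;   /* like -EOPNOTSUPP: unsupported configuration */
	return 0;
}

static void changeupper(const struct change *c)
{
	printf("%s is now an upper device of %s\n", c->upper, c->lower);
}

int main(void)
{
	struct change c = { .lower = "bond0", .upper = "br0" };
	int err = pre_changeupper(&c);

	if (err)
		printf("refusing to add %s to %s: error %d\n",
		       c.lower, c.upper, err);
	else
		changeupper(&c);
	return 0;
}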
@@ -6,70 +6,42 @@
 #include "en/tc_priv.h"
 #include "mlx5_core.h"
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
-	&mlx5e_tc_act_accept,
-	&mlx5e_tc_act_drop,
-	&mlx5e_tc_act_trap,
-	&mlx5e_tc_act_goto,
-	&mlx5e_tc_act_mirred,
-	&mlx5e_tc_act_mirred,
-	&mlx5e_tc_act_redirect_ingress,
-	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan_mangle,
-	&mlx5e_tc_act_tun_encap,
-	&mlx5e_tc_act_tun_decap,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_csum,
-	NULL, /* FLOW_ACTION_MARK, */
-	&mlx5e_tc_act_ptype,
-	NULL, /* FLOW_ACTION_PRIORITY, */
-	NULL, /* FLOW_ACTION_WAKE, */
-	NULL, /* FLOW_ACTION_QUEUE, */
-	&mlx5e_tc_act_sample,
-	&mlx5e_tc_act_police,
-	&mlx5e_tc_act_ct,
-	NULL, /* FLOW_ACTION_CT_METADATA, */
-	&mlx5e_tc_act_mpls_push,
-	&mlx5e_tc_act_mpls_pop,
-	NULL, /* FLOW_ACTION_MPLS_MANGLE, */
-	NULL, /* FLOW_ACTION_GATE, */
-	NULL, /* FLOW_ACTION_PPPOE_PUSH, */
-	NULL, /* FLOW_ACTION_JUMP, */
-	NULL, /* FLOW_ACTION_PIPE, */
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan,
+	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+	[FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
+	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+	[FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
+	[FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
+	[FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle,
+	[FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap,
+	[FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap,
+	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+	[FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype,
+	[FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample,
+	[FLOW_ACTION_POLICE] = &mlx5e_tc_act_police,
+	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
+	[FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push,
+	[FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop,
+	[FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,
 };
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
-	&mlx5e_tc_act_accept,
-	&mlx5e_tc_act_drop,
-	NULL, /* FLOW_ACTION_TRAP, */
-	&mlx5e_tc_act_goto,
-	&mlx5e_tc_act_mirred_nic,
-	NULL, /* FLOW_ACTION_MIRRED, */
-	NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
-	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-	NULL, /* FLOW_ACTION_VLAN_PUSH, */
-	NULL, /* FLOW_ACTION_VLAN_POP, */
-	NULL, /* FLOW_ACTION_VLAN_MANGLE, */
-	NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
-	NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_csum,
-	&mlx5e_tc_act_mark,
-	NULL, /* FLOW_ACTION_PTYPE, */
-	NULL, /* FLOW_ACTION_PRIORITY, */
-	NULL, /* FLOW_ACTION_WAKE, */
-	NULL, /* FLOW_ACTION_QUEUE, */
-	NULL, /* FLOW_ACTION_SAMPLE, */
-	NULL, /* FLOW_ACTION_POLICE, */
-	&mlx5e_tc_act_ct,
+	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic,
+	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+	[FLOW_ACTION_MARK] = &mlx5e_tc_act_mark,
+	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
 };
 
 /**
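The rewrite above replaces positional initializers with designated ones, so each handler is tied to its enum value instead of its position in the array; new flow_action_id entries can then be added without silently shifting every later slot. A standalone C sketch of the difference (the enum and strings are invented for the example):

/* Why designated initializers remove the ordering dependency: inserting a
 * new enumerator shifts every later positional slot, while designated slots
 * stay attached to the right value.
 */
#include <stdio.h>

enum flow_action_id_demo {
	ACT_ACCEPT,
	ACT_TRAP,      /* imagine this enumerator was added later */
	ACT_DROP,
	NUM_ACTS,
};

static const char *positional[NUM_ACTS] = {
	"accept",
	"drop",        /* now wrongly describes ACT_TRAP */
};

static const char *designated[NUM_ACTS] = {
	[ACT_ACCEPT] = "accept",
	[ACT_DROP]   = "drop", /* still lands in the right slot */
};

int main(void)
{
	printf("positional[ACT_DROP] = %s\n",
	       positional[ACT_DROP] ? positional[ACT_DROP] : "(null)");
	printf("designated[ACT_DROP] = %s\n", designated[ACT_DROP]);
	return 0;
}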
@@ -11,6 +11,27 @@
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ *    next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+					    MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+					 MLX5E_MAX_TX_INLINE_DS + \
+					 MLX5E_MAX_TX_IPSEC_DS + \
+					 MAX_SKB_FRAGS + 1, \
+					 MLX5_SEND_WQEBB_NUM_DS)
+
 #define MLX5E_RX_ERR_CQE(cqe)   (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
 
 static inline
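The new macros bound the worst-case TX WQE size in data-segment (DS) units at compile time, using DIV_ROUND_UP over the largest possible inline headers, IPsec trailer and fragment list. Below is a small worked example of the same rounding arithmetic; the segment sizes in it are assumed example values, not values quoted from the mlx5 headers.

/* Worked example of the DIV_ROUND_UP arithmetic behind the new macros. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define SEND_WQE_DS        16  /* bytes per data segment (assumed) */
#define INLINE_SEG_SZ       4  /* bytes of inline segment header (assumed) */

int main(void)
{
	/* ESP trailer (255 + 1 + 1) plus a 16-byte ICV, rounded up to whole
	 * data segments, mirroring the shape of MLX5E_MAX_TX_IPSEC_DS. */
	int ipsec_ds = DIV_ROUND_UP(INLINE_SEG_SZ + 255 + 1 + 1 + 16, SEND_WQE_DS);

	printf("worst-case IPsec inline data: %d data segments\n", ipsec_ds);
	printf("i.e. DIV_ROUND_UP(%d, %d) = %d\n",
	       INLINE_SEG_SZ + 255 + 1 + 1 + 16, SEND_WQE_DS, ipsec_ds);
	return 0;
}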
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
+	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
 	/* A WQE must not cross the page boundary, hence two conditions:
 	 * 1. Its size must not exceed the page size.
 	 * 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si
 		  "wqe_size %u is greater than max SQ WQEBBs %u",
 		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
 
-
 	return MLX5E_STOP_ROOM(wqe_size);
 }
 
@@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 		xdpi.page.rq = rq;
 
 	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
-	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
 
 	if (unlikely(xdp_frame_has_frags(xdpf))) {
 		sinfo = xdp_get_shared_info_from_frame(xdpf);
@@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 			       skb_frag_off(frag);
 			len = skb_frag_size(frag);
 			dma_sync_single_for_device(sq->pdev, addr, len,
-						   DMA_TO_DEVICE);
+						   DMA_BIDIRECTIONAL);
 		}
 	}
 
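These XDP hunks, together with the en_rx changes further down, belong to the "Fix usage of DMA sync API" patch: the sync calls now pass the same direction the pages were mapped with (DMA_BIDIRECTIONAL, or rq->buff.map_dir) instead of a hard-coded DMA_TO_DEVICE/DMA_FROM_DEVICE. The toy model below only illustrates the invariant that the sync direction should match the mapping direction; it is not the kernel DMA API, and all names are invented.

/* Toy model of the rule the DMA fix follows. */
#include <assert.h>
#include <stdio.h>

enum dma_dir { DMA_BIDIR, DMA_TO_DEV, DMA_FROM_DEV };

struct mapping {
	enum dma_dir dir;  /* direction chosen at map time */
};

static void sync_for_device(const struct mapping *m, enum dma_dir dir)
{
	/* A debug build of the real API warns on this kind of mismatch. */
	assert(dir == m->dir && "sync direction must match mapping direction");
	printf("synced for device, dir=%d\n", dir);
}

int main(void)
{
	/* RX pages from the page pool may also be transmitted via XDP_TX,
	 * so they are mapped bidirectionally and must be synced the same way. */
	struct mapping rx_page = { .dir = DMA_BIDIR };

	sync_for_device(&rx_page, DMA_BIDIR);   /* ok */
	/* sync_for_device(&rx_page, DMA_TO_DEV); would trip the assert */
	return 0;
}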
@@ -5694,6 +5694,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	mlx5e_fs_set_state_destroy(priv->fs,
 				   !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
 
+	/* Validate the max_wqe_size_sq capability. */
+	if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+		mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+			       mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+		return -EIO;
+	}
+
 	/* max number of channels may have changed */
 	max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
 	if (priv->channels.params.num_channels > max_nch) {
@@ -266,7 +266,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_uni
 
 	addr = page_pool_get_dma_addr(au->page);
 	/* Non-XSK always uses PAGE_SIZE. */
-	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
 	return true;
 }
 
@@ -282,8 +282,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u
 		return -ENOMEM;
 
 	/* Non-XSK always uses PAGE_SIZE. */
-	addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
-				  rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir);
 	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
 		page_pool_recycle_direct(rq->page_pool, au->page);
 		au->page = NULL;
@@ -427,14 +426,15 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
 {
 	dma_addr_t addr = page_pool_get_dma_addr(au->page);
 
-	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
+				rq->buff.map_dir);
 	page_ref_inc(au->page);
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 			au->page, frag_offset, len, truesize);
 }
 
 static inline void
-mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
+mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
 		      struct page *page, dma_addr_t addr,
 		      int offset_from, int dma_offset, u32 headlen)
 {
@@ -442,7 +442,8 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
 	/* Aligning len to sizeof(long) optimizes memcpy performance */
 	unsigned int len = ALIGN(headlen, sizeof(long));
 
-	dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
+				rq->buff.map_dir);
 	skb_copy_to_linear_data(skb, from, len);
 }
 
@@ -1538,7 +1539,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
 
 	addr = page_pool_get_dma_addr(au->page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
-				      frag_size, DMA_FROM_DEVICE);
+				      frag_size, rq->buff.map_dir);
 	net_prefetch(data);
 
 	prog = rcu_dereference(rq->xdp_prog);
@@ -1587,7 +1588,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
 	addr = page_pool_get_dma_addr(au->page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
-				      rq->buff.frame0_sz, DMA_FROM_DEVICE);
+				      rq->buff.frame0_sz, rq->buff.map_dir);
 	net_prefetchw(va); /* xdp_frame data area */
 	net_prefetch(va + rx_headroom);
 
@@ -1608,7 +1609,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
 		addr = page_pool_get_dma_addr(au->page);
 		dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
-					frag_consumed_bytes, DMA_FROM_DEVICE);
+					frag_consumed_bytes, rq->buff.map_dir);
 
 		if (!xdp_buff_has_frags(&xdp)) {
 			/* Init on the first fragment to avoid cold cache access
@@ -1905,7 +1906,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 		mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
 	/* copy header */
 	addr = page_pool_get_dma_addr(head_au->page);
-	mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
+	mlx5e_copy_skb_header(rq, skb, head_au->page, addr,
 			      head_offset, head_offset, headlen);
 	/* skb linear part was allocated with headlen and aligned to long */
 	skb->tail += headlen;
@@ -1939,7 +1940,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
 	addr = page_pool_get_dma_addr(au->page);
 	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
-				      frag_size, DMA_FROM_DEVICE);
+				      frag_size, rq->buff.map_dir);
 	net_prefetch(data);
 
 	prog = rcu_dereference(rq->xdp_prog);
@@ -1987,7 +1988,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 
 	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
 		/* build SKB around header */
-		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+		dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
 		prefetchw(hdr);
 		prefetch(data);
 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
@@ -2009,7 +2010,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	}
 
 	prefetchw(skb->data);
-	mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
+	mlx5e_copy_skb_header(rq, skb, head->page, head->addr,
 			      head_offset + rx_headroom,
 			      rx_headroom, head_size);
 	/* skb linear part was allocated with headlen and aligned to long */
@@ -3633,10 +3633,14 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
 	attr2->action = 0;
 	attr2->flags = 0;
 	attr2->parse_attr = parse_attr;
-	attr2->esw_attr->out_count = 0;
-	attr2->esw_attr->split_count = 0;
 	attr2->dest_chain = 0;
 	attr2->dest_ft = NULL;
+
+	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+		attr2->esw_attr->out_count = 0;
+		attr2->esw_attr->split_count = 0;
+	}
+
 	return attr2;
 }
 
@@ -4758,12 +4762,6 @@ int mlx5e_policer_validate(const struct flow_action *action,
 		return -EOPNOTSUPP;
 	}
 
-	if (act->police.rate_pkt_ps) {
-		NL_SET_ERR_MSG_MOD(extack,
-				   "QoS offload not support packets per second");
-		return -EOPNOTSUPP;
-	}
-
 	return 0;
 }
 
@@ -305,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
 	u16 ds_cnt_inl = 0;
 	u16 ds_cnt_ids = 0;
 
+	/* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */
+
 	if (attr->insz)
 		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
 					  MLX5_SEND_WQE_DS);
|
|||||||
inl += VLAN_HLEN;
|
inl += VLAN_HLEN;
|
||||||
|
|
||||||
ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
|
ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
|
||||||
|
if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
|
||||||
|
netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
|
||||||
|
(u16)MLX5E_MAX_TX_INLINE_DS);
|
||||||
ds_cnt += ds_cnt_inl;
|
ds_cnt += ds_cnt_inl;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1387,12 +1387,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
 		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
 		 esw->esw_funcs.num_vfs, esw->enabled_vports);
 
-	esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
-	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
-		esw_offloads_disable(esw);
-	else if (esw->mode == MLX5_ESWITCH_LEGACY)
-		esw_legacy_disable(esw);
-	mlx5_esw_acls_ns_cleanup(esw);
+	if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
+		esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
+		if (esw->mode == MLX5_ESWITCH_OFFLOADS)
+			esw_offloads_disable(esw);
+		else if (esw->mode == MLX5_ESWITCH_LEGACY)
+			esw_legacy_disable(esw);
+		mlx5_esw_acls_ns_cleanup(esw);
+	}
 
 	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
 		devl_rate_nodes_destroy(devlink);
@@ -2310,7 +2310,7 @@ out_free:
 static int esw_offloads_start(struct mlx5_eswitch *esw,
 			      struct netlink_ext_ack *extack)
 {
-	int err, err1;
+	int err;
 
 	esw->mode = MLX5_ESWITCH_OFFLOADS;
 	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
|
|||||||
NL_SET_ERR_MSG_MOD(extack,
|
NL_SET_ERR_MSG_MOD(extack,
|
||||||
"Failed setting eswitch to offloads");
|
"Failed setting eswitch to offloads");
|
||||||
esw->mode = MLX5_ESWITCH_LEGACY;
|
esw->mode = MLX5_ESWITCH_LEGACY;
|
||||||
err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
|
|
||||||
if (err1) {
|
|
||||||
NL_SET_ERR_MSG_MOD(extack,
|
|
||||||
"Failed setting eswitch back to legacy");
|
|
||||||
}
|
|
||||||
mlx5_rescan_drivers(esw->dev);
|
mlx5_rescan_drivers(esw->dev);
|
||||||
}
|
}
|
||||||
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
|
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
|
||||||
@ -3389,19 +3384,12 @@ err_metadata:
|
|||||||
static int esw_offloads_stop(struct mlx5_eswitch *esw,
|
static int esw_offloads_stop(struct mlx5_eswitch *esw,
|
||||||
struct netlink_ext_ack *extack)
|
struct netlink_ext_ack *extack)
|
||||||
{
|
{
|
||||||
int err, err1;
|
int err;
|
||||||
|
|
||||||
esw->mode = MLX5_ESWITCH_LEGACY;
|
esw->mode = MLX5_ESWITCH_LEGACY;
|
||||||
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
|
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
|
||||||
if (err) {
|
if (err)
|
||||||
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
|
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
|
||||||
esw->mode = MLX5_ESWITCH_OFFLOADS;
|
|
||||||
err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
|
|
||||||
if (err1) {
|
|
||||||
NL_SET_ERR_MSG_MOD(extack,
|
|
||||||
"Failed setting eswitch back to offloads");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act,
 		     sizeof(dest->vport.num), hash);
 	hash = jhash((const void *)&dest->vport.vhca_id,
 		     sizeof(dest->vport.num), hash);
-	if (dest->vport.pkt_reformat)
-		hash = jhash(dest->vport.pkt_reformat,
-			     sizeof(*dest->vport.pkt_reformat),
+	if (flow_act->pkt_reformat)
+		hash = jhash(flow_act->pkt_reformat,
+			     sizeof(*flow_act->pkt_reformat),
 			     hash);
 	return hash;
 }
@@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1,
 	if (ret)
 		return ret;
 
-	return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ?
-	       memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat,
-		      sizeof(*dest1->vport.pkt_reformat)) : 0;
+	if (flow_act1->pkt_reformat && flow_act2->pkt_reformat)
+		return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat,
+			      sizeof(*flow_act1->pkt_reformat));
+
+	return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat);
 }
 
 static int
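The termination-table hunks above make hashing and comparison use flow_act->pkt_reformat and, more importantly, stop treating two entries as equal when only one of them carries a packet-reformat action. A standalone sketch of the corrected comparison semantics (the struct and values are invented for the example):

/* Two entries compare equal only if both lack a reformat blob or both
 * carry identical blobs; exactly one side having a blob is a mismatch.
 */
#include <stdio.h>
#include <string.h>

struct reformat { unsigned char data[8]; };

/* Returns 0 when a and b should be considered the same entry. */
static int reformat_cmp(const struct reformat *a, const struct reformat *b)
{
	if (a && b)
		return memcmp(a->data, b->data, sizeof(a->data));

	/* Mismatch when exactly one side has a reformat attached. */
	return !(a == b);
}

int main(void)
{
	struct reformat r1 = { .data = "abc" };
	struct reformat r2 = { .data = "abc" };

	printf("both set, same data : %d\n", reformat_cmp(&r1, &r2));   /* 0 */
	printf("one side missing    : %d\n", reformat_cmp(&r1, NULL));  /* 1 */
	printf("both missing        : %d\n", reformat_cmp(NULL, NULL)); /* 0 */
	return 0;
}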
|
@ -152,7 +152,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
|
|||||||
mlx5_unload_one(dev);
|
mlx5_unload_one(dev);
|
||||||
if (mlx5_health_wait_pci_up(dev))
|
if (mlx5_health_wait_pci_up(dev))
|
||||||
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
|
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
|
||||||
mlx5_load_one(dev, false);
|
else
|
||||||
|
mlx5_load_one(dev, false);
|
||||||
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
|
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
|
||||||
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
|
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
|
||||||
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
|
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
|
||||||