mlx5-fixes-2021-03-22
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmBY+ykACgkQSD+KveBX
+j7oyAgAy6RtOHXTRPvMU2H6iTOO48fTUiPVQpQEZEQdk0GuhsBhbPG19u8GFqJu
LlVBc90c8ZCbve84u9BRrBkZJUM9mVKuHOXsqFc7SeUuedSnaBtziAOYThWnmPkq
uO5KvBS4Rbjbh6PXeSmjPwuPzEWBZlKYEbbCyrO7kSm3p9HWjhudHqpd/fLQ+Kxc
NaMqieD3O2HkMKO1+RmZSanokLixmhF1h25uIVNwIVnCniz4qsLy02fQt7lzw0l5
VZeEgZBME3rwrrsGtgl29oSbZEGIV10bPw0k2OoCUVX1yrX8zHCZzYdmIUVRUtqP
hlLC1xuwqFq5Xyd6RMCb3FRbYZv17g==
=mbVa
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2021-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-03-22

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8fb16e80cb
@@ -1181,7 +1181,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
 
 	mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
 					&ctstate, &ctstate_mask);
-	if (ctstate_mask)
+
+	if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
 		return -EOPNOTSUPP;
 
 	ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
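
Note: the hunk above is the whole fix — matching on any ct_state bit used to disqualify offload, whereas the new test only bails out when the rule explicitly matches ct_state=+trk. A minimal standalone sketch of the two predicates (illustrative bit value, not the real mlx5 register layout):

#include <assert.h>

#define TRK 0x1	/* stand-in for MLX5_CT_STATE_TRK_BIT */

/* old predicate: reject whenever any ct_state bit is under match */
static int rejects_old(unsigned int ctstate_mask)
{
	return ctstate_mask != 0;
}

/* new predicate: reject only an explicit ct_state=+trk match */
static int rejects_new(unsigned int ctstate, unsigned int ctstate_mask)
{
	return (ctstate & ctstate_mask) == TRK;
}

int main(void)
{
	/* ct_state=-trk (value 0, mask TRK): old rejected it, new allows it */
	assert(rejects_old(TRK) && !rejects_new(0, TRK));
	/* ct_state=+trk (value TRK, mask TRK): both reject */
	assert(rejects_new(TRK, TRK));
	return 0;
}
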
@@ -1887,6 +1887,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
 		return -EOPNOTSUPP;
@@ -1896,7 +1897,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 		return -EINVAL;
 	}
 
-	mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+	err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+	if (err)
+		return err;
+
 	priv->channels.params.rx_cqe_compress_def = enable;
 
 	return 0;
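
Note: this ethtool hunk turns a fire-and-forget call into a checked one — the cached default is only updated after the device accepted the new setting. A runnable sketch of that commit-after-success pattern (the names here are illustrative, not the mlx5 API):

#include <errno.h>
#include <stdio.h>

struct params {
	int rx_cqe_compress_def;	/* cached default, mirrors device state */
};

/* stand-in for mlx5e_modify_rx_cqe_compression_locked() */
static int hw_modify(int enable)
{
	return enable ? 0 : -EINVAL;	/* pretend disabling fails */
}

static int set_pflag(struct params *p, int enable)
{
	int err = hw_modify(enable);

	if (err)
		return err;			/* cache untouched on failure */
	p->rx_cqe_compress_def = enable;	/* commit only on success */
	return 0;
}

int main(void)
{
	struct params p = { 0 };
	int err;

	err = set_pflag(&p, 1);
	printf("enable:  err=%d cached=%d\n", err, p.rx_cqe_compress_def);
	err = set_pflag(&p, 0);
	printf("disable: err=%d cached=%d\n", err, p.rx_cqe_compress_def);
	return 0;
}
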
@@ -3846,10 +3846,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	}
 
 	if (mlx5e_is_uplink_rep(priv)) {
+		struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
 		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
 		stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
 		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
 		stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+
+		/* vport multicast also counts packets that are dropped due to steering
+		 * or rx out of buffer
+		 */
+		stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 	} else {
 		mlx5e_fold_sw_stats64(priv, stats);
 	}
@@ -4972,6 +4979,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
 					   priv->max_nch);
 	params->num_tc = 1;
 
+	/* Set an initial non-zero value, so that mlx5e_select_queue won't
+	 * divide by zero if called before first activating channels.
+	 */
+	priv->num_tc_x_num_ch = params->num_channels * params->num_tc;
+
 	/* SQ */
 	params->log_sq_size = is_kdump_kernel() ?
 		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
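
Note: the comment in the hunk names the hazard — queue selection reduces a hash modulo num_tc_x_num_ch, and a zero value there is undefined behaviour in C. A small sketch of the failure mode and the guard, with invented names standing in for the driver's internals:

#include <stdio.h>

/* stand-in for the reduction a select_queue callback performs */
static unsigned int pick_queue(unsigned int hash, unsigned int num_tc_x_num_ch)
{
	return hash % num_tc_x_num_ch;	/* "% 0" would be undefined behaviour */
}

int main(void)
{
	unsigned int num_channels = 4, num_tc = 1;
	/* initialised early, exactly as the hunk does, so a stray transmit
	 * before channel activation cannot divide by zero */
	unsigned int num_tc_x_num_ch = num_channels * num_tc;

	printf("queue=%u\n", pick_queue(0xdeadbeefu, num_tc_x_num_ch));
	return 0;
}
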
@@ -2296,6 +2296,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		*match_level = MLX5_MATCH_L4;
 	}
 
+	/* Currently supported only for MPLS over UDP */
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
+	    !netif_is_bareudp(filter_dev)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Matching on MPLS is supported only for MPLS over UDP");
+		netdev_err(priv->netdev,
+			   "Matching on MPLS is supported only for MPLS over UDP\n");
+		return -EOPNOTSUPP;
+	}
+
 	return 0;
 }
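
Note: the added block gates one flower match key on the device type and reports the refusal on both channels — extack for the user, the kernel log for debugging. A standalone sketch of that shape, with boolean inputs in place of the real dissector and netdevice checks:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int parse_mpls_match(bool has_mpls_key, bool dev_is_bareudp)
{
	if (has_mpls_key && !dev_is_bareudp) {
		/* mirrors the double report: extack message + kernel log */
		fprintf(stderr, "Matching on MPLS is supported only for MPLS over UDP\n");
		return -EOPNOTSUPP;
	}
	return 0;
}

int main(void)
{
	printf("bareudp: %d\n", parse_mpls_match(true, true));	/* accepted */
	printf("other:   %d\n", parse_mpls_match(true, false));	/* rejected */
	return 0;
}
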
@@ -2899,6 +2909,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
 	return 0;
 }
 
+static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
+				   bool ct_flow, struct netlink_ext_ack *extack,
+				   struct mlx5e_priv *priv,
+				   struct mlx5_flow_spec *spec)
+{
+	if (!modify_tuple || ct_clear)
+		return true;
+
+	if (ct_flow) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload tuple modification with non-clear ct()");
+		netdev_info(priv->netdev,
+			    "can't offload tuple modification with non-clear ct()");
+		return false;
+	}
+
+	/* Add ct_state=-trk match so it will be offloaded for non ct flows
+	 * (or after clear action), as otherwise, since the tuple is changed,
+	 * we can't restore ct state
+	 */
+	if (mlx5_tc_ct_add_no_trk_match(spec)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload tuple modification with ct matches and no ct(clear) action");
+		netdev_info(priv->netdev,
+			    "can't offload tuple modification with ct matches and no ct(clear) action");
+		return false;
+	}
+
+	return true;
+}
+
 static bool modify_header_match_supported(struct mlx5e_priv *priv,
 					  struct mlx5_flow_spec *spec,
 					  struct flow_action *flow_action,
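
Note: modify_tuple_supported() collapses what were inline checks in modify_header_match_supported() into a single predicate, so a new condition (here, the ct_flow case) slots in without reshaping the caller. A compact sketch of the same shape, with a string out-parameter standing in for extack:

#include <stdbool.h>
#include <stdio.h>

static bool tuple_rewrite_supported(bool modify_tuple, bool ct_clear,
				    bool ct_flow, const char **msg)
{
	if (!modify_tuple || ct_clear)
		return true;		/* nothing to validate */
	if (ct_flow) {
		*msg = "can't offload tuple modification with non-clear ct()";
		return false;
	}
	return true;
}

int main(void)
{
	const char *msg = "";

	if (!tuple_rewrite_supported(true, false, true, &msg))
		printf("rejected: %s\n", msg);
	return 0;
}
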
@@ -2937,18 +2978,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
 		return err;
 	}
 
-	/* Add ct_state=-trk match so it will be offloaded for non ct flows
-	 * (or after clear action), as otherwise, since the tuple is changed,
-	 * we can't restore ct state
-	 */
-	if (!ct_clear && modify_tuple &&
-	    mlx5_tc_ct_add_no_trk_match(spec)) {
-		NL_SET_ERR_MSG_MOD(extack,
-				   "can't offload tuple modify header with ct matches");
-		netdev_info(priv->netdev,
-			    "can't offload tuple modify header with ct matches");
+	if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
+				    priv, spec))
 		return false;
-	}
 
 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
@@ -181,15 +181,13 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
 	u16 max_functions;
 	u16 function_id;
 	int err = 0;
-	bool ecpu;
 	int i;
 
 	max_functions = mlx5_sf_max_functions(dev);
 	function_id = MLX5_CAP_GEN(dev, sf_base_id);
-	ecpu = mlx5_read_embedded_cpu(dev);
 	/* Arm the vhca context as the vhca event notifier */
 	for (i = 0; i < max_functions; i++) {
-		err = mlx5_vhca_event_arm(dev, function_id, ecpu);
+		err = mlx5_vhca_event_arm(dev, function_id);
 		if (err)
 			return err;
 
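
Note: this hunk and all the SF hunks that follow make one change — the ecpu flag, previously read once and threaded through every vhca command, is dropped, and the embedded_cpu_function wire field is pinned to 0 at the command layer. A sketch of that API-narrowing pattern, with invented names:

#include <stdio.h>

/* after the change: the wire field is a constant inside the command
 * helper instead of a parameter every caller must fetch and forward */
static int cmd_query_state(int function_id)
{
	const int embedded_cpu_function = 0;	/* formerly the 'ecpu' argument */

	printf("query fn=%d ecpu=%d\n", function_id, embedded_cpu_function);
	return 0;
}

int main(void)
{
	/* callers no longer need an mlx5_read_embedded_cpu()-style lookup */
	return cmd_query_state(7);
}
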
@@ -6,7 +6,7 @@
 #include "sf.h"
 #include "mlx5_ifc_vhca_event.h"
 #include "vhca_event.h"
-#include "ecpf.h"
+#include "mlx5_core.h"
 
 struct mlx5_sf_hw {
 	u32 usr_sfnum;
@@ -18,7 +18,6 @@ struct mlx5_sf_hw_table {
 	struct mlx5_core_dev *dev;
 	struct mlx5_sf_hw *sfs;
 	int max_local_functions;
-	u8 ecpu: 1;
 	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
 	struct notifier_block vhca_nb;
 };
@@ -72,7 +71,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
 	if (err)
 		goto err;
 
-	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
+	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
 	if (err)
 		goto vhca_err;
 
@@ -118,7 +117,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
 
 	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
 	mutex_lock(&table->table_lock);
-	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
+	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
 	if (err)
 		goto err;
 	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
@@ -164,7 +163,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
 	table->dev = dev;
 	table->sfs = sfs;
 	table->max_local_functions = max_functions;
-	table->ecpu = mlx5_read_embedded_cpu(dev);
 	dev->priv.sf_hw_table = table;
 	mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
 	return 0;
@@ -19,52 +19,51 @@ struct mlx5_vhca_event_work {
 	struct mlx5_vhca_state_event event;
 };
 
-int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-			      bool ecpu, u32 *out, u32 outlen)
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
 {
 	u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
 
 	MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
 	MLX5_SET(query_vhca_state_in, in, function_id, function_id);
-	MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);
+	MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0);
 
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
 static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-				      bool ecpu, u32 *in, u32 inlen)
+				      u32 *in, u32 inlen)
 {
 	u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
 
 	MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
 	MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
 
 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id)
 {
 	u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
 	MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
 	MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);
 
 	return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
 }
 
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id)
 {
 	u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);
 
-	return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
+	return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in));
 }
 
 static void
@@ -73,7 +72,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
 	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
 	int err;
 
-	err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
+	err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out));
 	if (err)
 		return;
 
@@ -82,7 +81,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
 	event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
 					 vhca_state_context.vhca_state);
 
-	mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);
+	mlx5_vhca_event_arm(dev, event->function_id);
 
 	blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
 }
@@ -111,7 +110,6 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
 	INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
 	work->notifier = notifier;
 	work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
-	work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
 	mlx5_events_work_enqueue(notifier->dev, &work->work);
 	return NOTIFY_OK;
 }
@@ -10,7 +10,6 @@ struct mlx5_vhca_state_event {
 	u16 function_id;
 	u16 sw_function_id;
 	u8 new_vhca_state;
-	bool ecpu;
 };
 
 static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
@@ -25,10 +24,10 @@ void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
 void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id);
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id);
 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-			      bool ecpu, u32 *out, u32 outlen);
+			      u32 *out, u32 outlen);
 #else
 
 static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)