mlx5-updates-2018-10-03
Merge tag 'mlx5-updates-2018-10-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2018-10-03

mlx5 core driver and ethernet netdev updates. Please note there is a small
devlink related update that allows an extack argument to be passed to the
eswitch operations.

From Eli Britstein:
1) devlink: Add extack argument to the eswitch related operations
2) net/mlx5e: E-Switch, return extack messages for failures in the
   e-switch devlink callbacks
3) net/mlx5e: Add extack messages for TC offload failures

From Eran Ben Elisha:
4) mlx5e: Add counter for aRFS rule insertion failures

From Feras Daoud:
5) Fast teardown support for mlx5 device
   This change introduces an enhanced version of the "force teardown" that
   allows SW to perform teardown in a faster way, without the need to
   reclaim all the FW pages.
   Fast teardown provides the following advantages:
   1- Fix a FW race condition that could cause command timeout
   2- Avoid moving to polling mode
   3- Close the vport to prevent a PCI ACK from being sent without being
      scattered to memory
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
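The extack plumbing works as follows: the devlink eswitch callbacks now take a
struct netlink_ext_ack pointer so a driver can hand a human-readable failure
reason back to userspace via the netlink extended ack, instead of only logging
it. A minimal sketch of the new callback shape, using a hypothetical "foo"
driver (not code from this series; the bnxt, liquidio, nfp and mlx5 changes
below all follow this pattern):

    #include <net/devlink.h>

    /* Hypothetical driver callback showing the new, extack-aware signature */
    static int foo_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                    struct netlink_ext_ack *extack)
    {
            if (mode != DEVLINK_ESWITCH_MODE_LEGACY) {
                    /* the text reaches userspace in the netlink extended ack */
                    NL_SET_ERR_MSG_MOD(extack, "only legacy mode is supported");
                    return -EOPNOTSUPP;
            }
            return 0;
    }

For the teardown ordering: with fast teardown available, the unload path
prefers it and only falls back to the older force teardown. A condensed sketch
of the ordering implemented by mlx5_try_fast_unload() below (capability
checks, health-poll restart and error logging omitted):

    /* Condensed sketch; both helpers are declared in mlx5_core.h below */
    static int try_teardown(struct mlx5_core_dev *dev)
    {
            int ret;

            ret = mlx5_cmd_fast_teardown_hca(dev);   /* no FW page reclaim */
            if (!ret)
                    return 0;

            return mlx5_cmd_force_teardown_hca(dev); /* legacy force path */
    }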
commit 9e50727f0e
@@ -521,7 +521,8 @@ int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return 0;
 }
 
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+			     struct netlink_ext_ack *extack)
 {
 	struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
 	int rc = 0;
@@ -30,7 +30,8 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
 
 bool bnxt_dev_is_vf_rep(struct net_device *dev);
 int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
+			     struct netlink_ext_ack *extack);
 
 #else
 
@@ -3144,7 +3144,8 @@ liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 }
 
 static int
-liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
+			  struct netlink_ext_ack *extack)
 {
 	struct lio_devlink_priv *priv;
 	struct octeon_device *oct;
@@ -543,8 +543,11 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
-			   __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+		priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
+		mlx5e_dbg(HW, priv,
+			  "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
+			  __func__, arfs_rule->filter_id, arfs_rule->rxq,
+			  tuple->ip_proto, err);
 	}
 
 out:
@@ -93,6 +93,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -170,6 +171,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_cache_busy += rq_stats->cache_busy;
 		s->rx_cache_waive += rq_stats->cache_waive;
 		s->rx_congst_umr += rq_stats->congst_umr;
+		s->rx_arfs_err += rq_stats->arfs_err;
 		s->ch_events += ch_stats->events;
 		s->ch_poll += ch_stats->poll;
 		s->ch_arm += ch_stats->arm;
@@ -1161,6 +1163,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
 };
 
 static const struct counter_desc sq_stats_desc[] = {
@@ -106,6 +106,7 @@ struct mlx5e_sw_stats {
 	u64 rx_cache_busy;
 	u64 rx_cache_waive;
 	u64 rx_congst_umr;
+	u64 rx_arfs_err;
 	u64 ch_events;
 	u64 ch_poll;
 	u64 ch_arm;
@@ -202,6 +203,7 @@ struct mlx5e_rq_stats {
 	u64 cache_busy;
 	u64 cache_waive;
 	u64 congst_umr;
+	u64 arfs_err;
 };
 
 struct mlx5e_sq_stats {
@@ -532,7 +532,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
 #define UNKNOWN_MATCH_PRIO 8
 
 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
-				  struct mlx5_flow_spec *spec, u8 *match_prio)
+				  struct mlx5_flow_spec *spec, u8 *match_prio,
+				  struct netlink_ext_ack *extack)
 {
 	void *headers_c, *headers_v;
 	u8 prio_val, prio_mask = 0;
@@ -540,8 +541,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
-		netdev_warn(priv->netdev,
-			    "only PCP trust state supported for hairpin\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "only PCP trust state supported for hairpin");
 		return -EOPNOTSUPP;
 	}
 #endif
@@ -557,8 +558,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 	if (!vlan_present || !prio_mask) {
 		prio_val = UNKNOWN_MATCH_PRIO;
 	} else if (prio_mask != 0x7) {
-		netdev_warn(priv->netdev,
-			    "masked priority match not supported for hairpin\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "masked priority match not supported for hairpin");
 		return -EOPNOTSUPP;
 	}
 
@@ -568,7 +569,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 				  struct mlx5e_tc_flow *flow,
-				  struct mlx5e_tc_flow_parse_attr *parse_attr)
+				  struct mlx5e_tc_flow_parse_attr *parse_attr,
+				  struct netlink_ext_ack *extack)
 {
 	int peer_ifindex = parse_attr->mirred_ifindex;
 	struct mlx5_hairpin_params params;
@@ -583,12 +585,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 
 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
-		netdev_warn(priv->netdev, "hairpin is not supported\n");
+		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
 		return -EOPNOTSUPP;
 	}
 
 	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
-	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
+				     extack);
 	if (err)
 		return err;
 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
@@ -677,7 +680,8 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
 static struct mlx5_flow_handle *
 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
-		      struct mlx5e_tc_flow *flow)
+		      struct mlx5e_tc_flow *flow,
+		      struct netlink_ext_ack *extack)
 {
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	struct mlx5_core_dev *dev = priv->mdev;
@@ -694,7 +698,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	int err, dest_ix = 0;
 
 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
-		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
 		if (err) {
 			rule = ERR_PTR(err);
 			goto err_add_hairpin_flow;
@@ -753,6 +757,8 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 						    MLX5E_TC_TABLE_NUM_GROUPS,
 						    MLX5E_TC_FT_LEVEL, 0);
 		if (IS_ERR(priv->fs.tc.t)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed to create tc offload table\n");
 			netdev_err(priv->netdev,
 				   "Failed to create tc offload table\n");
 			rule = ERR_CAST(priv->fs.tc.t);
@@ -819,12 +825,14 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct ip_tunnel_info *tun_info,
 			      struct net_device *mirred_dev,
 			      struct net_device **encap_dev,
-			      struct mlx5e_tc_flow *flow);
+			      struct mlx5e_tc_flow *flow,
+			      struct netlink_ext_ack *extack);
 
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
-		      struct mlx5e_tc_flow *flow)
+		      struct mlx5e_tc_flow *flow,
+		      struct netlink_ext_ack *extack)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -838,7 +846,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
 					     attr->parse_attr->mirred_ifindex);
 		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
-					 out_dev, &encap_dev, flow);
+					 out_dev, &encap_dev, flow, extack);
 		if (err) {
 			rule = ERR_PTR(err);
 			if (err != -EAGAIN)
@@ -1105,6 +1113,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 			     struct mlx5_flow_spec *spec,
 			     struct tc_cls_flower_offload *f)
 {
+	struct netlink_ext_ack *extack = f->common.extack;
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				       outer_headers);
 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1133,6 +1142,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
 			parse_vxlan_attr(spec, f);
 		else {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "port isn't an offloaded vxlan udp dport");
 			netdev_warn(priv->netdev,
 				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
 			return -EOPNOTSUPP;
@@ -1149,6 +1160,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 			 udp_sport, ntohs(key->src));
 	} else { /* udp dst port must be given */
 vxlan_match_offload_err:
+		NL_SET_ERR_MSG_MOD(extack,
+				   "IP tunnel decap offload supported only for vxlan, must set UDP dport");
 		netdev_warn(priv->netdev,
 			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
 		return -EOPNOTSUPP;
@@ -1225,6 +1238,16 @@ vxlan_match_offload_err:
 
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+		if (mask->ttl &&
+		    !MLX5_CAP_ESW_FLOWTABLE_FDB
+			(priv->mdev,
+			 ft_field_support.outer_ipv4_ttl)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Matching on TTL is not supported");
+			return -EOPNOTSUPP;
+		}
+
 	}
 
 	/* Enforce DMAC when offloading incoming tunneled flows.
@@ -1247,6 +1270,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			      struct tc_cls_flower_offload *f,
 			      u8 *match_level)
 {
+	struct netlink_ext_ack *extack = f->common.extack;
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				       outer_headers);
 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1277,6 +1301,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	      BIT(FLOW_DISSECTOR_KEY_TCP) |
 	      BIT(FLOW_DISSECTOR_KEY_IP) |
 	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
 		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
 			    f->dissector->used_keys);
 		return -EOPNOTSUPP;
@@ -1553,8 +1578,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
 		if (mask->ttl &&
 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
-						ft_field_support.outer_ipv4_ttl))
+						ft_field_support.outer_ipv4_ttl)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Matching on TTL is not supported");
 			return -EOPNOTSUPP;
+		}
 
 		if (mask->tos || mask->ttl)
 			*match_level = MLX5_MATCH_L3;
@@ -1596,6 +1624,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 				 udp_dport, ntohs(key->dst));
 			break;
 		default:
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Only UDP and TCP transports are supported for L4 matching");
 			netdev_err(priv->netdev,
 				   "Only UDP and TCP transport are supported\n");
 			return -EINVAL;
@@ -1632,6 +1662,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 			    struct mlx5_flow_spec *spec,
 			    struct tc_cls_flower_offload *f)
 {
+	struct netlink_ext_ack *extack = f->common.extack;
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1646,6 +1677,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 		if (rep->vport != FDB_UPLINK_VPORT &&
 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
 		    esw->offloads.inline_mode < match_level)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Flow is not offloaded due to min inline setting");
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
 				    match_level, esw->offloads.inline_mode);
@@ -1747,7 +1780,8 @@ static struct mlx5_fields fields[] = {
  */
 static int offload_pedit_fields(struct pedit_headers *masks,
 				struct pedit_headers *vals,
-				struct mlx5e_tc_flow_parse_attr *parse_attr)
+				struct mlx5e_tc_flow_parse_attr *parse_attr,
+				struct netlink_ext_ack *extack)
 {
 	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
 	int i, action_size, nactions, max_actions, first, last, next_z;
@@ -1786,11 +1820,15 @@ static int offload_pedit_fields(struct pedit_headers *masks,
 			continue;
 
 		if (s_mask && a_mask) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "can't set and add to the same HW field");
 			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
 			return -EOPNOTSUPP;
 		}
 
 		if (nactions == max_actions) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "too many pedit actions, can't offload");
 			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
 			return -EOPNOTSUPP;
 		}
@@ -1823,6 +1861,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
 		next_z = find_next_zero_bit(&mask, field_bsize, first);
 		last = find_last_bit(&mask, field_bsize);
 		if (first < next_z && next_z < last) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "rewrite of few sub-fields isn't supported");
 			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
 			       mask);
 			return -EOPNOTSUPP;
@@ -1881,7 +1921,8 @@ static const struct pedit_headers zero_masks = {};
 
 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
 				 const struct tc_action *a, int namespace,
-				 struct mlx5e_tc_flow_parse_attr *parse_attr)
+				 struct mlx5e_tc_flow_parse_attr *parse_attr,
+				 struct netlink_ext_ack *extack)
 {
 	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
 	int nkeys, i, err = -EOPNOTSUPP;
@@ -1899,12 +1940,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
 		err = -EOPNOTSUPP; /* can't be all optimistic */
 
 		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
-			netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "legacy pedit isn't offloaded");
 			goto out_err;
 		}
 
 		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
-			netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
+			NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
 			goto out_err;
 		}
 
@@ -1921,13 +1963,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
 		if (err)
 			goto out_err;
 
-	err = offload_pedit_fields(masks, vals, parse_attr);
+	err = offload_pedit_fields(masks, vals, parse_attr, extack);
 	if (err < 0)
 		goto out_dealloc_parsed_actions;
 
 	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
 		cmd_masks = &masks[cmd];
 		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "attempt to offload an unsupported field");
 			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
 			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
 				       16, 1, cmd_masks, sizeof(zero_masks), true);
@@ -1944,19 +1988,26 @@ out_err:
 	return err;
 }
 
-static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+static bool csum_offload_supported(struct mlx5e_priv *priv,
+				   u32 action,
+				   u32 update_flags,
+				   struct netlink_ext_ack *extack)
 {
 	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
 			 TCA_CSUM_UPDATE_FLAG_UDP;
 
 	/*  The HW recalcs checksums only if re-writing headers */
 	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "TC csum action is only offloaded with pedit");
 		netdev_warn(priv->netdev,
 			    "TC csum action is only offloaded with pedit\n");
 		return false;
 	}
 
 	if (update_flags & ~prot_flags) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload TC csum action for some header/s");
 		netdev_warn(priv->netdev,
 			    "can't offload TC csum action for some header/s - flags %#x\n",
 			    update_flags);
@@ -1967,7 +2018,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
 }
 
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
-					  struct tcf_exts *exts)
+					  struct tcf_exts *exts,
+					  struct netlink_ext_ack *extack)
 {
 	const struct tc_action *a;
 	bool modify_ip_header;
@@ -2005,6 +2057,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
 	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload re-write of non TCP/UDP");
 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
 		return false;
 	}
@@ -2016,7 +2070,8 @@ out_ok:
 static bool actions_match_supported(struct mlx5e_priv *priv,
 				    struct tcf_exts *exts,
 				    struct mlx5e_tc_flow_parse_attr *parse_attr,
-				    struct mlx5e_tc_flow *flow)
+				    struct mlx5e_tc_flow *flow,
+				    struct netlink_ext_ack *extack)
 {
 	u32 actions;
 
@@ -2030,7 +2085,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 		return false;
 
 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
-		return modify_header_match_supported(&parse_attr->spec, exts);
+		return modify_header_match_supported(&parse_attr->spec, exts,
+						     extack);
 
 	return true;
 }
@@ -2051,7 +2107,8 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				struct mlx5e_tc_flow_parse_attr *parse_attr,
-				struct mlx5e_tc_flow *flow)
+				struct mlx5e_tc_flow *flow,
+				struct netlink_ext_ack *extack)
 {
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	const struct tc_action *a;
@@ -2075,7 +2132,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
 		if (is_tcf_pedit(a)) {
 			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
-						    parse_attr);
+						    parse_attr, extack);
 			if (err)
 				return err;
 
@@ -2086,7 +2143,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
 		if (is_tcf_csum(a)) {
 			if (csum_offload_supported(priv, action,
-						   tcf_csum_update_flags(a)))
+						   tcf_csum_update_flags(a),
+						   extack))
 				continue;
 
 			return -EOPNOTSUPP;
@@ -2102,6 +2160,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 			} else {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "device is not on same HW, can't offload");
 				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
 					    peer_dev->name);
 				return -EINVAL;
@@ -2113,8 +2173,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			u32 mark = tcf_skbedit_mark(a);
 
 			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
-				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
-					    mark);
+				NL_SET_ERR_MSG_MOD(extack,
+						   "Bad flow mark - only 16 bit is supported");
 				return -EINVAL;
 			}
 
@@ -2127,7 +2187,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	}
 
 	attr->action = action;
-	if (!actions_match_supported(priv, exts, parse_attr, flow))
+	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
 		return -EOPNOTSUPP;
 
 	return 0;
@@ -2529,7 +2589,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct ip_tunnel_info *tun_info,
 			      struct net_device *mirred_dev,
 			      struct net_device **encap_dev,
-			      struct mlx5e_tc_flow *flow)
+			      struct mlx5e_tc_flow *flow,
+			      struct netlink_ext_ack *extack)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2547,6 +2608,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	/* setting udp src port isn't supported */
 	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
 vxlan_encap_offload_err:
+		NL_SET_ERR_MSG_MOD(extack,
+				   "must set udp dst port and not set udp src port");
 		netdev_warn(priv->netdev,
 			    "must set udp dst port and not set udp src port\n");
 		return -EOPNOTSUPP;
@@ -2556,6 +2619,8 @@ vxlan_encap_offload_err:
 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
 	} else {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "port isn't an offloaded vxlan udp dport");
 		netdev_warn(priv->netdev,
 			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
 		return -EOPNOTSUPP;
@@ -2660,7 +2725,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
 
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				struct mlx5e_tc_flow_parse_attr *parse_attr,
-				struct mlx5e_tc_flow *flow)
+				struct mlx5e_tc_flow *flow,
+				struct netlink_ext_ack *extack)
 {
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -2686,7 +2752,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
 		if (is_tcf_pedit(a)) {
 			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
-						    parse_attr);
+						    parse_attr, extack);
 			if (err)
 				return err;
 
@@ -2697,7 +2763,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
 		if (is_tcf_csum(a)) {
 			if (csum_offload_supported(priv, action,
-						   tcf_csum_update_flags(a)))
+						   tcf_csum_update_flags(a),
+						   extack))
 				continue;
 
 			return -EOPNOTSUPP;
@@ -2710,6 +2777,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			out_dev = tcf_mirred_dev(a);
 
 			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "can't support more output ports, can't offload forwarding");
 				pr_err("can't support more than %d output ports, can't offload forwarding\n",
 				       attr->out_count);
 				return -EOPNOTSUPP;
@@ -2733,6 +2802,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 				/* attr->out_rep is resolved when we handle encap */
 			} else {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "devices are not on same switch HW, can't offload forwarding");
 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
 				       priv->netdev->name, out_dev->name);
 				return -EINVAL;
@@ -2769,10 +2840,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	}
 
 	attr->action = action;
-	if (!actions_match_supported(priv, exts, parse_attr, flow))
+	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
 		return -EOPNOTSUPP;
 
 	if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "current firmware doesn't support split rule for port mirroring");
 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
 		return -EOPNOTSUPP;
 	}
@@ -2814,6 +2887,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
 int mlx5e_configure_flower(struct mlx5e_priv *priv,
 			   struct tc_cls_flower_offload *f, int flags)
 {
+	struct netlink_ext_ack *extack = f->common.extack;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 	struct rhashtable *tc_ht = get_tc_ht(priv);
@@ -2825,6 +2899,8 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 
 	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
 	if (flow) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "flow cookie already exists, ignoring");
 		netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
 		return 0;
 	}
@@ -2853,15 +2929,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 		goto err_free;
 
 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
+		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow,
+					   extack);
 		if (err < 0)
 			goto err_free;
-		flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+		flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow,
+						      extack);
 	} else {
-		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
+		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow,
+					   extack);
 		if (err < 0)
			goto err_free;
-		flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
+		flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow,
+						      extack);
 	}
 
 	if (IS_ERR(flow->rule[0])) {
@@ -269,12 +269,15 @@ struct mlx5_esw_flow_attr {
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+				  struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+					 struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+					struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
@@ -810,29 +810,35 @@ out:
 	return flow_rule;
 }
 
-static int esw_offloads_start(struct mlx5_eswitch *esw)
+static int esw_offloads_start(struct mlx5_eswitch *esw,
+			      struct netlink_ext_ack *extack)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
 	if (esw->mode != SRIOV_LEGACY) {
-		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't set offloads mode, SRIOV legacy not enabled");
 		return -EINVAL;
 	}
 
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
 	if (err) {
-		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Failed setting eswitch to offloads");
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
-		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+		if (err1) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed setting eswitch back to legacy");
+		}
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,
 						 num_vfs,
 						 &esw->offloads.inline_mode)) {
 			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
-			esw_warn(esw->dev, "Inline mode is different between vports\n");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Inline mode is different between vports");
 		}
 	}
 	return err;
@@ -973,17 +979,20 @@ create_ft_err:
 	return err;
 }
 
-static int esw_offloads_stop(struct mlx5_eswitch *esw)
+static int esw_offloads_stop(struct mlx5_eswitch *esw,
+			     struct netlink_ext_ack *extack)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
-		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
-		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+		if (err1) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed setting eswitch back to offloads");
+		}
 	}
 
 	/* enable back PF RoCE */
@@ -1092,7 +1101,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 	return 0;
 }
 
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+				  struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	u16 cur_mlx5_mode, mlx5_mode = 0;
@@ -1111,9 +1121,9 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 		return 0;
 
 	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
-		return esw_offloads_start(dev->priv.eswitch);
+		return esw_offloads_start(dev->priv.eswitch, extack);
 	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
-		return esw_offloads_stop(dev->priv.eswitch);
+		return esw_offloads_stop(dev->priv.eswitch, extack);
 	else
 		return -EINVAL;
 }
@@ -1130,7 +1140,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
+					 struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1147,14 +1158,15 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 			return 0;
 		/* fall through */
 	case MLX5_CAP_INLINE_MODE_L2:
-		esw_warn(dev, "Inline mode can't be set\n");
+		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
 		return -EOPNOTSUPP;
 	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
 		break;
 	}
 
 	if (esw->offloads.num_flows > 0) {
-		esw_warn(dev, "Can't set inline mode when flows are configured\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't set inline mode when flows are configured");
 		return -EOPNOTSUPP;
 	}
 
@@ -1165,8 +1177,8 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	for (vport = 1; vport < esw->enabled_vports; vport++) {
 		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
 		if (err) {
-			esw_warn(dev, "Failed to set min inline on vport %d\n",
-				 vport);
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Failed to set min inline on vport");
 			goto revert_inline_mode;
 		}
 	}
@@ -1232,7 +1244,8 @@ out:
 	return 0;
 }
 
-int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
+					struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1259,7 +1272,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
 		return 0;
 
 	if (esw->offloads.num_flows > 0) {
-		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't set encapsulation when flows are configured");
 		return -EOPNOTSUPP;
 	}
 
@@ -1268,7 +1282,8 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
 	esw->offloads.encap = encap;
 	err = esw_create_offloads_fast_fdb_table(esw);
 	if (err) {
-		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Failed re-creating fast FDB table");
 		esw->offloads.encap = !encap;
 		(void)esw_create_offloads_fast_fdb_table(esw);
 	}
@@ -250,7 +250,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
 	if (ret)
 		return ret;
 
-	force_state = MLX5_GET(teardown_hca_out, out, force_state);
+	force_state = MLX5_GET(teardown_hca_out, out, state);
 	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
 		mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
 		return -EIO;
@@ -259,6 +259,54 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
 	return 0;
 }
 
+#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
+{
+	unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+	int state;
+	int ret;
+
+	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
+		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
+		return -EOPNOTSUPP;
+	}
+
+	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+	MLX5_SET(teardown_hca_in, in, profile,
+		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);
+
+	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (ret)
+		return ret;
+
+	state = MLX5_GET(teardown_hca_out, out, state);
+	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
+		mlx5_core_warn(dev, "teardown with fast mode failed\n");
+		return -EIO;
+	}
+
+	mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+
+	/* Loop until device state turns to disable */
+	end = jiffies + msecs_to_jiffies(delay_ms);
+	do {
+		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+			break;
+
+		cond_resched();
+	} while (!time_after(jiffies, end));
+
+	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
+			mlx5_get_nic_state(dev), delay_ms);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 enum mlxsw_reg_mcc_instruction {
 	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
 	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
@@ -58,23 +58,26 @@ enum {
 	MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10
 };
 
-enum {
-	MLX5_NIC_IFC_FULL = 0,
-	MLX5_NIC_IFC_DISABLED = 1,
-	MLX5_NIC_IFC_NO_DRAM_NIC = 2,
-	MLX5_NIC_IFC_INVALID = 3
-};
-
 enum {
 	MLX5_DROP_NEW_HEALTH_WORK,
 	MLX5_DROP_NEW_RECOVERY_WORK,
 };
 
-static u8 get_nic_state(struct mlx5_core_dev *dev)
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
 {
 	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
 }
 
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
+{
+	u32 cur_cmdq_addr_l_sz;
+
+	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
+	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
+		    state << MLX5_NIC_IFC_OFFSET,
+		    &dev->iseg->cmdq_addr_l_sz);
+}
+
 static void trigger_cmd_completions(struct mlx5_core_dev *dev)
 {
 	unsigned long flags;
@@ -103,7 +106,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
 	struct mlx5_core_health *health = &dev->priv.health;
 	struct health_buffer __iomem *h = health->health;
 
-	if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+	if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
 		return 1;
 
 	if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -133,7 +136,7 @@ unlock:
 
 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
 {
-	u8 nic_interface = get_nic_state(dev);
+	u8 nic_interface = mlx5_get_nic_state(dev);
 
 	switch (nic_interface) {
 	case MLX5_NIC_IFC_FULL:
@@ -168,7 +171,7 @@ static void health_recover(struct work_struct *work)
 	priv = container_of(health, struct mlx5_priv, health);
 	dev = container_of(priv, struct mlx5_core_dev, priv);
 
-	nic_state = get_nic_state(dev);
+	nic_state = mlx5_get_nic_state(dev);
 	if (nic_state == MLX5_NIC_IFC_INVALID) {
 		dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
 		return;
@@ -1594,12 +1594,17 @@ static const struct pci_error_handlers mlx5_err_handler = {
 
 static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 {
-	int ret;
+	bool fast_teardown = false, force_teardown = false;
+	int ret = 1;
 
-	if (!MLX5_CAP_GEN(dev, force_teardown)) {
-		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
+	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
+	force_teardown = MLX5_CAP_GEN(dev, force_teardown);
+
+	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
+	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
+
+	if (!fast_teardown && !force_teardown)
 		return -EOPNOTSUPP;
-	}
 
 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
 		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
@@ -1612,13 +1617,19 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 	mlx5_drain_health_wq(dev);
 	mlx5_stop_health_poll(dev, false);
 
-	ret = mlx5_cmd_force_teardown_hca(dev);
-	if (ret) {
-		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
-		mlx5_start_health_poll(dev);
-		return ret;
-	}
+	ret = mlx5_cmd_fast_teardown_hca(dev);
+	if (!ret)
+		goto succeed;
+
+	ret = mlx5_cmd_force_teardown_hca(dev);
+	if (!ret)
+		goto succeed;
+
+	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+	mlx5_start_health_poll(dev);
+	return ret;
+
+succeed:
 	mlx5_enter_error_state(dev, true);
 
 	/* Some platforms requiring freeing the IRQ's in the shutdown
@@ -95,6 +95,8 @@ int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
+
 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 		     unsigned long param);
 void mlx5_core_page_fault(struct mlx5_core_dev *dev,
@@ -214,4 +216,14 @@ int mlx5_lag_allow(struct mlx5_core_dev *dev);
 int mlx5_lag_forbid(struct mlx5_core_dev *dev);
 
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+
+enum {
+	MLX5_NIC_IFC_FULL = 0,
+	MLX5_NIC_IFC_DISABLED = 1,
+	MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+	MLX5_NIC_IFC_INVALID = 3
+};
+
+u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
+void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
 #endif /* __MLX5_CORE_H__ */
@@ -177,7 +177,8 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return nfp_app_eswitch_mode_get(pf->app, mode);
 }
 
-static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+					struct netlink_ext_ack *extack)
 {
 	struct nfp_pf *pf = devlink_priv(devlink);
 	int ret;
@@ -504,6 +504,10 @@ struct health_buffer {
 	__be16		ext_synd;
 };
 
+enum mlx5_cmd_addr_l_sz_offset {
+	MLX5_NIC_IFC_OFFSET = 8,
+};
+
 struct mlx5_init_seg {
 	__be32			fw_rev;
 	__be32			cmdif_rev_fw_sub;
@@ -896,7 +896,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         log_max_mkey[0x6];
 	u8         reserved_at_f0[0x8];
 	u8         dump_fill_mkey[0x1];
-	u8         reserved_at_f9[0x3];
+	u8         reserved_at_f9[0x2];
+	u8         fast_teardown[0x1];
 	u8         log_max_eq[0x4];
 
 	u8         max_indirection[0x8];
@@ -3352,12 +3353,13 @@ struct mlx5_ifc_teardown_hca_out_bits {
 
 	u8         reserved_at_40[0x3f];
 
-	u8         force_state[0x1];
+	u8         state[0x1];
 };
 
 enum {
 	MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
 	MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE = 0x1,
+	MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN = 0x2,
 };
 
 struct mlx5_ifc_teardown_hca_in_bits {
@@ -451,11 +451,14 @@ struct devlink_ops {
 			  u32 *p_cur, u32 *p_max);
 
 	int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
-	int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
+	int (*eswitch_mode_set)(struct devlink *devlink, u16 mode,
+				struct netlink_ext_ack *extack);
 	int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
-	int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
+	int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode,
+				       struct netlink_ext_ack *extack);
 	int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
-	int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode);
+	int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode,
+				      struct netlink_ext_ack *extack);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
@@ -1626,7 +1626,7 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 		if (!ops->eswitch_mode_set)
 			return -EOPNOTSUPP;
 		mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
-		err = ops->eswitch_mode_set(devlink, mode);
+		err = ops->eswitch_mode_set(devlink, mode, info->extack);
 		if (err)
 			return err;
 	}
@@ -1636,7 +1636,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 			return -EOPNOTSUPP;
 		inline_mode = nla_get_u8(
 				info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
-		err = ops->eswitch_inline_mode_set(devlink, inline_mode);
+		err = ops->eswitch_inline_mode_set(devlink, inline_mode,
+						   info->extack);
 		if (err)
 			return err;
 	}
@@ -1645,7 +1646,8 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 		if (!ops->eswitch_encap_mode_set)
 			return -EOPNOTSUPP;
 		encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
-		err = ops->eswitch_encap_mode_set(devlink, encap_mode);
+		err = ops->eswitch_encap_mode_set(devlink, encap_mode,
+						  info->extack);
 		if (err)
 			return err;
 	}