mlx5-updates-2021-03-11
Cleanups for mlx5 driver

1) Fix build warnings from Arnd and Vlad
2) Leon improves locking for driver load/unload flows
3) From Roi, Lockdep false dependency warning
4) Other trivial cleanups

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmBKmyMACgkQSD+KveBX
+j6n8ggAr45d0+MbXE8jAfLNS5lu7S6a5mTZ1nZhQPHn3ki9N3XnkOXW6H0MKIUP
mLb5b1Q1MA1h8XcwJBeXjrsSVl5lJTaxK6guHkhtnrIZ/+7das6tYxry5pw7S6Yq
LVzZD6gnyz/kFoF0pCPZtdYz5pX3EBy8j7fTlMDhP1hTgaSoru09HSSz2oqMuqkp
Msrra9ba+qBnSJcB+nhRExygdQKiwiJKXNg1kmpCQcWoy1JlkUxgJrKx4Rq3ofVU
+zaiZes63OZfrJhro5pvuJvxz0x3IcONL+ZL4C2YHDslsA05qpL7U3aRQbwThZHn
NKg1dKX8nX0fdkIuMfBYcaeQo+GBow==
=O521
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2021-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
This series provides some cleanups to the mlx5 driver.
For more information please see the tag log below.

Please pull and let me know if there is any problem.

mlx5-updates-2021-03-11

Cleanups for mlx5 driver

1) Fix build warnings from Arnd and Vlad
2) Leon improves locking for driver load/unload flows
3) From Roi, Lockdep false dependency warning
4) Other trivial cleanups
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 1bc61c9dd4
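A note on item 2 before the diff: the load/unload rework retires the boolean-flag entry points in favor of four explicit ones. The signatures below are taken from the diff itself; the one-line summaries are a reading aid, not authoritative documentation.

/* Before: one pair, with 'boot'/'cleanup' flags selecting probe vs. reload */
int  mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);

/* After: probe/remove and reload flows get dedicated entry points */
int  mlx5_init_one(struct mlx5_core_dev *dev);   /* probe: sw init + load + register */
void mlx5_uninit_one(struct mlx5_core_dev *dev); /* remove: unregister + full teardown */
int  mlx5_load_one(struct mlx5_core_dev *dev);   /* reload: load + attach only */
void mlx5_unload_one(struct mlx5_core_dev *dev); /* reload: detach + unload only */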
@@ -148,7 +148,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 	switch (action) {
 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
-		mlx5_unload_one(dev, false);
+		mlx5_unload_one(dev);
 		return 0;
 	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
 		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
@@ -170,13 +170,13 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
 	*actions_performed = BIT(action);
 	switch (action) {
 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
-		return mlx5_load_one(dev, false);
+		return mlx5_load_one(dev);
 	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
 		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
 			break;
 		/* On fw_activate action, also driver is reloaded and reinit performed */
 		*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
-		return mlx5_load_one(dev, false);
+		return mlx5_load_one(dev);
 	default:
 		/* Unsupported action should not get to this function */
 		WARN_ON(1);
@@ -1100,7 +1100,7 @@ int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
 	int err;
 
 	if (IS_ERR_OR_NULL(tracer))
-		return -EINVAL;
+		return 0;
 
 	dev = tracer->dev;
 	mlx5_fw_tracer_cleanup(tracer);
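Returning 0 instead of -EINVAL for an absent tracer makes this entry point safe to call unconditionally; the fw_reset hunk further down then drops its caller-side IS_ERR_OR_NULL() check. A minimal sketch of the pattern, with hypothetical names:

#include <linux/err.h>

struct widget;   /* hypothetical object that may legitimately not exist */

/* Callee-tolerates-absence pattern: the existence check lives inside the
 * operation, instead of being repeated at every call site. */
static int widget_reload(struct widget *w)
{
	if (IS_ERR_OR_NULL(w))
		return 0;   /* nothing to reload; not an error */
	/* ... tear down and re-create the widget ... */
	return 0;
}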
@@ -1126,8 +1126,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
 
 	switch (eqe->sub_type) {
 	case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
-		if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
-			queue_work(tracer->work_queue, &tracer->ownership_change_work);
+		queue_work(tracer->work_queue, &tracer->ownership_change_work);
 		break;
 	case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
 		if (likely(tracer->str_db.loaded))
@@ -621,11 +621,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	int err;
 
 	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
-	if (reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
-		reg_c0 = 0;
-	reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
-	if (!reg_c0)
+	if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
 		return true;
 
 	/* If reg_c0 is not equal to the default flow tag then skb->mark
@@ -633,6 +629,8 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	 */
 	skb->mark = 0;
 
+	reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
 	priv = netdev_priv(skb->dev);
 	esw = priv->mdev->priv.eswitch;
 
@@ -695,7 +695,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 
 	zone_rule->nat = nat;
 
-	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec)
 		return -ENOMEM;
 
@@ -737,7 +737,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 
 	zone_rule->attr = attr;
 
-	kfree(spec);
+	kvfree(spec);
 	ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
 
 	return 0;
@@ -749,7 +749,7 @@ err_rule:
 err_mod_hdr:
 	kfree(attr);
 err_attr:
-	kfree(spec);
+	kvfree(spec);
 	return err;
 }
 
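A short aside on the kzalloc-to-kvzalloc conversions in this and the following hunks: struct mlx5_flow_spec is a large structure, and kvzalloc() tries kmalloc() first, falling back to vmalloc() when physically contiguous pages are scarce, so the allocation cannot fail merely because memory is fragmented. kvfree() releases either form. A minimal sketch of the idiom, with a hypothetical function name:

#include <linux/mm.h>   /* kvzalloc(), kvfree() */

static int use_large_scratch_buffer(size_t size)
{
	/* zeroed; physically contiguous if possible, vmalloc otherwise */
	void *buf = kvzalloc(size, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use buf as CPU-only scratch space; a vmalloc-backed buffer
	 * must not be handed directly to DMA ... */
	kvfree(buf);   /* correct for both underlying allocators */
	return 0;
}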
@@ -1539,6 +1539,14 @@ mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft)
 	mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
 }
 
+/* To avoid false lock dependency warning set the ct_entries_ht lock
+ * class different than the lock class of the ht being used when deleting
+ * last flow from a group and then deleting a group, we get into del_sw_flow_group()
+ * which call rhashtable_destroy on fg->ftes_hash which will take ht->mutex but
+ * it's different than the ht->mutex here.
+ */
+static struct lock_class_key ct_entries_ht_lock_key;
+
 static struct mlx5_ct_ft *
 mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
 		     struct nf_flowtable *nf_ft)
@@ -1573,6 +1581,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
 	if (err)
 		goto err_init;
 
+	lockdep_set_class(&ft->ct_entries_ht.mutex, &ct_entries_ht_lock_key);
+
 	err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
 				     zone_params);
 	if (err)
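Background for this pair of hunks (item 3 in the tag log): every rhashtable's internal mutex is initialized from the same code in rhashtable_init(), so lockdep places them all in one lock class; destroying fg->ftes_hash while ct_entries_ht's mutex is held then looks like recursive locking. Giving ct_entries_ht a dedicated lock_class_key splits the class. The general pattern, sketched with hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Two mutexes initialized at the same site share a lockdep class; if they
 * can legitimately nest, give one instance its own class. */
static struct lock_class_key inner_table_lock_key;   /* hypothetical */

static void init_inner_table_lock(struct mutex *m)
{
	mutex_init(m);
	lockdep_set_class(m, &inner_table_lock_key);
}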
@@ -1674,10 +1684,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 	struct mlx5_ct_ft *ft;
 	u32 fte_id = 1;
 
-	post_ct_spec = kzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
+	post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL);
 	ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
 	if (!post_ct_spec || !ct_flow) {
-		kfree(post_ct_spec);
+		kvfree(post_ct_spec);
 		kfree(ct_flow);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1812,7 +1822,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 
 	attr->ct_attr.ct_flow = ct_flow;
 	dealloc_mod_hdr_actions(&pre_mod_acts);
-	kfree(post_ct_spec);
+	kvfree(post_ct_spec);
 
 	return rule;
 
@@ -1833,7 +1843,7 @@ err_alloc_pre:
 err_idr:
 	mlx5_tc_ct_del_ft_cb(ct_priv, ft);
 err_ft:
-	kfree(post_ct_spec);
+	kvfree(post_ct_spec);
 	kfree(ct_flow);
 	netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
 	return ERR_PTR(err);
@@ -76,10 +76,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
 static inline int
 mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 				struct net_device *mirred_dev,
-				struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
-int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
-				    struct net_device *mirred_dev,
-				    struct mlx5e_encap_entry *e)
+				struct mlx5e_encap_entry *e)
+{ return -EOPNOTSUPP; }
+static inline int
+mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+				struct net_device *mirred_dev,
+				struct mlx5e_encap_entry *e)
 { return -EOPNOTSUPP; }
 #endif
 int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
@@ -2,6 +2,7 @@
 /* Copyright (c) 2021 Mellanox Technologies. */
 
 #include <net/fib_notifier.h>
+#include <net/nexthop.h>
 #include "tc_tun_encap.h"
 #include "en_tc.h"
 #include "tc_tun.h"
@@ -60,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
 	struct mlx5_flow_spec *spec;
 	int err = 0;
 
-	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec)
 		return -ENOMEM;
 
@@ -101,7 +101,7 @@ out:
 	if (err)
 		mlx5_modify_header_dealloc(mdev, modify_hdr);
 out_spec:
-	kfree(spec);
+	kvfree(spec);
 	return err;
 }
 
@@ -4646,10 +4646,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 
 	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
 				 MLX5_FLOW_NAMESPACE_KERNEL);
-	if (IS_ERR(tc->ct)) {
-		err = PTR_ERR(tc->ct);
-		goto err_ct;
-	}
 
 	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
 	err = register_netdevice_notifier_dev_net(priv->netdev,
@@ -4665,7 +4661,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 
 err_reg:
 	mlx5_tc_ct_clean(tc->ct);
-err_ct:
 	mlx5_chains_destroy(tc->chains);
 err_chains:
 	rhashtable_destroy(&tc->ht);
@@ -4724,8 +4719,6 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
 						 esw_chains(esw),
 						 &esw->offloads.mod_hdr,
 						 MLX5_FLOW_NAMESPACE_FDB);
-	if (IS_ERR(uplink_priv->ct_priv))
-		goto err_ct;
 
 	mapping = mapping_create(sizeof(struct tunnel_match_key),
 				 TUNNEL_INFO_BITS_MASK, true);
@@ -4765,7 +4758,6 @@ err_enc_opts_mapping:
 	mapping_destroy(uplink_priv->tunnel_mapping);
 err_tun_mapping:
 	mlx5_tc_ct_clean(uplink_priv->ct_priv);
-err_ct:
 	netdev_warn(priv->netdev,
 		    "Failed to initialize tc (eswitch), err: %d", err);
 	return err;
@@ -120,7 +120,7 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
 	struct mlx5_vport *vport;
 
 	vport = mlx5_eswitch_get_vport(esw, vport_num);
-	return vport->dl_port;
+	return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
 }
 
 int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
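mlx5_eswitch_get_vport() reports a bad vport number as an error pointer, not NULL, which is why this hunk and the eswitch.c hunks below add IS_ERR() filters; ERR_CAST() forwards such an error under a different pointer type. A minimal sketch of the idiom, with hypothetical types:

#include <linux/err.h>

struct outer;                /* hypothetical derived object */
struct inner {               /* hypothetical lookup result */
	struct outer *out;
};

static struct outer *outer_from_inner(struct inner *in)
{
	/* IS_ERR() distinguishes an encoded errno from a real pointer;
	 * ERR_CAST() carries the same errno across return types. */
	return IS_ERR(in) ? ERR_CAST(in) : in->out;
}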
@@ -30,13 +30,13 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr);
 
 #else
 /* indir API stubs */
-struct mlx5_esw_indir_table *
+static inline struct mlx5_esw_indir_table *
 mlx5_esw_indir_table_init(void)
 {
 	return NULL;
 }
 
-void
+static inline void
 mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
 {
 }
@@ -57,7 +57,7 @@ mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
 {
 }
 
-bool
+static inline bool
 mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
 			    struct mlx5_flow_attr *attr,
 			    u16 vport_num,
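These indir_table.h hunks (and the tc_tun.h hunk earlier) belong to item 1: a stub defined in a header must be static inline, otherwise every translation unit including the header emits an external definition and the build warns or fails to link. The pattern, sketched with a hypothetical config symbol:

struct my_ctx;   /* hypothetical */

#ifdef CONFIG_MY_FEATURE
int my_feature_do(struct my_ctx *ctx);   /* real implementation lives in a .c file */
#else
static inline int my_feature_do(struct my_ctx *ctx)
{
	return -EOPNOTSUPP;   /* feature compiled out; no external symbol emitted */
}
#endif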
@@ -1141,6 +1141,8 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
 	struct mlx5_vport *vport;
 
 	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (IS_ERR(vport))
+		return PTR_ERR(vport);
 
 	if (!vport->qos.enabled)
 		return -EOPNOTSUPP;
@@ -1279,6 +1281,8 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
 	int ret;
 
 	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (IS_ERR(vport))
+		return PTR_ERR(vport);
 
 	mutex_lock(&esw->state_lock);
 	WARN_ON(vport->enabled);
@@ -1326,6 +1330,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
 	struct mlx5_vport *vport;
 
 	vport = mlx5_eswitch_get_vport(esw, vport_num);
+	if (IS_ERR(vport))
+		return;
 
 	mutex_lock(&esw->state_lock);
 	if (!vport->enabled)
@@ -1446,7 +1446,7 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
 	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
@@ -1469,7 +1469,7 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
 	dest.ft = esw->offloads.ft_offloads;
 
 	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
-	kfree(spec);
+	kvfree(spec);
 
 	if (IS_ERR(flow_rule))
 		esw_warn(esw->dev,
@@ -2554,6 +2554,9 @@ static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
 	struct mlx5_vport *vport;
 
 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+	if (IS_ERR(vport))
+		return PTR_ERR(vport);
+
 	return esw_vport_create_offloads_acl_tables(esw, vport);
 }
 
@@ -2562,6 +2565,9 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
 	struct mlx5_vport *vport;
 
 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+	if (IS_ERR(vport))
+		return;
+
 	esw_vport_destroy_offloads_acl_tables(esw, vport);
 }
 
@@ -2395,14 +2395,12 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
 			  struct init_tree_node *init_node,
 			  struct fs_node *fs_parent_node)
 {
-	int i;
-	struct mlx5_flow_namespace *fs_ns;
 	int err;
+	int i;
 
-	fs_get_obj(fs_ns, fs_parent_node);
 	for (i = 0; i < init_node->ar_size; i++) {
 		err = init_root_tree_recursive(steering, &init_node->children[i],
-					       &fs_ns->node,
+					       fs_parent_node,
 					       init_node, i);
 		if (err)
 			return err;
@@ -104,7 +104,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
 	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
 		complete(&fw_reset->done);
 	} else {
-		mlx5_load_one(dev, false);
+		mlx5_load_one(dev);
 		devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
 							BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
 							BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -119,7 +119,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
 	int err;
 
 	mlx5_enter_error_state(dev, true);
-	mlx5_unload_one(dev, false);
+	mlx5_unload_one(dev);
 	err = mlx5_health_wait_pci_up(dev);
 	if (err)
 		mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
@@ -199,16 +199,11 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
 	struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
 						      fw_live_patch_work);
 	struct mlx5_core_dev *dev = fw_reset->dev;
-	struct mlx5_fw_tracer *tracer;
 
 	mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev),
 		       fw_rev_min(dev), fw_rev_sub(dev));
 
-	tracer = dev->tracer;
-	if (IS_ERR_OR_NULL(tracer))
-		return;
-
-	if (mlx5_fw_tracer_reload(tracer))
+	if (mlx5_fw_tracer_reload(dev->tracer))
 		mlx5_core_err(dev, "Failed to reload FW tracer\n");
 }
 
@@ -342,7 +337,7 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
 	}
 
 	mlx5_enter_error_state(dev, true);
-	mlx5_unload_one(dev, false);
+	mlx5_unload_one(dev);
done:
 	fw_reset->ret = err;
 	mlx5_fw_reset_complete_reload(dev);
@@ -335,12 +335,12 @@ static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
 		return -EIO;
 	}
 	mlx5_core_err(dev, "starting health recovery flow\n");
-	mlx5_recover_device(dev);
-	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
-	    mlx5_health_check_fatal_sensors(dev)) {
+	if (mlx5_recover_device(dev) || mlx5_health_check_fatal_sensors(dev)) {
 		mlx5_core_err(dev, "health recovery failed\n");
 		return -EIO;
 	}
+
 	mlx5_core_info(dev, "health revovery succeded\n");
 	return 0;
 }
 
@@ -55,10 +55,6 @@ void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev)
 
 int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
 {
-	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
-		mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n");
-		return -EPERM;
-	}
 	if (dev->roce.reserved_gids.start < count) {
 		mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
 			       count);
@@ -79,7 +75,6 @@ int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
 
 void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
 {
-	WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up");
 	WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
 	     count, dev->roce.reserved_gids.count);
 
@@ -1235,7 +1235,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 	mlx5_put_uars_page(dev, dev->priv.uar);
 }
 
-int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+int mlx5_init_one(struct mlx5_core_dev *dev)
 {
 	int err = 0;
 
@@ -1247,16 +1247,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 	/* remove any previous indication of internal error */
 	dev->state = MLX5_DEVICE_STATE_UP;
 
-	err = mlx5_function_setup(dev, boot);
+	err = mlx5_function_setup(dev, true);
 	if (err)
 		goto err_function;
 
-	if (boot) {
-		err = mlx5_init_once(dev);
-		if (err) {
-			mlx5_core_err(dev, "sw objs init failed\n");
-			goto function_teardown;
-		}
+	err = mlx5_init_once(dev);
+	if (err) {
+		mlx5_core_err(dev, "sw objs init failed\n");
+		goto function_teardown;
 	}
 
 	err = mlx5_load(dev);
@@ -1265,16 +1263,11 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 
 	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 
-	if (boot) {
-		err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
-		if (err)
-			goto err_devlink_reg;
-
-		err = mlx5_register_device(dev);
-	} else {
-		err = mlx5_attach_device(dev);
-	}
+	err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+	if (err)
+		goto err_devlink_reg;
 
+	err = mlx5_register_device(dev);
 	if (err)
 		goto err_register;
 
@@ -1282,16 +1275,14 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 	return 0;
 
 err_register:
-	if (boot)
-		mlx5_devlink_unregister(priv_to_devlink(dev));
+	mlx5_devlink_unregister(priv_to_devlink(dev));
 err_devlink_reg:
 	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 	mlx5_unload(dev);
 err_load:
-	if (boot)
-		mlx5_cleanup_once(dev);
+	mlx5_cleanup_once(dev);
function_teardown:
-	mlx5_function_teardown(dev, boot);
+	mlx5_function_teardown(dev, true);
err_function:
 	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
@@ -1299,33 +1290,84 @@ out:
 	return err;
 }
 
-void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
+void mlx5_uninit_one(struct mlx5_core_dev *dev)
 {
 	mutex_lock(&dev->intf_state_mutex);
 
-	if (cleanup) {
-		mlx5_unregister_device(dev);
-		mlx5_devlink_unregister(priv_to_devlink(dev));
-	} else {
-		mlx5_detach_device(dev);
-	}
+	mlx5_unregister_device(dev);
+	mlx5_devlink_unregister(priv_to_devlink(dev));
 
 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
 			       __func__);
-		if (cleanup)
-			mlx5_cleanup_once(dev);
+		mlx5_cleanup_once(dev);
 		goto out;
 	}
 
 	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-
 	mlx5_unload(dev);
+	mlx5_cleanup_once(dev);
+	mlx5_function_teardown(dev, true);
+out:
+	mutex_unlock(&dev->intf_state_mutex);
+}
 
-	if (cleanup)
-		mlx5_cleanup_once(dev);
-
-	mlx5_function_teardown(dev, cleanup);
+int mlx5_load_one(struct mlx5_core_dev *dev)
+{
+	int err = 0;
+
+	mutex_lock(&dev->intf_state_mutex);
+	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+		mlx5_core_warn(dev, "interface is up, NOP\n");
+		goto out;
+	}
+	/* remove any previous indication of internal error */
+	dev->state = MLX5_DEVICE_STATE_UP;
+
+	err = mlx5_function_setup(dev, false);
+	if (err)
+		goto err_function;
+
+	err = mlx5_load(dev);
+	if (err)
+		goto err_load;
+
+	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+
+	err = mlx5_attach_device(dev);
+	if (err)
+		goto err_attach;
+
+	mutex_unlock(&dev->intf_state_mutex);
+	return 0;
+
+err_attach:
+	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+	mlx5_unload(dev);
+err_load:
+	mlx5_function_teardown(dev, false);
+err_function:
+	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+out:
+	mutex_unlock(&dev->intf_state_mutex);
+	return err;
+}
+
+void mlx5_unload_one(struct mlx5_core_dev *dev)
+{
+	mutex_lock(&dev->intf_state_mutex);
+
+	mlx5_detach_device(dev);
+
+	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+			       __func__);
+		goto out;
+	}
+
+	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+	mlx5_unload(dev);
+	mlx5_function_teardown(dev, false);
out:
 	mutex_unlock(&dev->intf_state_mutex);
 }
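With the split above, each caller picks the entry point matching its lifetime stage; the pairings visible in the rest of this diff are probe/remove -> mlx5_init_one()/mlx5_uninit_one(), and recovery, suspend/resume, devlink reload and shutdown -> mlx5_load_one()/mlx5_unload_one(). A condensed sketch with hypothetical function names, bodies trimmed to the relevant call:

static int probe_one_sketch(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev); /* set up earlier in the real probe */

	return mlx5_init_one(dev);   /* function_setup(true) + init_once + load + register */
}

static int resume_sketch(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return mlx5_load_one(dev);   /* function_setup(false) + load + attach; sw state kept */
}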
@@ -1397,7 +1439,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
 	mutex_destroy(&dev->intf_state_mutex);
 }
 
-static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct mlx5_core_dev *dev;
 	struct devlink *devlink;
@@ -1433,11 +1475,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto pci_init_err;
 	}
 
-	err = mlx5_load_one(dev, true);
+	err = mlx5_init_one(dev);
 	if (err) {
-		mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
+		mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
 			      err);
-		goto err_load_one;
+		goto err_init_one;
 	}
 
 	err = mlx5_crdump_enable(dev);
@@ -1449,7 +1491,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	devlink_reload_enable(devlink);
 	return 0;
 
-err_load_one:
+err_init_one:
 	mlx5_pci_close(dev);
pci_init_err:
 	mlx5_mdev_uninit(dev);
@@ -1469,7 +1511,7 @@ static void remove_one(struct pci_dev *pdev)
 	devlink_reload_disable(devlink);
 	mlx5_crdump_disable(dev);
 	mlx5_drain_health_wq(dev);
-	mlx5_unload_one(dev, true);
+	mlx5_uninit_one(dev);
 	mlx5_pci_close(dev);
 	mlx5_mdev_uninit(dev);
 	mlx5_adev_idx_free(dev->priv.adev_idx);
@@ -1485,7 +1527,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev, false);
 	mlx5_error_sw_reset(dev);
-	mlx5_unload_one(dev, false);
+	mlx5_unload_one(dev);
 	mlx5_drain_health_wq(dev);
 	mlx5_pci_disable_device(dev);
 
@@ -1555,7 +1597,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
 
 	mlx5_core_info(dev, "%s was called\n", __func__);
 
-	err = mlx5_load_one(dev, false);
+	err = mlx5_load_one(dev);
 	if (err)
 		mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
 			      __func__, err);
@@ -1627,7 +1669,7 @@ static void shutdown(struct pci_dev *pdev)
 	mlx5_core_info(dev, "Shutdown was called\n");
 	err = mlx5_try_fast_unload(dev);
 	if (err)
-		mlx5_unload_one(dev, false);
+		mlx5_unload_one(dev);
 	mlx5_pci_disable_device(dev);
 }
 
@@ -1635,7 +1677,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 
-	mlx5_unload_one(dev, false);
+	mlx5_unload_one(dev);
 
 	return 0;
 }
@@ -1644,7 +1686,7 @@ static int mlx5_resume(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 
-	return mlx5_load_one(dev, false);
+	return mlx5_load_one(dev);
 }
 
 static const struct pci_device_id mlx5_core_pci_table[] = {
@@ -1676,20 +1718,23 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
 void mlx5_disable_device(struct mlx5_core_dev *dev)
 {
 	mlx5_error_sw_reset(dev);
-	mlx5_unload_one(dev, false);
+	mlx5_unload_one(dev);
 }
 
-void mlx5_recover_device(struct mlx5_core_dev *dev)
+int mlx5_recover_device(struct mlx5_core_dev *dev)
 {
+	int ret = -EIO;
+
 	mlx5_pci_disable_device(dev);
 	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
-		mlx5_pci_resume(dev->pdev);
+		ret = mlx5_load_one(dev);
+	return ret;
 }
 
 static struct pci_driver mlx5_core_driver = {
 	.name = KBUILD_MODNAME,
 	.id_table = mlx5_core_pci_table,
-	.probe = init_one,
+	.probe = probe_one,
 	.remove = remove_one,
 	.suspend = mlx5_suspend,
 	.resume = mlx5_resume,
@@ -134,7 +134,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
 u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
 int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
 void mlx5_disable_device(struct mlx5_core_dev *dev);
-void mlx5_recover_device(struct mlx5_core_dev *dev);
+int mlx5_recover_device(struct mlx5_core_dev *dev);
 int mlx5_sriov_init(struct mlx5_core_dev *dev);
 void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
 int mlx5_sriov_attach(struct mlx5_core_dev *dev);
@@ -267,8 +267,10 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
 
 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
 void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
-void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
-int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
+int mlx5_init_one(struct mlx5_core_dev *dev);
+void mlx5_uninit_one(struct mlx5_core_dev *dev);
+void mlx5_unload_one(struct mlx5_core_dev *dev);
+int mlx5_load_one(struct mlx5_core_dev *dev);
 
 int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
 
@@ -47,7 +47,7 @@ static inline void mlx5_sf_driver_unregister(void)
 
 static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
 {
-	return 0;
+	return false;
 }
 
 #endif
@@ -41,14 +41,15 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
 		goto remap_err;
 	}
 
-	err = mlx5_load_one(mdev, true);
+	err = mlx5_init_one(mdev);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err);
-		goto load_one_err;
+		mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
+		goto init_one_err;
 	}
 	devlink_reload_enable(devlink);
 	return 0;
 
-load_one_err:
+init_one_err:
 	iounmap(mdev->iseg);
remap_err:
 	mlx5_mdev_uninit(mdev);
@@ -63,7 +64,8 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
 	struct devlink *devlink;
 
 	devlink = priv_to_devlink(sf_dev->mdev);
-	mlx5_unload_one(sf_dev->mdev, true);
+	devlink_reload_disable(devlink);
+	mlx5_uninit_one(sf_dev->mdev);
 	iounmap(sf_dev->mdev->iseg);
 	mlx5_mdev_uninit(sf_dev->mdev);
 	mlx5_devlink_free(devlink);
@@ -73,7 +75,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
 {
 	struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
 
-	mlx5_unload_one(sf_dev->mdev, false);
+	mlx5_unload_one(sf_dev->mdev);
 }
 
 static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
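The SF auxiliary driver thus maps its three lifecycle callbacks onto the new API one-to-one. A hypothetical condensation of the registration (the real driver also sets an id_table and other fields):

#include <linux/auxiliary_bus.h>

static struct auxiliary_driver mlx5_sf_driver_sketch = {
	.name     = "sf",
	.probe    = mlx5_sf_dev_probe,     /* -> mlx5_init_one()   */
	.remove   = mlx5_sf_dev_remove,    /* -> mlx5_uninit_one() */
	.shutdown = mlx5_sf_dev_shutdown,  /* -> mlx5_unload_one() */
};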