Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-11-04 (ice, idpf, i40e, e1000e)

For ice:

Marcin adjusts the ordering of calls in ice_eswitch_detach() to resolve a
use-after-free issue.

Mateusz corrects the Flow Director queue index variable type so it can
store the -1 value required by drop actions.

For idpf:

Pavan resolves reset issues on idpf by avoiding use of a freed vport and
correctly unrolling the mailbox task.

For i40e:

Aleksandr fixes a race condition involving addition and deletion of VF
MAC filters.

For e1000e:

Vitaly reverts a Meteor Lake workaround that was causing regressions in
power management flows.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  e1000e: Remove Meteor Lake SMBUS workarounds
  i40e: fix race condition by adding filter's intermediate sync state
  idpf: fix idpf_vc_core_init error path
  idpf: avoid vport access in idpf_get_link_ksettings
  ice: change q_index variable type to s16 to store -1 value
  ice: Fix use after free during unload with ports in bridge
====================

Link: https://patch.msgid.link/20241104223639.2801097-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2024-11-05 18:05:50 -08:00 (commit 26a2bebd2c)
11 changed files with 32 additions and 32 deletions

drivers/net/ethernet/intel/e1000e/ich8lan.c

@@ -1205,13 +1205,11 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
 	if (ret_val)
 		goto out;
 
-	if (hw->mac.type != e1000_pch_mtp) {
-		ret_val = e1000e_force_smbus(hw);
-		if (ret_val) {
-			e_dbg("Failed to force SMBUS: %d\n", ret_val);
-			goto release;
-		}
+	ret_val = e1000e_force_smbus(hw);
+	if (ret_val) {
+		e_dbg("Failed to force SMBUS: %d\n", ret_val);
+		goto release;
 	}
 
 	/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
 	 * LPLU and disable Gig speed when entering ULP
@@ -1273,13 +1271,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
 	}
 
 release:
-	if (hw->mac.type == e1000_pch_mtp) {
-		ret_val = e1000e_force_smbus(hw);
-		if (ret_val)
-			e_dbg("Failed to force SMBUS over MTL system: %d\n",
-			      ret_val);
-	}
-
 	hw->phy.ops.release(hw);
 out:
 	if (ret_val)

drivers/net/ethernet/intel/i40e/i40e.h

@@ -755,6 +755,7 @@ enum i40e_filter_state {
 	I40E_FILTER_ACTIVE,	/* Added to switch by FW */
 	I40E_FILTER_FAILED,	/* Rejected by FW */
 	I40E_FILTER_REMOVE,	/* To be removed */
+	I40E_FILTER_NEW_SYNC,	/* New, not sent yet, is in i40e_sync_vsi_filters() */
 	/* There is no 'removed' state; the filter struct is freed */
 };
 struct i40e_mac_filter {

drivers/net/ethernet/intel/i40e/i40e_debugfs.c

@@ -89,6 +89,7 @@ static char *i40e_filter_state_string[] = {
 	"ACTIVE",
 	"FAILED",
 	"REMOVE",
+	"NEW_SYNC",
 };
 
 /**

drivers/net/ethernet/intel/i40e/i40e_main.c

@@ -1255,6 +1255,7 @@ int i40e_count_filters(struct i40e_vsi *vsi)
 
 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
 		if (f->state == I40E_FILTER_NEW ||
+		    f->state == I40E_FILTER_NEW_SYNC ||
 		    f->state == I40E_FILTER_ACTIVE)
 			++cnt;
 	}
@@ -1441,6 +1442,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
 
 			new->f = add_head;
 			new->state = add_head->state;
+			if (add_head->state == I40E_FILTER_NEW)
+				add_head->state = I40E_FILTER_NEW_SYNC;
 
 			/* Add the new filter to the tmp list */
 			hlist_add_head(&new->hlist, tmp_add_list);
@@ -1550,6 +1553,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
 			return -ENOMEM;
 		new_mac->f = add_head;
 		new_mac->state = add_head->state;
+		if (add_head->state == I40E_FILTER_NEW)
+			add_head->state = I40E_FILTER_NEW_SYNC;
 
 		/* Add the new filter to the tmp list */
 		hlist_add_head(&new_mac->hlist, tmp_add_list);
@@ -2437,7 +2442,8 @@ static int
 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
 			  struct i40e_mac_filter *f)
 {
-	bool enable = f->state == I40E_FILTER_NEW;
+	bool enable = f->state == I40E_FILTER_NEW ||
+		      f->state == I40E_FILTER_NEW_SYNC;
 	struct i40e_hw *hw = &vsi->back->hw;
 	int aq_ret;
 
@@ -2611,6 +2617,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
 			/* Add it to the hash list */
 			hlist_add_head(&new->hlist, &tmp_add_list);
+			f->state = I40E_FILTER_NEW_SYNC;
 		}
 
 		/* Count the number of active (current and new) VLAN
@@ -2762,7 +2769,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		spin_lock_bh(&vsi->mac_filter_hash_lock);
 		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
 			/* Only update the state if we're still NEW */
-			if (new->f->state == I40E_FILTER_NEW)
+			if (new->f->state == I40E_FILTER_NEW ||
+			    new->f->state == I40E_FILTER_NEW_SYNC)
 				new->f->state = new->state;
 			hlist_del(&new->hlist);
 			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);

drivers/net/ethernet/intel/ice/ice_eswitch.c

@@ -552,13 +552,14 @@ int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
 static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
 {
 	ice_eswitch_stop_reprs(pf);
+	repr->ops.rem(repr);
+
 	xa_erase(&pf->eswitch.reprs, repr->id);
 
 	if (xa_empty(&pf->eswitch.reprs))
 		ice_eswitch_disable_switchdev(pf);
 
 	ice_eswitch_release_repr(pf, repr);
-	repr->ops.rem(repr);
 	ice_repr_destroy(repr);
 
 	if (xa_empty(&pf->eswitch.reprs)) {

drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c

@@ -1830,11 +1830,12 @@ static int
 ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
 		       struct ice_fdir_fltr *input)
 {
-	u16 dest_vsi, q_index = 0;
+	s16 q_index = ICE_FDIR_NO_QUEUE_IDX;
 	u16 orig_q_index = 0;
 	struct ice_pf *pf;
 	struct ice_hw *hw;
 	int flow_type;
+	u16 dest_vsi;
 	u8 dest_ctl;
 
 	if (!vsi || !fsp || !input)

drivers/net/ethernet/intel/ice/ice_fdir.h

@@ -53,6 +53,8 @@
  */
 #define ICE_FDIR_IPV4_PKT_FLAG_MF	0x20
 
+#define ICE_FDIR_NO_QUEUE_IDX		-1
+
 enum ice_fltr_prgm_desc_dest {
 	ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
 	ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
@@ -186,7 +188,7 @@ struct ice_fdir_fltr {
 	u16 flex_fltr;
 
 	/* filter control */
-	u16 q_index;
+	s16 q_index;
 	u16 orig_q_index;
 	u16 dest_vsi;
 	u8 dest_ctl;
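
For context on the q_index type change, a tiny standalone illustration (my own
example, not driver code): the new ICE_FDIR_NO_QUEUE_IDX sentinel is -1, which an
unsigned 16-bit variable cannot hold; assigning -1 to the old u16 wraps to 65535,
while an s16 keeps the sentinel intact.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t q_u16 = -1;	/* old type: wraps to 65535, the "-1 means no queue" meaning is lost */
	int16_t q_s16 = -1;	/* new type: keeps the -1 sentinel */

	printf("u16 q_index: %u\n", (unsigned int)q_u16);	/* prints 65535 */
	printf("s16 q_index: %d\n", q_s16);			/* prints -1 */
	return 0;
}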

drivers/net/ethernet/intel/idpf/idpf.h

@@ -141,6 +141,7 @@ enum idpf_vport_state {
  * @adapter: Adapter back pointer
  * @vport: Vport back pointer
  * @vport_id: Vport identifier
+ * @link_speed_mbps: Link speed in mbps
  * @vport_idx: Relative vport index
  * @state: See enum idpf_vport_state
  * @netstats: Packet and byte stats
@@ -150,6 +151,7 @@ struct idpf_netdev_priv {
 	struct idpf_adapter *adapter;
 	struct idpf_vport *vport;
 	u32 vport_id;
+	u32 link_speed_mbps;
 	u16 vport_idx;
 	enum idpf_vport_state state;
 	struct rtnl_link_stats64 netstats;
@@ -287,7 +289,6 @@ struct idpf_port_stats {
  * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
  * @port_stats: per port csum, header split, and other offload stats
  * @link_up: True if link is up
- * @link_speed_mbps: Link speed in mbps
  * @sw_marker_wq: workqueue for marker packets
  */
 struct idpf_vport {
@@ -331,7 +332,6 @@ struct idpf_vport {
 	struct idpf_port_stats port_stats;
 
 	bool link_up;
-	u32 link_speed_mbps;
 
 	wait_queue_head_t sw_marker_wq;
 };

drivers/net/ethernet/intel/idpf/idpf_ethtool.c

@@ -1296,24 +1296,19 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data)
 static int idpf_get_link_ksettings(struct net_device *netdev,
 				   struct ethtool_link_ksettings *cmd)
 {
-	struct idpf_vport *vport;
-
-	idpf_vport_ctrl_lock(netdev);
-	vport = idpf_netdev_to_vport(netdev);
+	struct idpf_netdev_priv *np = netdev_priv(netdev);
 
 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
 	cmd->base.autoneg = AUTONEG_DISABLE;
 	cmd->base.port = PORT_NONE;
 
-	if (vport->link_up) {
+	if (netif_carrier_ok(netdev)) {
 		cmd->base.duplex = DUPLEX_FULL;
-		cmd->base.speed = vport->link_speed_mbps;
+		cmd->base.speed = np->link_speed_mbps;
 	} else {
 		cmd->base.duplex = DUPLEX_UNKNOWN;
 		cmd->base.speed = SPEED_UNKNOWN;
 	}
 
-	idpf_vport_ctrl_unlock(netdev);
-
 	return 0;
 }

drivers/net/ethernet/intel/idpf/idpf_lib.c

@@ -1786,6 +1786,7 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
 	 */
 	err = idpf_vc_core_init(adapter);
 	if (err) {
+		cancel_delayed_work_sync(&adapter->mbx_task);
 		idpf_deinit_dflt_mbx(adapter);
 		goto unlock_mutex;
 	}
@@ -1860,7 +1861,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 	 * mess with. Nothing below should use those variables from new_vport
 	 * and should instead always refer to them in vport if they need to.
 	 */
-	memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
+	memcpy(new_vport, vport, offsetof(struct idpf_vport, link_up));
 
 	/* Adjust resource parameters prior to reallocating resources */
 	switch (reset_cause) {
@@ -1906,7 +1907,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 	/* Same comment as above regarding avoiding copying the wait_queues and
 	 * mutexes applies here. We do not want to mess with those if possible.
 	 */
-	memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
+	memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
 
 	if (reset_cause == IDPF_SR_Q_CHANGE)
 		idpf_vport_alloc_vec_indexes(vport);
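
For reference, a minimal standalone sketch of the offsetof()-based partial copy
seen in the two hunks above (struct and field names here are illustrative, not
the real idpf_vport layout): memcpy() with an offsetof() bound copies only the
members declared before the marker field, leaving the marker and everything after
it untouched. With link_speed_mbps removed from struct idpf_vport, link_up becomes
the new boundary field.

#include <stddef.h>
#include <string.h>

struct vport_like {
	int cfg_a;			/* copied */
	int cfg_b;			/* copied */
	int link_up;			/* boundary: this member and later ones are not copied */
	int wait_queue_placeholder;	/* preserved across the copy */
};

/* Copy only the configuration members laid out before 'link_up'. */
void copy_cfg(struct vport_like *dst, const struct vport_like *src)
{
	memcpy(dst, src, offsetof(struct vport_like, link_up));
}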

drivers/net/ethernet/intel/idpf/idpf_virtchnl.c

@@ -141,7 +141,7 @@ static void idpf_handle_event_link(struct idpf_adapter *adapter,
 	}
 	np = netdev_priv(vport->netdev);
 
-	vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+	np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
 
 	if (vport->link_up == v2e->link_status)
 		return;
@@ -3063,7 +3063,6 @@ init_failed:
 	adapter->state = __IDPF_VER_CHECK;
 	if (adapter->vcxn_mngr)
 		idpf_vc_xn_shutdown(adapter->vcxn_mngr);
-	idpf_deinit_dflt_mbx(adapter);
 	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
 	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
 			   msecs_to_jiffies(task_delay));