Merge branch 'sfc-EF100-VF-representors'

Edward Cree says:

====================
sfc: VF representors for EF100

This series adds representor netdevices for EF100 VFs, as a step towards
 supporting TC offload and vDPA use cases in future patches.
This first series covers basic netdevice creation and packet TX; the
 following series will add the RX path.

v3: dropped the massive mcdi_pcol.h patch, which was applied separately.
v2: converted comments on struct efx_nic members added in patch #4 to
 kernel-doc (Jakub).  While at it, also gave struct efx_rep its own
 kernel-doc, since several members had comments on them.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2022-07-22 12:50:07 +01:00
commit 735dbc69ab
23 changed files with 687 additions and 53 deletions
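For orientation, here is a condensed sketch of the TX path this series adds, distilled from the ef100_rep.c and ef100_tx.c changes below (the function name rep_xmit_sketch is illustrative, not the literal driver code): the representor's ndo_start_xmit hands the skb to the parent PF's TX path, which prepends a TX prefix (override) descriptor carrying the VF's m-port so the hardware switch delivers the packet to the VF instead of the wire.

/* Condensed sketch, not the literal driver code: how a representor
 * transmit is funnelled through the parent PF's TX queue.
 */
static netdev_tx_t rep_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);	/* per-representor state */
	struct efx_nic *efx = efv->parent;	/* owning PF */
	netdev_tx_t rc;

	/* Representor stats only count attempted TX; drops are counted
	 * against the parent PF netdevice.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);

	/* The PF's TX queue is shared, so serialise against the PF */
	netif_tx_lock(efx->net_dev);
	/* A non-NULL efv makes the PF TX path emit an override descriptor
	 * with ESF_GZ_TX_PREFIX_EGRESS_MPORT = efv->mport ahead of the
	 * usual SEND/SEG descriptors.
	 */
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}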

drivers/net/ethernet/sfc/Makefile

@ -8,7 +8,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o mae.o
obj-$(CONFIG_SFC) += sfc.o

drivers/net/ethernet/sfc/ef100_netdev.c

@ -85,6 +85,7 @@ static int ef100_net_stop(struct net_device *net_dev)
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id());
efx_detach_reps(efx);
netif_stop_queue(net_dev);
efx_stop_all(efx);
efx_mcdi_mac_fini_stats(efx);
@ -176,6 +177,8 @@ static int ef100_net_open(struct net_device *net_dev)
mutex_unlock(&efx->mac_lock);
efx->state = STATE_NET_UP;
if (netif_running(efx->net_dev))
efx_attach_reps(efx);
return 0;
@ -195,6 +198,15 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
return __ef100_hard_start_xmit(skb, efx, net_dev, NULL);
}
netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
struct efx_nic *efx,
struct net_device *net_dev,
struct efx_rep *efv)
{
struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
int rc;
@ -209,7 +221,7 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb,
}
tx_queue = &channel->tx_queue[0];
rc = ef100_enqueue_skb(tx_queue, skb);
rc = __ef100_enqueue_skb(tx_queue, skb, efv);
if (rc == 0)
return NETDEV_TX_OK;
@ -312,7 +324,7 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
unregister_netdevice_notifier(&efx->netdev_notifier);
#if defined(CONFIG_SFC_SRIOV)
if (!efx->type->is_vf)
efx_ef100_pci_sriov_disable(efx);
efx_ef100_pci_sriov_disable(efx, true);
#endif
ef100_unregister_netdev(efx);

drivers/net/ethernet/sfc/ef100_netdev.h

@ -10,7 +10,12 @@
*/
#include <linux/netdevice.h>
#include "ef100_rep.h"
netdev_tx_t __ef100_hard_start_xmit(struct sk_buff *skb,
struct efx_nic *efx,
struct net_device *net_dev,
struct efx_rep *efv);
int ef100_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
int ef100_probe_netdev(struct efx_probe_data *probe_data);

drivers/net/ethernet/sfc/ef100_nic.c

@ -946,6 +946,7 @@ static int ef100_probe_main(struct efx_nic *efx)
unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
struct ef100_nic_data *nic_data;
char fw_version[32];
u32 priv_mask = 0;
int i, rc;
if (WARN_ON(bar_size == 0))
@ -1027,6 +1028,12 @@ static int ef100_probe_main(struct efx_nic *efx)
efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
pci_dbg(efx->pci_dev, "Firmware version %s\n", fw_version);
rc = efx_mcdi_get_privilege_mask(efx, &priv_mask);
if (rc) /* non-fatal, and priv_mask will still be 0 */
pci_info(efx->pci_dev,
"Failed to get privilege mask from FW, rc %d\n", rc);
nic_data->grp_mae = !!(priv_mask & MC_CMD_PRIVILEGE_MASK_IN_GRP_MAE);
if (compare_versions(fw_version, "1.1.0.1000") < 0) {
pci_info(efx->pci_dev, "Firmware uses old event descriptors\n");
rc = -EINVAL;

drivers/net/ethernet/sfc/ef100_nic.h

@ -72,6 +72,7 @@ struct ef100_nic_data {
u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
u64 stats[EF100_STAT_COUNT];
bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
u16 tso_max_frames;

drivers/net/ethernet/sfc/ef100_regs.h

@ -2,7 +2,7 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2018 Solarflare Communications Inc.
* Copyright 2019-2020 Xilinx Inc.
* Copyright 2019-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@ -181,12 +181,6 @@
/* RHEAD_BASE_EVENT */
#define ESF_GZ_E_TYPE_LBN 60
#define ESF_GZ_E_TYPE_WIDTH 4
#define ESE_GZ_EF100_EV_DRIVER 5
#define ESE_GZ_EF100_EV_MCDI 4
#define ESE_GZ_EF100_EV_CONTROL 3
#define ESE_GZ_EF100_EV_TX_TIMESTAMP 2
#define ESE_GZ_EF100_EV_TX_COMPLETION 1
#define ESE_GZ_EF100_EV_RX_PKTS 0
#define ESF_GZ_EV_EVQ_PHASE_LBN 59
#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1
#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64
@ -369,14 +363,18 @@
#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144
#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16
#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128
#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16
#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_LBN 128
#define ESF_GZ_RX_PREFIX_INGRESS_MPORT_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96
#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32
#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64
#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32
#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32
#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32
#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 34
#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 30
#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_LBN 33
#define ESF_GZ_RX_PREFIX_VSWITCH_STATUS_WIDTH 1
#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_LBN 32
#define ESF_GZ_RX_PREFIX_VLAN_STRIPPED_WIDTH 1
#define ESF_GZ_RX_PREFIX_CLASS_LBN 16
#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16
#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15
@ -454,12 +452,8 @@
#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_M2M_RSVD_LBN 120
#define ESF_GZ_M2M_RSVD_WIDTH 2
#define ESF_GZ_M2M_ADDR_SPC_LBN 108
#define ESF_GZ_M2M_ADDR_SPC_WIDTH 12
#define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86
#define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22
#define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84
#define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2
#define ESF_GZ_M2M_ADDR_SPC_ID_LBN 84
#define ESF_GZ_M2M_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64
#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20
#define ESF_GZ_M2M_ADDR_LBN 0
@ -492,12 +486,8 @@
#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1
#define ESF_GZ_TX_SEG_RSVD2_LBN 120
#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2
#define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108
#define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12
#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86
#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22
#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84
#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2
#define ESF_GZ_TX_SEG_ADDR_SPC_ID_LBN 84
#define ESF_GZ_TX_SEG_ADDR_SPC_ID_WIDTH 36
#define ESF_GZ_TX_SEG_RSVD_LBN 80
#define ESF_GZ_TX_SEG_RSVD_WIDTH 4
#define ESF_GZ_TX_SEG_LEN_LBN 64
@ -583,6 +573,12 @@
#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124
/* Enum D2VIO_MSG_OP */
#define ESE_GZ_QUE_JBDNE 3
#define ESE_GZ_QUE_EVICT 2
#define ESE_GZ_QUE_EMPTY 1
#define ESE_GZ_NOP 0
/* Enum DESIGN_PARAMS */
#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17
#define ESE_EF100_DP_GZ_VI_STRIDES 16
@ -630,6 +626,19 @@
#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256
#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4
/* Enum RH_DSC_TYPE */
#define ESE_GZ_TX_TOMB 0xF
#define ESE_GZ_TX_VIO 0xE
#define ESE_GZ_TX_TSO_OVRRD 0x8
#define ESE_GZ_TX_D2CMP 0x7
#define ESE_GZ_TX_DATA 0x6
#define ESE_GZ_TX_D2M 0x5
#define ESE_GZ_TX_M2M 0x4
#define ESE_GZ_TX_SEG 0x3
#define ESE_GZ_TX_TSO 0x2
#define ESE_GZ_TX_OVRRD 0x1
#define ESE_GZ_TX_SEND 0x0
/* Enum RH_HCLASS_L2_CLASS */
#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1
#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0
@ -666,6 +675,25 @@
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1
#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0
/* Enum SF_CTL_EVENT_SUBTYPE */
#define ESE_GZ_EF100_CTL_EV_EVQ_TIMEOUT 0x3
#define ESE_GZ_EF100_CTL_EV_FLUSH 0x2
#define ESE_GZ_EF100_CTL_EV_TIME_SYNC 0x1
#define ESE_GZ_EF100_CTL_EV_UNSOL_OVERFLOW 0x0
/* Enum SF_EVENT_TYPE */
#define ESE_GZ_EF100_EV_DRIVER 0x5
#define ESE_GZ_EF100_EV_MCDI 0x4
#define ESE_GZ_EF100_EV_CONTROL 0x3
#define ESE_GZ_EF100_EV_TX_TIMESTAMP 0x2
#define ESE_GZ_EF100_EV_TX_COMPLETION 0x1
#define ESE_GZ_EF100_EV_RX_PKTS 0x0
/* Enum SF_EW_EVENT_TYPE */
#define ESE_GZ_EF100_EWEV_VIRTQ_DESC 0x2
#define ESE_GZ_EF100_EWEV_TXQ_DESC 0x1
#define ESE_GZ_EF100_EWEV_64BIT 0x0
/* Enum TX_DESC_CSO_PARTIAL_EN */
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2
#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1
@ -681,6 +709,15 @@
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2
#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1
#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0
/* Enum VIRTIO_NET_HDR_F */
#define ESE_GZ_NEEDS_CSUM 0x1
/* Enum VIRTIO_NET_HDR_GSO */
#define ESE_GZ_TCPV6 0x4
#define ESE_GZ_UDP 0x3
#define ESE_GZ_TCPV4 0x1
#define ESE_GZ_NONE 0x0
/**************************************************************************/
#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44

drivers/net/ethernet/sfc/ef100_rep.c

@ -0,0 +1,244 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
unsigned int i)
{
efv->parent = efx;
efv->idx = i;
INIT_LIST_HEAD(&efv->list);
efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
NETIF_MSG_TX_ERR | NETIF_MSG_HW;
return 0;
}
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct efx_rep *efv = netdev_priv(dev);
struct efx_nic *efx = efv->parent;
netdev_tx_t rc;
/* __ef100_hard_start_xmit() will always return success even in the
* case of TX drops, where it will increment efx's tx_dropped. The
* efv stats really only count attempted TX, not success/failure.
*/
atomic64_inc(&efv->stats.tx_packets);
atomic64_add(skb->len, &efv->stats.tx_bytes);
netif_tx_lock(efx->net_dev);
rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
netif_tx_unlock(efx->net_dev);
return rc;
}
static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
struct efx_rep *efv = netdev_priv(dev);
struct efx_nic *efx = efv->parent;
struct ef100_nic_data *nic_data;
nic_data = efx->nic_data;
/* nic_data->port_id is a u8[] */
ppid->id_len = sizeof(nic_data->port_id);
memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
return 0;
}
static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
struct efx_rep *efv = netdev_priv(dev);
struct efx_nic *efx = efv->parent;
struct ef100_nic_data *nic_data;
int ret;
nic_data = efx->nic_data;
ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
nic_data->pf_index, efv->idx);
if (ret >= len)
return -EOPNOTSUPP;
return 0;
}
static const struct net_device_ops efx_ef100_rep_netdev_ops = {
.ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
};
static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}
static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
return efv->msg_enable;
}
static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
u32 msg_enable)
{
struct efx_rep *efv = netdev_priv(net_dev);
efv->msg_enable = msg_enable;
}
static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
.get_drvinfo = efx_ef100_rep_get_drvinfo,
.get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
.set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
};
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
unsigned int i)
{
struct net_device *net_dev;
struct efx_rep *efv;
int rc;
net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
if (!net_dev)
return ERR_PTR(-ENOMEM);
efv = netdev_priv(net_dev);
rc = efx_ef100_rep_init_struct(efx, efv, i);
if (rc)
goto fail1;
efv->net_dev = net_dev;
rtnl_lock();
spin_lock_bh(&efx->vf_reps_lock);
list_add_tail(&efv->list, &efx->vf_reps);
spin_unlock_bh(&efx->vf_reps_lock);
if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
netif_device_attach(net_dev);
netif_carrier_on(net_dev);
} else {
netif_carrier_off(net_dev);
netif_tx_stop_all_queues(net_dev);
}
rtnl_unlock();
net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
net_dev->features |= NETIF_F_LLTX;
net_dev->hw_features |= NETIF_F_LLTX;
return efv;
fail1:
free_netdev(net_dev);
return ERR_PTR(rc);
}
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
u32 selector;
int rc;
/* Construct mport selector for corresponding VF */
efx_mae_mport_vf(efx, efv->idx, &selector);
/* Look up actual mport ID */
rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
if (rc)
return rc;
pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
/* mport label should fit in 16 bits */
WARN_ON(efv->mport >> 16);
return 0;
}
static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
rtnl_lock();
spin_lock_bh(&efx->vf_reps_lock);
list_del(&efv->list);
spin_unlock_bh(&efx->vf_reps_lock);
rtnl_unlock();
free_netdev(efv->net_dev);
}
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
struct efx_rep *efv;
int rc;
efv = efx_ef100_rep_create_netdev(efx, i);
if (IS_ERR(efv)) {
rc = PTR_ERR(efv);
pci_err(efx->pci_dev,
"Failed to create representor for VF %d, rc %d\n", i,
rc);
return rc;
}
rc = efx_ef100_configure_rep(efv);
if (rc) {
pci_err(efx->pci_dev,
"Failed to configure representor for VF %d, rc %d\n",
i, rc);
goto fail;
}
rc = register_netdev(efv->net_dev);
if (rc) {
pci_err(efx->pci_dev,
"Failed to register representor for VF %d, rc %d\n",
i, rc);
goto fail;
}
pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
efv->net_dev->name);
return 0;
fail:
efx_ef100_rep_destroy_netdev(efv);
return rc;
}
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
struct net_device *rep_dev;
rep_dev = efv->net_dev;
if (!rep_dev)
return;
netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
unregister_netdev(rep_dev);
efx_ef100_rep_destroy_netdev(efv);
}
void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
struct efx_rep *efv, *next;
if (!nic_data->grp_mae)
return;
list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
efx_ef100_vfrep_destroy(efx, efv);
}

drivers/net/ethernet/sfc/ef100_rep.h

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
/* Handling for ef100 representor netdevs */
#ifndef EF100_REP_H
#define EF100_REP_H
#include "net_driver.h"
struct efx_rep_sw_stats {
atomic64_t rx_packets, tx_packets;
atomic64_t rx_bytes, tx_bytes;
atomic64_t rx_dropped, tx_errors;
};
/**
* struct efx_rep - Private data for an Efx representor
*
* @parent: the efx PF which manages this representor
* @net_dev: representor netdevice
* @msg_enable: log message enable flags
* @mport: m-port ID of corresponding VF
* @idx: VF index
* @list: entry on efx->vf_reps
* @stats: software traffic counters for netdev stats
*/
struct efx_rep {
struct efx_nic *parent;
struct net_device *net_dev;
u32 msg_enable;
u32 mport;
unsigned int idx;
struct list_head list;
struct efx_rep_sw_stats stats;
};
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
void efx_ef100_fini_vfreps(struct efx_nic *efx);
#endif /* EF100_REP_H */

drivers/net/ethernet/sfc/ef100_sriov.c

@ -11,46 +11,62 @@
#include "ef100_sriov.h"
#include "ef100_nic.h"
#include "ef100_rep.h"
static int efx_ef100_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
{
struct ef100_nic_data *nic_data = efx->nic_data;
struct pci_dev *dev = efx->pci_dev;
int rc;
struct efx_rep *efv, *next;
int rc, i;
efx->vf_count = num_vfs;
rc = pci_enable_sriov(dev, num_vfs);
if (rc)
goto fail;
goto fail1;
if (!nic_data->grp_mae)
return 0;
for (i = 0; i < num_vfs; i++) {
rc = efx_ef100_vfrep_create(efx, i);
if (rc)
goto fail2;
}
return 0;
fail:
fail2:
list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
efx_ef100_vfrep_destroy(efx, efv);
pci_disable_sriov(dev);
fail1:
netif_err(efx, probe, efx->net_dev, "Failed to enable SRIOV VFs\n");
efx->vf_count = 0;
return rc;
}
int efx_ef100_pci_sriov_disable(struct efx_nic *efx)
int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
unsigned int vfs_assigned;
vfs_assigned = pci_vfs_assigned(dev);
if (vfs_assigned) {
if (vfs_assigned && !force) {
netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
"please detach them before disabling SR-IOV\n");
return -EBUSY;
}
pci_disable_sriov(dev);
efx_ef100_fini_vfreps(efx);
if (!vfs_assigned)
pci_disable_sriov(dev);
return 0;
}
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs)
{
if (num_vfs == 0)
return efx_ef100_pci_sriov_disable(efx);
return efx_ef100_pci_sriov_disable(efx, false);
else
return efx_ef100_pci_sriov_enable(efx, num_vfs);
}

drivers/net/ethernet/sfc/ef100_sriov.h

@ -11,4 +11,4 @@
#include "net_driver.h"
int efx_ef100_sriov_configure(struct efx_nic *efx, int num_vfs);
int efx_ef100_pci_sriov_disable(struct efx_nic *efx);
int efx_ef100_pci_sriov_disable(struct efx_nic *efx, bool force);

drivers/net/ethernet/sfc/ef100_tx.c

@ -254,7 +254,8 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb,
unsigned int segment_count)
unsigned int segment_count,
struct efx_rep *efv)
{
unsigned int old_write_count = tx_queue->write_count;
unsigned int new_write_count = old_write_count;
@ -272,6 +273,20 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
else
next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;
if (unlikely(efv)) {
/* Create TX override descriptor */
write_ptr = new_write_count & tx_queue->ptr_mask;
txd = ef100_tx_desc(tx_queue, write_ptr);
++new_write_count;
tx_queue->packet_write_count = new_write_count;
EFX_POPULATE_OWORD_3(*txd,
ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX,
ESF_GZ_TX_PREFIX_EGRESS_MPORT, efv->mport,
ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1);
nr_descs--;
}
/* if it's a raw write (such as XDP) then always SEND single frames */
if (!skb)
nr_descs = 1;
@ -306,6 +321,9 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
/* if it's a raw write (such as XDP) then always SEND */
next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
ESE_GZ_TX_DESC_TYPE_SEND;
/* mark as an EFV buffer if applicable */
if (unlikely(efv))
buffer->flags |= EFX_TX_BUF_EFV;
} while (new_write_count != tx_queue->insert_count);
@ -324,7 +342,7 @@ static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
ef100_tx_make_descriptors(tx_queue, NULL, 0);
ef100_tx_make_descriptors(tx_queue, NULL, 0, NULL);
ef100_tx_push_buffers(tx_queue);
}
@ -350,6 +368,12 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
* function will free the SKB.
*/
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
return __ef100_enqueue_skb(tx_queue, skb, NULL);
}
int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
struct efx_rep *efv)
{
unsigned int old_insert_count = tx_queue->insert_count;
struct efx_nic *efx = tx_queue->efx;
@ -376,16 +400,64 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return 0;
}
if (unlikely(efv)) {
struct efx_tx_buffer *buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
/* Drop representor packets if the queue is stopped.
* We currently don't assert backoff to representors so this is
* to make sure representor traffic can't starve the main
* net device.
* And, of course, if there are no TX descriptors left.
*/
if (netif_tx_queue_stopped(tx_queue->core_txq) ||
unlikely(efx_tx_buffer_in_use(buffer))) {
atomic64_inc(&efv->stats.tx_errors);
rc = -ENOSPC;
goto err;
}
/* Also drop representor traffic if it could cause us to
* stop the queue. If we assert backoff and we haven't
* received traffic on the main net device recently then the
* TX watchdog can go off erroneously.
*/
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
fill_level += efx_tx_max_skb_descs(efx);
if (fill_level > efx->txq_stop_thresh) {
struct efx_tx_queue *txq2;
/* Refresh cached fill level and re-check */
efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
fill_level += efx_tx_max_skb_descs(efx);
if (fill_level > efx->txq_stop_thresh) {
atomic64_inc(&efv->stats.tx_errors);
rc = -ENOSPC;
goto err;
}
}
buffer->flags = EFX_TX_BUF_OPTION | EFX_TX_BUF_EFV;
tx_queue->insert_count++;
}
/* Map for DMA and create descriptors */
rc = efx_tx_map_data(tx_queue, skb, segments);
if (rc)
goto err;
ef100_tx_make_descriptors(tx_queue, skb, segments);
ef100_tx_make_descriptors(tx_queue, skb, segments, efv);
fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level > efx->txq_stop_thresh) {
struct efx_tx_queue *txq2;
/* Because of checks above, representor traffic should
* not be able to stop the queue.
*/
WARN_ON(efv);
netif_tx_stop_queue(tx_queue->core_txq);
/* Re-read after a memory barrier in case we've raced with
* the completion path. Otherwise there's a danger we'll never
@ -404,8 +476,12 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* If xmit_more then we don't need to push the doorbell, unless there
* are 256 descriptors already queued in which case we have to push to
* ensure we never push more than 256 at once.
*
* Always push for representor traffic, and don't account it to parent
* PF netdevice's BQL.
*/
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
if (unlikely(efv) ||
__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);

drivers/net/ethernet/sfc/ef100_tx.h

@ -13,6 +13,7 @@
#define EFX_EF100_TX_H
#include "net_driver.h"
#include "ef100_rep.h"
int ef100_tx_probe(struct efx_tx_queue *tx_queue);
void ef100_tx_init(struct efx_tx_queue *tx_queue);
@ -22,4 +23,6 @@ unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
struct efx_rep *efv);
#endif

drivers/net/ethernet/sfc/efx.h

@ -12,6 +12,7 @@
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "efx_common.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@ -206,6 +207,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
{
struct net_device *dev = efx->net_dev;
/* We must stop reps (which use our TX) before we stop ourselves. */
efx_detach_reps(efx);
/* Lock/freeze all TX queues so that we can be sure the
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
@ -217,8 +221,11 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
{
if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
if ((efx->state != STATE_DISABLED) && !efx->reset_pending) {
netif_device_attach(efx->net_dev);
if (efx->state == STATE_NET_UP)
efx_attach_reps(efx);
}
}
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)

drivers/net/ethernet/sfc/efx_common.c

@ -24,6 +24,7 @@
#include "mcdi_port_common.h"
#include "io.h"
#include "mcdi_pcol.h"
#include "ef100_rep.h"
static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
@ -1021,6 +1022,8 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
spin_lock_init(&efx->vf_reps_lock);
INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
@ -1389,3 +1392,38 @@ int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
return -EINVAL;
return 0;
}
void efx_detach_reps(struct efx_nic *efx)
{
struct net_device *rep_dev;
struct efx_rep *efv;
ASSERT_RTNL();
netif_dbg(efx, drv, efx->net_dev, "Detaching VF representors\n");
list_for_each_entry(efv, &efx->vf_reps, list) {
rep_dev = efv->net_dev;
if (!rep_dev)
continue;
netif_carrier_off(rep_dev);
/* See efx_device_detach_sync() */
netif_tx_lock_bh(rep_dev);
netif_tx_stop_all_queues(rep_dev);
netif_tx_unlock_bh(rep_dev);
}
}
void efx_attach_reps(struct efx_nic *efx)
{
struct net_device *rep_dev;
struct efx_rep *efv;
ASSERT_RTNL();
netif_dbg(efx, drv, efx->net_dev, "Attaching VF representors\n");
list_for_each_entry(efv, &efx->vf_reps, list) {
rep_dev = efv->net_dev;
if (!rep_dev)
continue;
netif_tx_wake_all_queues(rep_dev);
netif_carrier_on(rep_dev);
}
}

drivers/net/ethernet/sfc/efx_common.h

@ -111,4 +111,7 @@ int efx_get_phys_port_id(struct net_device *net_dev,
int efx_get_phys_port_name(struct net_device *net_dev,
char *name, size_t len);
void efx_detach_reps(struct efx_nic *efx);
void efx_attach_reps(struct efx_nic *efx);
#endif

drivers/net/ethernet/sfc/mae.c

@ -0,0 +1,44 @@
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "mae.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
void efx_mae_mport_vf(struct efx_nic *efx __always_unused, u32 vf_id, u32 *out)
{
efx_dword_t mport;
EFX_POPULATE_DWORD_3(mport,
MAE_MPORT_SELECTOR_TYPE, MAE_MPORT_SELECTOR_TYPE_FUNC,
MAE_MPORT_SELECTOR_FUNC_PF_ID, MAE_MPORT_SELECTOR_FUNC_PF_ID_CALLER,
MAE_MPORT_SELECTOR_FUNC_VF_ID, vf_id);
*out = EFX_DWORD_VAL(mport);
}
/* id is really only 24 bits wide */
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, MAE_MPORT_LOOKUP_IN_MPORT_SELECTOR, selector);
rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_LOOKUP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
if (outlen < sizeof(outbuf))
return -EIO;
*id = MCDI_DWORD(outbuf, MAE_MPORT_LOOKUP_OUT_MPORT_ID);
return 0;
}

drivers/net/ethernet/sfc/mae.h

@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF100_MAE_H
#define EF100_MAE_H
/* MCDI interface for the ef100 Match-Action Engine */
#include "net_driver.h"
void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
#endif /* EF100_MAE_H */

drivers/net/ethernet/sfc/mcdi.c

@ -2129,6 +2129,52 @@ fail:
return rc;
}
/* Failure to read a privilege mask is never fatal, because we can always
* carry on as though we didn't have the privilege we were interested in.
* So use efx_mcdi_rpc_quiet().
*/
int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
{
MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
size_t outlen;
u16 pf, vf;
int rc;
if (!efx || !mask)
return -EINVAL;
/* Get our function number */
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN,
&outlen);
if (rc != 0)
return rc;
if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
return -EIO;
pf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_PF);
vf = MCDI_DWORD(fi_outbuf, GET_FUNCTION_INFO_OUT_VF);
MCDI_POPULATE_DWORD_2(pm_inbuf, PRIVILEGE_MASK_IN_FUNCTION,
PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PRIVILEGE_MASK,
pm_inbuf, sizeof(pm_inbuf),
pm_outbuf, sizeof(pm_outbuf), &outlen);
if (rc != 0)
return rc;
if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
return -EIO;
*mask = MCDI_DWORD(pm_outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
return 0;
}
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128

drivers/net/ethernet/sfc/mcdi.h

@ -366,6 +366,7 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
unsigned int *flags);
int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
unsigned int *enabled_out);
int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);

drivers/net/ethernet/sfc/net_driver.h

@ -178,6 +178,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
#define EFX_TX_BUF_EFV 0x100 /* buffer was sent from representor */
/**
* struct efx_tx_queue - An Efx TX queue
@ -966,6 +967,8 @@ enum efx_xdp_tx_queues_mode {
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
* @vf_reps_lock: Protects vf_reps list
* @vf_reps: local VF reps
* @ptp_data: PTP state data
* @ptp_warned: has this NIC seen and warned about unexpected PTP events?
* @vpd_sn: Serial number read from VPD
@ -1145,6 +1148,8 @@ struct efx_nic {
unsigned vf_init_count;
unsigned vi_scale;
#endif
spinlock_t vf_reps_lock;
struct list_head vf_reps;
struct efx_ptp_data *ptp_data;
bool ptp_warned;

drivers/net/ethernet/sfc/tx.c

@ -559,6 +559,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
unsigned int read_ptr;
bool finished = false;
@ -580,7 +581,8 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
/* Need to check the flag before dequeueing. */
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
&efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@ -589,7 +591,7 @@ void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
EFX_WARN_ON_PARANOID(pkts_compl != 1);
EFX_WARN_ON_PARANOID(pkts_compl + efv_pkts_compl != 1);
efx_xmit_done_check_empty(tx_queue);
}

drivers/net/ethernet/sfc/tx_common.c

@ -109,9 +109,11 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
&efv_pkts_compl);
++tx_queue->read_count;
}
@ -146,7 +148,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
unsigned int *bytes_compl,
unsigned int *efv_pkts_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@ -164,9 +167,15 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
if (buffer->flags & EFX_TX_BUF_SKB) {
struct sk_buff *skb = (struct sk_buff *)buffer->skb;
EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
(*pkts_compl)++;
(*bytes_compl) += skb->len;
if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) {
EFX_WARN_ON_PARANOID(!efv_pkts_compl);
(*efv_pkts_compl)++;
} else {
EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
(*pkts_compl)++;
(*bytes_compl) += skb->len;
}
if (tx_queue->timestamping &&
(tx_queue->completed_timestamp_major ||
tx_queue->completed_timestamp_minor)) {
@ -199,7 +208,8 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
unsigned int *bytes_compl,
unsigned int *efv_pkts_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@ -218,7 +228,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
efv_pkts_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@ -241,15 +252,17 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
unsigned int efv_pkts_compl = 0;
struct efx_nic *efx = tx_queue->efx;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
&efv_pkts_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
if (pkts_compl + efv_pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
@ -274,6 +287,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count)
{
unsigned int efv_pkts_compl = 0;
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
@ -282,7 +296,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
&efv_pkts_compl);
}
}

drivers/net/ethernet/sfc/tx_common.h

@ -19,7 +19,8 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl);
unsigned int *bytes_compl,
unsigned int *efv_pkts_compl);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{