Merge branch 'bnxt_en-update-for-net-next'

Michael Chan says:

====================
bnxt_en: Update for net-next

This series starts with 2 patches to support firmware crash dump.  The
driver allocates the required DMA memory ahead of time for the firmware
to store the crash dump in, if and when it crashes.  Patch 3 adds
priority and TPID support to the .ndo_set_vf_vlan() callback.  Note that
this was rejected and reverted last year and is being re-submitted after
recent changes to the guidelines.  The remaining patches are MSIX
related.  Legacy interrupt (INTx) is no longer supported by the
firmware, so we remove that support from the driver.  We then convert to
the newer kernel APIs to allocate and enable MSIX vectors.  The last
patch adds support for dynamic MSIX.

v3: https://lore.kernel.org/20240823195657.31588-1-michael.chan@broadcom.com
v2: https://lore.kernel.org/20240816212832.185379-1-michael.chan@broadcom.com
v1: https://lore.kernel.org/20240713234339.70293-1-michael.chan@broadcom.com
====================

Link: https://patch.msgid.link/20240828183235.128948-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2024-08-29 15:33:27 -07:00
Commit 670726a826
6 changed files with 280 additions and 188 deletions
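As the cover letter notes, the series drops the INTx path, moves from the old
struct msix_entry + pci_enable_msix_range() flow to pci_alloc_irq_vectors(),
and finally adds dynamic MSIX.  The following is only a hedged, minimal sketch
of that dynamic pattern using the generic PCI/MSI APIs that appear in the
bnxt.c hunks below; the function names and the pdev/have/want parameters are
illustrative placeholders, not bnxt code.

/*
 * Hedged sketch only: illustrates the pci_alloc_irq_vectors() +
 * pci_msix_alloc_irq_at()/pci_msix_free_irq() pattern.  "pdev", "have"
 * and "want" are placeholder names, not bnxt identifiers.
 */
#include <linux/pci.h>
#include <linux/msi.h>

/* Enable MSI-X and allocate an initial range of vectors. */
static int example_alloc_base_vecs(struct pci_dev *pdev, int min_vecs, int want)
{
	return pci_alloc_irq_vectors(pdev, min_vecs, want, PCI_IRQ_MSIX);
}

/* Grow the vector set at runtime, one table entry at a time. */
static int example_grow_vecs(struct pci_dev *pdev, int have, int want)
{
	struct msi_map map;
	int i;

	/* Dynamic allocation only works if MSI-X is already enabled and
	 * the platform supports post-enable allocation.
	 */
	if (!pci_msix_can_alloc_dyn(pdev))
		return have;

	for (i = have; i < want; i++) {
		map = pci_msix_alloc_irq_at(pdev, i, NULL);
		if (map.index < 0)
			break;
		/* map.virq is the Linux IRQ number to pass to request_irq() */
		have++;
	}
	return have;
}

/* Free one dynamically allocated vector by table index and virq. */
static void example_trim_vec(struct pci_dev *pdev, unsigned int index, int virq)
{
	struct msi_map map = { .index = index, .virq = virq };

	pci_msix_free_irq(pdev, map);
}

This mirrors what bnxt_change_msix() in the bnxt.c diff does: grow the MSIX
table at the end when more rings are needed and trim from the end otherwise,
without disabling and re-enabling MSI-X for the whole device.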

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -69,6 +69,7 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -301,10 +302,6 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
@@ -2853,34 +2850,6 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return TX_CMP_VALID(txcmp, raw_cons);
}
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
struct bnxt *bp = bnapi->bp;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
u32 int_status;
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
if (!bnxt_has_work(bp, cpr)) {
int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
/* return if erroneous interrupt */
if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
return IRQ_NONE;
}
/* disable ring IRQ */
BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
}
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
int budget)
{
@@ -6875,15 +6844,14 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->cq_handle = cpu_to_le64(ring->handle);
req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
} else {
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
break;
case HWRM_RING_ALLOC_NQ:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_USING_MSIX)
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
@@ -8946,6 +8914,80 @@ skip_rdma:
return 0;
}
static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
{
struct hwrm_dbg_crashdump_medium_cfg_input *req;
u16 page_attr;
int rc;
if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
return 0;
rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
if (rc)
return rc;
if (BNXT_PAGE_SIZE == 0x2000)
page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
else if (BNXT_PAGE_SIZE == 0x10000)
page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
else
page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
req->pg_size_lvl = cpu_to_le16(page_attr |
bp->fw_crash_mem->ring_mem.depth);
req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
req->size = cpu_to_le32(bp->fw_crash_len);
req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
return hwrm_req_send(bp, req);
}
static void bnxt_free_crash_dump_mem(struct bnxt *bp)
{
if (bp->fw_crash_mem) {
bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
kfree(bp->fw_crash_mem);
bp->fw_crash_mem = NULL;
}
}
static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
{
u32 mem_size = 0;
int rc;
if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
return 0;
rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
if (rc)
return rc;
mem_size = round_up(mem_size, 4);
/* keep and use the existing pages */
if (bp->fw_crash_mem &&
mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
goto alloc_done;
if (bp->fw_crash_mem)
bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
else
bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
GFP_KERNEL);
if (!bp->fw_crash_mem)
return -ENOMEM;
rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
if (rc) {
bnxt_free_crash_dump_mem(bp);
return rc;
}
alloc_done:
bp->fw_crash_len = mem_size;
return 0;
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp;
@@ -9121,6 +9163,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
@@ -10576,22 +10620,32 @@ static void bnxt_setup_msix(struct bnxt *bp)
}
}
static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
static int bnxt_init_int_mode(struct bnxt *bp);
if (bp->num_tc) {
netdev_reset_tc(bp->dev);
bp->num_tc = 0;
static int bnxt_change_msix(struct bnxt *bp, int total)
{
struct msi_map map;
int i;
/* add MSIX to the end if needed */
for (i = bp->total_irqs; i < total; i++) {
map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
if (map.index < 0)
return bp->total_irqs;
bp->irq_tbl[i].vector = map.virq;
bp->total_irqs++;
}
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
bp->irq_tbl[0].handler = bnxt_inta;
/* trim MSIX from the end if needed */
for (i = bp->total_irqs; i > total; i--) {
map.index = i - 1;
map.virq = bp->irq_tbl[i - 1].vector;
pci_msix_free_irq(bp->pdev, map);
bp->total_irqs--;
}
return bp->total_irqs;
}
static int bnxt_init_int_mode(struct bnxt *bp);
static int bnxt_setup_int_mode(struct bnxt *bp)
{
int rc;
@@ -10602,10 +10656,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc ?: -ENODEV;
}
if (bp->flags & BNXT_FLAG_USING_MSIX)
bnxt_setup_msix(bp);
else
bnxt_setup_inta(bp);
bnxt_setup_msix(bp);
rc = bnxt_set_real_num_queues(bp);
return rc;
@@ -10693,10 +10744,9 @@ static int bnxt_get_num_msix(struct bnxt *bp)
return bnxt_nq_rings_in_use(bp);
}
static int bnxt_init_msix(struct bnxt *bp)
static int bnxt_init_int_mode(struct bnxt *bp)
{
int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
struct msix_entry *msix_ent;
int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
total_vecs = bnxt_get_num_msix(bp);
max = bnxt_get_max_func_irqs(bp);
@@ -10706,29 +10756,24 @@ static int bnxt_init_msix(struct bnxt *bp)
if (!total_vecs)
return 0;
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent)
return -ENOMEM;
for (i = 0; i < total_vecs; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
}
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
min = 2;
total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
PCI_IRQ_MSIX);
ulp_msix = bnxt_get_ulp_msix_num(bp);
if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
tbl_size = total_vecs;
if (pci_msix_can_alloc_dyn(bp->pdev))
tbl_size = max;
bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
if (bp->irq_tbl) {
for (i = 0; i < total_vecs; i++)
bp->irq_tbl[i].vector = msix_ent[i].vector;
bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
@@ -10746,61 +10791,28 @@ static int bnxt_init_msix(struct bnxt *bp)
rc = -ENOMEM;
goto msix_setup_exit;
}
bp->flags |= BNXT_FLAG_USING_MSIX;
kfree(msix_ent);
return 0;
msix_setup_exit:
netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
pci_disable_msix(bp->pdev);
kfree(msix_ent);
return rc;
}
static int bnxt_init_inta(struct bnxt *bp)
{
bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
if (!bp->irq_tbl)
return -ENOMEM;
bp->total_irqs = 1;
bp->rx_nr_rings = 1;
bp->tx_nr_rings = 1;
bp->cp_nr_rings = 1;
bp->flags |= BNXT_FLAG_SHARED_RINGS;
bp->irq_tbl[0].vector = bp->pdev->irq;
return 0;
}
static int bnxt_init_int_mode(struct bnxt *bp)
{
int rc = -ENODEV;
if (bp->flags & BNXT_FLAG_MSIX_CAP)
rc = bnxt_init_msix(bp);
if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
/* fallback to INTA */
rc = bnxt_init_inta(bp);
}
pci_free_irq_vectors(bp->pdev);
return rc;
}
static void bnxt_clear_int_mode(struct bnxt *bp)
{
if (bp->flags & BNXT_FLAG_USING_MSIX)
pci_disable_msix(bp->pdev);
pci_free_irq_vectors(bp->pdev);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
bool irq_change = false;
int tcs = bp->num_tc;
int irqs_required;
int rc;
@@ -10819,15 +10831,21 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
}
if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
irq_cleared = true;
irq_change = true;
if (!pci_msix_can_alloc_dyn(bp->pdev)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
irq_cleared = true;
}
}
rc = __bnxt_reserve_rings(bp);
if (irq_cleared) {
if (!rc)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
} else if (irq_change && !rc) {
if (bnxt_change_msix(bp, irqs_required) != irqs_required)
rc = -ENOSPC;
}
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
@@ -10893,9 +10911,6 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
if (!(bp->flags & BNXT_FLAG_USING_MSIX))
flags = IRQF_SHARED;
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -10960,29 +10975,22 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
int i;
int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
int i;
if (bp->flags & BNXT_FLAG_USING_MSIX) {
int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
bnxt_poll_nitroa0);
}
} else {
bnapi = bp->bnapi[0];
netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
poll_fn = bnxt_poll_p5;
else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11970,20 +11978,6 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
/* Common routine to pre-map certain register block to different GRC window.
* A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
* in PF and 3 windows in VF that can be customized to map in different
* register blocks.
*/
static void bnxt_preset_reg_win(struct bnxt *bp)
{
if (BNXT_PF(bp)) {
/* CAG registers map to GRC window #4 */
writel(BNXT_CAG_REG_BASE,
bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
}
}
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
static int bnxt_reinit_after_abort(struct bnxt *bp)
@@ -12088,7 +12082,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
/* Reserve rings now if none were reserved at driver probe. */
@@ -12101,12 +12094,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_reserve_rings(bp, irq_re_init);
if (rc)
return rc;
if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_USING_MSIX)) {
/* disable RFS if falling back to INTA */
bp->dev->hw_features &= ~NETIF_F_NTUPLE;
bp->flags &= ~BNXT_FLAG_RFS;
}
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
@@ -12833,7 +12820,7 @@ bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
!BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
hwr.grp = bp->rx_nr_rings;
@@ -13986,6 +13973,19 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
rc = bnxt_alloc_crash_dump_mem(bp);
if (rc)
netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
rc);
if (!rc) {
rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
if (rc) {
bnxt_free_crash_dump_mem(bp);
netdev_warn(bp->dev,
"hwrm crash dump mem failure rc: %d\n", rc);
}
}
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
@@ -15294,6 +15294,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -15681,6 +15682,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
if (!pdev->msix_cap) {
dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
return -ENODEV;
}
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -15707,9 +15713,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
if (pdev->msix_cap)
bp->flags |= BNXT_FLAG_MSIX_CAP;
rc = bnxt_init_board(pdev, dev);
if (rc < 0)
goto init_err_free;
@@ -15933,6 +15936,7 @@ init_err_pci_clean:
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -16024,6 +16028,8 @@ static int bnxt_resume(struct device *device)
rc = -ENODEV;
goto resume_exit;
}
if (bp->fw_crash_mem)
bnxt_hwrm_crash_dump_mem_cfg(bp);
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {

drivers/net/ethernet/broadcom/bnxt/bnxt.h

@@ -1356,7 +1356,6 @@ struct bnxt_vf_info {
u16 vlan;
u16 func_qcfg_flags;
u32 flags;
#define BNXT_VF_QOS 0x1
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
@@ -1756,8 +1755,6 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRC_REG_STATUS_P5 0x520
@@ -2200,8 +2197,6 @@ struct bnxt {
#define BNXT_FLAG_STRIP_VLAN 0x20
#define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
BNXT_FLAG_LRO)
#define BNXT_FLAG_USING_MSIX 0x40
#define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2649,6 +2644,9 @@ struct bnxt {
#endif
u32 thermal_threshold_type;
enum board_idx board_idx;
struct bnxt_ctx_pg_info *fw_crash_mem;
u32 fw_crash_len;
};
#define BNXT_NUM_RX_RING_STATS 8

drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c

@@ -372,20 +372,81 @@ err:
return rc;
}
static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
u32 dump_len)
{
u32 data_copied = 0;
u32 data_len;
int i;
for (i = 0; i < rmem->nr_pages; i++) {
data_len = rmem->page_size;
if (data_copied + data_len > dump_len)
data_len = dump_len - data_copied;
memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
data_copied += data_len;
if (data_copied >= dump_len)
break;
}
return data_copied;
}
static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
{
struct bnxt_ring_mem_info *rmem;
u32 offset = 0;
if (!bp->fw_crash_mem)
return -ENOENT;
rmem = &bp->fw_crash_mem->ring_mem;
if (rmem->depth > 1) {
int i;
for (i = 0; i < rmem->nr_pages; i++) {
struct bnxt_ctx_pg_info *pg_tbl;
pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
buf + offset,
dump_len - offset);
if (offset >= dump_len)
break;
}
} else {
bnxt_copy_crash_data(rmem, buf, dump_len);
}
return 0;
}
static bool bnxt_crash_dump_avail(struct bnxt *bp)
{
u32 sig = 0;
/* First 4 bytes(signature) of crash dump is always non-zero */
bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
return !!sig;
}
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
if (dump_type == BNXT_DUMP_CRASH) {
if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#else
return -EOPNOTSUPP;
else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
else
return -EOPNOTSUPP;
} else {
return __bnxt_get_coredump(bp, buf, dump_len);
}
}
static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
@@ -395,7 +456,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
@@ -403,8 +465,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return rc;
req->fid = cpu_to_le16(0xffff);
if (dump_type == BNXT_DUMP_CRASH)
req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
if (dump_type == BNXT_DUMP_CRASH) {
if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
else
req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
}
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -412,7 +478,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
*dump_len = le32_to_cpu(resp->crashdump_size);
if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
*dump_len = BNXT_CRASH_DUMP_LEN;
else
*dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* Driver adds coredump header and "HWRM_VER_GET response"
* segment additionally to coredump.
@@ -434,10 +503,17 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
u32 len = 0;
if (dump_type == BNXT_DUMP_CRASH &&
bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
bp->fw_crash_mem) {
if (!bnxt_crash_dump_avail(bp))
return 0;
return bp->fw_crash_len;
}
if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
if (dump_type == BNXT_DUMP_CRASH)
len = BNXT_CRASH_DUMP_LEN;
else
if (dump_type != BNXT_DUMP_CRASH)
__bnxt_get_coredump(bp, NULL, &len);
}
return len;

drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h

@@ -111,7 +111,15 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
#endif

drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c

@@ -4989,9 +4989,16 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
return -EINVAL;
}
if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
return -EOPNOTSUPP;
if (dump->flag == BNXT_DUMP_CRASH) {
if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
(!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
netdev_info(dev,
"Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
return -EOPNOTSUPP;
} else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
return -EOPNOTSUPP;
}
}
bp->dump_flag = dump->flag;

drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c

@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <net/dcbnl.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
if (vf->flags & BNXT_VF_QOS)
ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
else
ivi->qos = 0;
ivi->vlan = vf->vlan & VLAN_VID_MASK;
ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
ivi->trusted = bnxt_is_trusted_vf(bp, vf);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
@@ -256,21 +254,21 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
if (vlan_proto != htons(ETH_P_8021Q))
if (vlan_proto != htons(ETH_P_8021Q) &&
(vlan_proto != htons(ETH_P_8021AD) ||
!(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
return -EPROTONOSUPPORT;
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
/* TODO: needed to implement proper handling of user priority,
* currently fail the command if there is valid priority
*/
if (vlan_id > 4095 || qos)
if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
(!vlan_id && qos))
return -EINVAL;
vf = &bp->pf.vf[vf_id];
vlan_tag = vlan_id;
vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
if (vlan_tag == vf->vlan)
return 0;
@@ -279,6 +277,10 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
req->tpid = vlan_proto;
}
rc = hwrm_req_send(bp, req);
if (!rc)
vf->vlan = vlan_tag;
@@ -900,11 +902,6 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
return 0;
}
rtnl_lock();
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");