Merge branch 'octeontx2-af-Debugfs-support-and-updates-to-parser-profile'
Sunil Goutham says: ==================== octeontx2-af: Debugfs support and updates to parser profile This patchset adds debugfs support to dump various HW state machine info which helps in debugging issues. Info includes - Current queue context, stats, resource utilization etc - MCAM entry utilization, miss and pkt drop counter - CGX ingress and egress stats - Current RVU block allocation status - etc. Rest patches has changes wrt - Updated packet parsing profile for parsing more protocols. - RSS algorithms to include inner protocols while generating hash - Handle current version of silicon's limitations wrt shaping, coloring and fixed mapping of transmit limiter queue's configuration. - Enable broadcast packet replication to PF and it's VFs. - Support for configurable NDC cache waymask - etc Changes from v1: Removed inline keyword for newly introduced APIs in few patches. - Suggested by David Miller. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
798a496bf4
@ -16,3 +16,12 @@ config OCTEONTX2_AF
|
||||
Unit's admin function manager which manages all RVU HW resources
|
||||
and provides a medium to other PF/VFs to configure HW. Should be
|
||||
enabled for other RVU device drivers to work.
|
||||
|
||||
config NDC_DIS_DYNAMIC_CACHING
|
||||
bool "Disable caching of dynamic entries in NDC"
|
||||
depends on OCTEONTX2_AF
|
||||
default n
|
||||
---help---
|
||||
This config option disables caching of dynamic entries such as NIX SQEs
|
||||
, NPA stack pages etc in NDC. Also locks down NIX SQ/CQ/RQ/RSS and
|
||||
NPA Aura/Pool contexts.
|
||||
|
@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
|
||||
|
||||
octeontx2_mbox-y := mbox.o
|
||||
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
|
||||
rvu_reg.o rvu_npc.o
|
||||
rvu_reg.o rvu_npc.o rvu_debugfs.o
|
||||
|
@ -138,6 +138,16 @@ void *cgx_get_pdata(int cgx_id)
|
||||
}
|
||||
EXPORT_SYMBOL(cgx_get_pdata);
|
||||
|
||||
int cgx_get_cgxid(void *cgxd)
|
||||
{
|
||||
struct cgx *cgx = cgxd;
|
||||
|
||||
if (!cgx)
|
||||
return -EINVAL;
|
||||
|
||||
return cgx->cgx_id;
|
||||
}
|
||||
|
||||
/* Ensure the required lock for event queue(where asynchronous events are
|
||||
* posted) is acquired before calling this API. Else an asynchronous event(with
|
||||
* latest link status) can reach the destination before this function returns
|
||||
@ -281,6 +291,35 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
|
||||
}
|
||||
EXPORT_SYMBOL(cgx_lmac_promisc_config);
|
||||
|
||||
/* Enable or disable forwarding received pause frames to Tx block */
|
||||
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
|
||||
{
|
||||
struct cgx *cgx = cgxd;
|
||||
u64 cfg;
|
||||
|
||||
if (!cgx)
|
||||
return;
|
||||
|
||||
if (enable) {
|
||||
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
|
||||
cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
|
||||
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
|
||||
|
||||
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
|
||||
cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
|
||||
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
|
||||
} else {
|
||||
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
|
||||
cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
|
||||
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
|
||||
|
||||
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
|
||||
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
|
||||
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
|
||||
|
||||
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
|
||||
{
|
||||
struct cgx *cgx = cgxd;
|
||||
@ -321,6 +360,27 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
|
||||
}
|
||||
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
|
||||
|
||||
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
|
||||
{
|
||||
struct cgx *cgx = cgxd;
|
||||
u64 cfg, last;
|
||||
|
||||
if (!cgx || lmac_id >= cgx->lmac_count)
|
||||
return -ENODEV;
|
||||
|
||||
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
|
||||
last = cfg;
|
||||
if (enable)
|
||||
cfg |= DATA_PKT_TX_EN;
|
||||
else
|
||||
cfg &= ~DATA_PKT_TX_EN;
|
||||
|
||||
if (cfg != last)
|
||||
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
|
||||
return !!(last & DATA_PKT_TX_EN);
|
||||
}
|
||||
EXPORT_SYMBOL(cgx_lmac_tx_enable);
|
||||
|
||||
/* CGX Firmware interface low level support */
|
||||
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
|
||||
{
|
||||
|
@ -56,6 +56,11 @@
|
||||
#define CGXX_GMP_PCS_MRX_CTL 0x30000
|
||||
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
|
||||
|
||||
#define CGXX_SMUX_RX_FRM_CTL 0x20020
|
||||
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
|
||||
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
|
||||
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
|
||||
|
||||
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
|
||||
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
|
||||
#define CGX_CMD_TIMEOUT 2200 /* msecs */
|
||||
@ -63,6 +68,11 @@
|
||||
#define CGX_NVEC 37
|
||||
#define CGX_LMAC_FWI 0
|
||||
|
||||
enum cgx_nix_stat_type {
|
||||
NIX_STATS_RX,
|
||||
NIX_STATS_TX,
|
||||
};
|
||||
|
||||
enum LMAC_TYPE {
|
||||
LMAC_MODE_SGMII = 0,
|
||||
LMAC_MODE_XAUI = 1,
|
||||
@ -96,6 +106,7 @@ struct cgx_event_cb {
|
||||
extern struct pci_driver cgx_driver;
|
||||
|
||||
int cgx_get_cgxcnt_max(void);
|
||||
int cgx_get_cgxid(void *cgxd);
|
||||
int cgx_get_lmac_cnt(void *cgxd);
|
||||
void *cgx_get_pdata(int cgx_id);
|
||||
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
|
||||
@ -104,9 +115,11 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
|
||||
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
|
||||
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
|
||||
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
|
||||
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
|
||||
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
|
||||
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
|
||||
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
|
||||
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
|
||||
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
|
||||
int cgx_get_link_info(void *cgxd, int lmac_id,
|
||||
struct cgx_link_user_info *linfo);
|
||||
|
@ -196,4 +196,20 @@ enum nix_scheduler {
|
||||
#define DEFAULT_RSS_CONTEXT_GROUP 0
|
||||
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
|
||||
|
||||
/* NDC info */
|
||||
enum ndc_idx_e {
|
||||
NIX0_RX = 0x0,
|
||||
NIX0_TX = 0x1,
|
||||
NPA0_U = 0x2,
|
||||
};
|
||||
|
||||
enum ndc_ctype_e {
|
||||
CACHING = 0x0,
|
||||
BYPASS = 0x1,
|
||||
};
|
||||
|
||||
#define NDC_MAX_PORT 6
|
||||
#define NDC_READ_TRANS 0
|
||||
#define NDC_WRITE_TRANS 1
|
||||
|
||||
#endif /* COMMON_H */
|
||||
|
@ -19,17 +19,20 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
|
||||
|
||||
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
|
||||
{
|
||||
void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
|
||||
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
|
||||
struct mbox_hdr *tx_hdr, *rx_hdr;
|
||||
|
||||
tx_hdr = mdev->mbase + mbox->tx_start;
|
||||
rx_hdr = mdev->mbase + mbox->rx_start;
|
||||
tx_hdr = hw_mbase + mbox->tx_start;
|
||||
rx_hdr = hw_mbase + mbox->rx_start;
|
||||
|
||||
spin_lock(&mdev->mbox_lock);
|
||||
mdev->msg_size = 0;
|
||||
mdev->rsp_size = 0;
|
||||
tx_hdr->num_msgs = 0;
|
||||
tx_hdr->msg_size = 0;
|
||||
rx_hdr->num_msgs = 0;
|
||||
rx_hdr->msg_size = 0;
|
||||
spin_unlock(&mdev->mbox_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(otx2_mbox_reset);
|
||||
@ -133,16 +136,17 @@ EXPORT_SYMBOL(otx2_mbox_init);
|
||||
|
||||
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
|
||||
{
|
||||
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
|
||||
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
|
||||
int timeout = 0, sleep = 1;
|
||||
struct device *sender = &mbox->pdev->dev;
|
||||
|
||||
while (mdev->num_msgs != mdev->msgs_acked) {
|
||||
msleep(sleep);
|
||||
timeout += sleep;
|
||||
if (timeout >= MBOX_RSP_TIMEOUT)
|
||||
return -EIO;
|
||||
while (!time_after(jiffies, timeout)) {
|
||||
if (mdev->num_msgs == mdev->msgs_acked)
|
||||
return 0;
|
||||
usleep_range(800, 1000);
|
||||
}
|
||||
return 0;
|
||||
dev_dbg(sender, "timed out while waiting for rsp\n");
|
||||
return -EIO;
|
||||
}
|
||||
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
|
||||
|
||||
@ -162,13 +166,25 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
|
||||
|
||||
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
|
||||
{
|
||||
void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
|
||||
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
|
||||
struct mbox_hdr *tx_hdr, *rx_hdr;
|
||||
|
||||
tx_hdr = mdev->mbase + mbox->tx_start;
|
||||
rx_hdr = mdev->mbase + mbox->rx_start;
|
||||
tx_hdr = hw_mbase + mbox->tx_start;
|
||||
rx_hdr = hw_mbase + mbox->rx_start;
|
||||
|
||||
/* If bounce buffer is implemented copy mbox messages from
|
||||
* bounce buffer to hw mbox memory.
|
||||
*/
|
||||
if (mdev->mbase != hw_mbase)
|
||||
memcpy(hw_mbase + mbox->tx_start + msgs_offset,
|
||||
mdev->mbase + mbox->tx_start + msgs_offset,
|
||||
mdev->msg_size);
|
||||
|
||||
spin_lock(&mdev->mbox_lock);
|
||||
|
||||
tx_hdr->msg_size = mdev->msg_size;
|
||||
|
||||
/* Reset header for next messages */
|
||||
mdev->msg_size = 0;
|
||||
mdev->rsp_size = 0;
|
||||
@ -215,7 +231,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
|
||||
msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
|
||||
|
||||
/* Clear the whole msg region */
|
||||
memset(msghdr, 0, sizeof(*msghdr) + size);
|
||||
memset(msghdr, 0, size);
|
||||
/* Init message header with reset values */
|
||||
msghdr->ver = OTX2_MBOX_VERSION;
|
||||
mdev->msg_size += size;
|
||||
@ -236,8 +252,10 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
|
||||
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
|
||||
u16 msgs;
|
||||
|
||||
spin_lock(&mdev->mbox_lock);
|
||||
|
||||
if (mdev->num_msgs != mdev->msgs_acked)
|
||||
return ERR_PTR(-ENODEV);
|
||||
goto error;
|
||||
|
||||
for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
|
||||
struct mbox_msghdr *pmsg = mdev->mbase + imsg;
|
||||
@ -245,18 +263,55 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
|
||||
|
||||
if (msg == pmsg) {
|
||||
if (pmsg->id != prsp->id)
|
||||
return ERR_PTR(-ENODEV);
|
||||
goto error;
|
||||
spin_unlock(&mdev->mbox_lock);
|
||||
return prsp;
|
||||
}
|
||||
|
||||
imsg = pmsg->next_msgoff;
|
||||
irsp = prsp->next_msgoff;
|
||||
imsg = mbox->tx_start + pmsg->next_msgoff;
|
||||
irsp = mbox->rx_start + prsp->next_msgoff;
|
||||
}
|
||||
|
||||
error:
|
||||
spin_unlock(&mdev->mbox_lock);
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
EXPORT_SYMBOL(otx2_mbox_get_rsp);
|
||||
|
||||
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
|
||||
{
|
||||
unsigned long ireq = mbox->tx_start + msgs_offset;
|
||||
unsigned long irsp = mbox->rx_start + msgs_offset;
|
||||
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
|
||||
int rc = -ENODEV;
|
||||
u16 msgs;
|
||||
|
||||
spin_lock(&mdev->mbox_lock);
|
||||
|
||||
if (mdev->num_msgs != mdev->msgs_acked)
|
||||
goto exit;
|
||||
|
||||
for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
|
||||
struct mbox_msghdr *preq = mdev->mbase + ireq;
|
||||
struct mbox_msghdr *prsp = mdev->mbase + irsp;
|
||||
|
||||
if (preq->id != prsp->id)
|
||||
goto exit;
|
||||
if (prsp->rc) {
|
||||
rc = prsp->rc;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
ireq = mbox->tx_start + preq->next_msgoff;
|
||||
irsp = mbox->rx_start + prsp->next_msgoff;
|
||||
}
|
||||
rc = 0;
|
||||
exit:
|
||||
spin_unlock(&mdev->mbox_lock);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
|
||||
|
||||
int
|
||||
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
|
||||
{
|
||||
|
@ -36,7 +36,7 @@
|
||||
|
||||
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
|
||||
|
||||
#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
|
||||
#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
|
||||
|
||||
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
|
||||
|
||||
@ -75,6 +75,7 @@ struct otx2_mbox {
|
||||
|
||||
/* Header which preceeds all mbox messages */
|
||||
struct mbox_hdr {
|
||||
u64 msg_size; /* Total msgs size embedded */
|
||||
u16 num_msgs; /* No of msgs embedded */
|
||||
};
|
||||
|
||||
@ -103,6 +104,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
|
||||
int size, int size_rsp);
|
||||
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
|
||||
struct mbox_msghdr *msg);
|
||||
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid);
|
||||
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
|
||||
u16 pcifunc, u16 id);
|
||||
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
|
||||
@ -125,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
|
||||
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
|
||||
M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
|
||||
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
|
||||
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
|
||||
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
|
||||
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
|
||||
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
|
||||
@ -300,6 +303,12 @@ struct msix_offset_rsp {
|
||||
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
|
||||
};
|
||||
|
||||
struct get_hw_cap_rsp {
|
||||
struct mbox_msghdr hdr;
|
||||
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
|
||||
u8 nix_shaping; /* Is shaping and coloring supported */
|
||||
};
|
||||
|
||||
/* CGX mbox message formats */
|
||||
|
||||
struct cgx_stats_rsp {
|
||||
@ -352,6 +361,7 @@ struct npa_lf_alloc_req {
|
||||
int node;
|
||||
int aura_sz; /* No of auras */
|
||||
u32 nr_pools; /* No of pools */
|
||||
u64 way_mask;
|
||||
};
|
||||
|
||||
struct npa_lf_alloc_rsp {
|
||||
@ -442,6 +452,7 @@ struct nix_lf_alloc_req {
|
||||
u16 npa_func;
|
||||
u16 sso_func;
|
||||
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
|
||||
u64 way_mask;
|
||||
};
|
||||
|
||||
struct nix_lf_alloc_rsp {
|
||||
@ -512,6 +523,9 @@ struct nix_txsch_alloc_rsp {
|
||||
/* Scheduler queue list allocated at each level */
|
||||
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
|
||||
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
|
||||
u8 aggr_level; /* Traffic aggregation scheduler level */
|
||||
u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */
|
||||
u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
|
||||
};
|
||||
|
||||
struct nix_txsch_free_req {
|
||||
@ -578,6 +592,18 @@ struct nix_rss_flowkey_cfg {
|
||||
#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
|
||||
#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
|
||||
#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
|
||||
#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6)
|
||||
#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7)
|
||||
#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8)
|
||||
#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9)
|
||||
#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10)
|
||||
#define NIX_FLOW_KEY_TYPE_GTPU BIT(11)
|
||||
#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
|
||||
#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
|
||||
#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14)
|
||||
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
|
||||
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
|
||||
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
|
||||
u32 flowkey_cfg; /* Flowkey types selected */
|
||||
u8 group; /* RSS context or group */
|
||||
};
|
||||
|
@ -27,26 +27,45 @@ enum NPC_LID_E {
|
||||
enum npc_kpu_la_ltype {
|
||||
NPC_LT_LA_8023 = 1,
|
||||
NPC_LT_LA_ETHER,
|
||||
NPC_LT_LA_IH_NIX_ETHER,
|
||||
NPC_LT_LA_IH_8_ETHER,
|
||||
NPC_LT_LA_IH_4_ETHER,
|
||||
NPC_LT_LA_IH_2_ETHER,
|
||||
NPC_LT_LA_HIGIG2_ETHER,
|
||||
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
|
||||
NPC_LT_LA_CUSTOM0 = 0xE,
|
||||
NPC_LT_LA_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
enum npc_kpu_lb_ltype {
|
||||
NPC_LT_LB_ETAG = 1,
|
||||
NPC_LT_LB_CTAG,
|
||||
NPC_LT_LB_STAG,
|
||||
NPC_LT_LB_STAG_QINQ,
|
||||
NPC_LT_LB_BTAG,
|
||||
NPC_LT_LB_QINQ,
|
||||
NPC_LT_LB_ITAG,
|
||||
NPC_LT_LB_DSA,
|
||||
NPC_LT_LB_DSA_VLAN,
|
||||
NPC_LT_LB_EDSA,
|
||||
NPC_LT_LB_EDSA_VLAN,
|
||||
NPC_LT_LB_EXDSA,
|
||||
NPC_LT_LB_EXDSA_VLAN,
|
||||
NPC_LT_LB_CUSTOM0 = 0xE,
|
||||
NPC_LT_LB_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
enum npc_kpu_lc_ltype {
|
||||
NPC_LT_LC_IP = 1,
|
||||
NPC_LT_LC_IP_OPT,
|
||||
NPC_LT_LC_IP6,
|
||||
NPC_LT_LC_IP6_EXT,
|
||||
NPC_LT_LC_ARP,
|
||||
NPC_LT_LC_RARP,
|
||||
NPC_LT_LC_MPLS,
|
||||
NPC_LT_LC_NSH,
|
||||
NPC_LT_LC_PTP,
|
||||
NPC_LT_LC_FCOE,
|
||||
NPC_LT_LC_CUSTOM0 = 0xE,
|
||||
NPC_LT_LC_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
/* Don't modify Ltypes upto SCTP, otherwise it will
|
||||
@ -57,49 +76,67 @@ enum npc_kpu_ld_ltype {
|
||||
NPC_LT_LD_UDP,
|
||||
NPC_LT_LD_ICMP,
|
||||
NPC_LT_LD_SCTP,
|
||||
NPC_LT_LD_IGMP,
|
||||
NPC_LT_LD_ICMP6,
|
||||
NPC_LT_LD_IGMP = 8,
|
||||
NPC_LT_LD_ESP,
|
||||
NPC_LT_LD_AH,
|
||||
NPC_LT_LD_GRE,
|
||||
NPC_LT_LD_GRE_MPLS,
|
||||
NPC_LT_LD_GRE_NSH,
|
||||
NPC_LT_LD_TU_MPLS,
|
||||
NPC_LT_LD_NVGRE,
|
||||
NPC_LT_LD_NSH,
|
||||
NPC_LT_LD_TU_MPLS_IN_NSH,
|
||||
NPC_LT_LD_TU_MPLS_IN_IP,
|
||||
NPC_LT_LD_CUSTOM0 = 0xE,
|
||||
NPC_LT_LD_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
enum npc_kpu_le_ltype {
|
||||
NPC_LT_LE_TU_ETHER = 1,
|
||||
NPC_LT_LE_TU_PPP,
|
||||
NPC_LT_LE_TU_MPLS_IN_NSH,
|
||||
NPC_LT_LE_TU_3RD_NSH,
|
||||
NPC_LT_LE_VXLAN = 1,
|
||||
NPC_LT_LE_GENEVE,
|
||||
NPC_LT_LE_GTPU = 4,
|
||||
NPC_LT_LE_VXLANGPE,
|
||||
NPC_LT_LE_GTPC,
|
||||
NPC_LT_LE_NSH,
|
||||
NPC_LT_LE_TU_MPLS_IN_GRE,
|
||||
NPC_LT_LE_TU_NSH_IN_GRE,
|
||||
NPC_LT_LE_TU_MPLS_IN_UDP,
|
||||
NPC_LT_LE_CUSTOM0 = 0xE,
|
||||
NPC_LT_LE_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
enum npc_kpu_lf_ltype {
|
||||
NPC_LT_LF_TU_IP = 1,
|
||||
NPC_LT_LF_TU_IP6,
|
||||
NPC_LT_LF_TU_ARP,
|
||||
NPC_LT_LF_TU_MPLS_IP,
|
||||
NPC_LT_LF_TU_MPLS_IP6,
|
||||
NPC_LT_LF_TU_MPLS_ETHER,
|
||||
NPC_LT_LF_TU_ETHER = 1,
|
||||
NPC_LT_LF_TU_PPP,
|
||||
NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
|
||||
NPC_LT_LF_TU_NSH_IN_VXLANGPE,
|
||||
NPC_LT_LF_TU_MPLS_IN_NSH,
|
||||
NPC_LT_LF_TU_3RD_NSH,
|
||||
NPC_LT_LF_CUSTOM0 = 0xE,
|
||||
NPC_LT_LF_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
enum npc_kpu_lg_ltype {
|
||||
NPC_LT_LG_TU_TCP = 1,
|
||||
NPC_LT_LG_TU_UDP,
|
||||
NPC_LT_LG_TU_SCTP,
|
||||
NPC_LT_LG_TU_ICMP,
|
||||
NPC_LT_LG_TU_IGMP,
|
||||
NPC_LT_LG_TU_ICMP6,
|
||||
NPC_LT_LG_TU_ESP,
|
||||
NPC_LT_LG_TU_AH,
|
||||
NPC_LT_LG_TU_IP = 1,
|
||||
NPC_LT_LG_TU_IP6,
|
||||
NPC_LT_LG_TU_ARP,
|
||||
NPC_LT_LG_TU_ETHER_IN_NSH,
|
||||
NPC_LT_LG_CUSTOM0 = 0xE,
|
||||
NPC_LT_LG_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
/* Don't modify Ltypes upto SCTP, otherwise it will
|
||||
* effect flow tag calculation and thus RSS.
|
||||
*/
|
||||
enum npc_kpu_lh_ltype {
|
||||
NPC_LT_LH_TCP_DATA = 1,
|
||||
NPC_LT_LH_HTTP_DATA,
|
||||
NPC_LT_LH_HTTPS_DATA,
|
||||
NPC_LT_LH_PPTP_DATA,
|
||||
NPC_LT_LH_UDP_DATA,
|
||||
NPC_LT_LH_TU_TCP = 1,
|
||||
NPC_LT_LH_TU_UDP,
|
||||
NPC_LT_LH_TU_ICMP,
|
||||
NPC_LT_LH_TU_SCTP,
|
||||
NPC_LT_LH_TU_ICMP6,
|
||||
NPC_LT_LH_TU_IGMP = 8,
|
||||
NPC_LT_LH_TU_ESP,
|
||||
NPC_LT_LH_TU_AH,
|
||||
NPC_LT_LH_CUSTOM0 = 0xE,
|
||||
NPC_LT_LH_CUSTOM1 = 0xF,
|
||||
};
|
||||
|
||||
struct npc_kpu_profile_cam {
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -56,12 +56,34 @@ static char *mkex_profile; /* MKEX profile name */
|
||||
module_param(mkex_profile, charp, 0000);
|
||||
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
|
||||
|
||||
static void rvu_setup_hw_capabilities(struct rvu *rvu)
|
||||
{
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
|
||||
hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
|
||||
hw->cap.nix_fixed_txschq_mapping = false;
|
||||
hw->cap.nix_shaping = true;
|
||||
hw->cap.nix_tx_link_bp = true;
|
||||
hw->cap.nix_rx_multicast = true;
|
||||
|
||||
if (is_rvu_96xx_B0(rvu)) {
|
||||
hw->cap.nix_fixed_txschq_mapping = true;
|
||||
hw->cap.nix_txsch_per_cgx_lmac = 4;
|
||||
hw->cap.nix_txsch_per_lbk_lmac = 132;
|
||||
hw->cap.nix_txsch_per_sdp_lmac = 76;
|
||||
hw->cap.nix_shaping = false;
|
||||
hw->cap.nix_tx_link_bp = false;
|
||||
if (is_rvu_96xx_A0(rvu))
|
||||
hw->cap.nix_rx_multicast = false;
|
||||
}
|
||||
}
|
||||
|
||||
/* Poll a RVU block's register 'offset', for a 'zero'
|
||||
* or 'nonzero' at bits specified by 'mask'
|
||||
*/
|
||||
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
|
||||
{
|
||||
unsigned long timeout = jiffies + usecs_to_jiffies(100);
|
||||
unsigned long timeout = jiffies + usecs_to_jiffies(10000);
|
||||
void __iomem *reg;
|
||||
u64 reg_val;
|
||||
|
||||
@ -73,7 +95,6 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
|
||||
if (!zero && (reg_val & mask))
|
||||
return 0;
|
||||
usleep_range(1, 5);
|
||||
timeout--;
|
||||
}
|
||||
return -EBUSY;
|
||||
}
|
||||
@ -433,9 +454,9 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
|
||||
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
|
||||
rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
|
||||
}
|
||||
|
||||
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
|
||||
@ -877,8 +898,8 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
|
||||
struct ready_msg_rsp *rsp)
|
||||
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
|
||||
struct ready_msg_rsp *rsp)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@ -1023,9 +1044,9 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
|
||||
struct rsrc_detach *detach,
|
||||
struct msg_rsp *rsp)
|
||||
int rvu_mbox_handler_detach_resources(struct rvu *rvu,
|
||||
struct rsrc_detach *detach,
|
||||
struct msg_rsp *rsp)
|
||||
{
|
||||
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
|
||||
}
|
||||
@ -1171,9 +1192,9 @@ fail:
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
|
||||
struct rsrc_attach *attach,
|
||||
struct msg_rsp *rsp)
|
||||
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
|
||||
struct rsrc_attach *attach,
|
||||
struct msg_rsp *rsp)
|
||||
{
|
||||
u16 pcifunc = attach->hdr.pcifunc;
|
||||
int err;
|
||||
@ -1294,8 +1315,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
|
||||
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
|
||||
}
|
||||
|
||||
static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
|
||||
struct msix_offset_rsp *rsp)
|
||||
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
|
||||
struct msix_offset_rsp *rsp)
|
||||
{
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
@ -1343,8 +1364,8 @@ static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp)
|
||||
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp)
|
||||
{
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
u16 vf, numvfs;
|
||||
@ -1363,6 +1384,17 @@ static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
|
||||
struct get_hw_cap_rsp *rsp)
|
||||
{
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
|
||||
rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
|
||||
rsp->nix_shaping = hw->cap.nix_shaping;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
|
||||
struct mbox_msghdr *req)
|
||||
{
|
||||
@ -1440,12 +1472,12 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
|
||||
|
||||
/* Process received mbox messages */
|
||||
req_hdr = mdev->mbase + mbox->rx_start;
|
||||
if (req_hdr->num_msgs == 0)
|
||||
if (mw->mbox_wrk[devid].num_msgs == 0)
|
||||
return;
|
||||
|
||||
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
|
||||
|
||||
for (id = 0; id < req_hdr->num_msgs; id++) {
|
||||
for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
|
||||
msg = mdev->mbase + offset;
|
||||
|
||||
/* Set which PF/VF sent this message based on mbox IRQ */
|
||||
@ -1471,13 +1503,14 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
|
||||
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
|
||||
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
|
||||
err, otx2_mbox_id2name(msg->id),
|
||||
msg->id, devid,
|
||||
msg->id, rvu_get_pf(msg->pcifunc),
|
||||
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
|
||||
else
|
||||
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
|
||||
err, otx2_mbox_id2name(msg->id),
|
||||
msg->id, devid);
|
||||
}
|
||||
mw->mbox_wrk[devid].num_msgs = 0;
|
||||
|
||||
/* Send mbox responses to VF/PF */
|
||||
otx2_mbox_msg_send(mbox, devid);
|
||||
@ -1523,14 +1556,14 @@ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
|
||||
mdev = &mbox->dev[devid];
|
||||
|
||||
rsp_hdr = mdev->mbase + mbox->rx_start;
|
||||
if (rsp_hdr->num_msgs == 0) {
|
||||
if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
|
||||
dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
|
||||
return;
|
||||
}
|
||||
|
||||
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
|
||||
|
||||
for (id = 0; id < rsp_hdr->num_msgs; id++) {
|
||||
for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
|
||||
msg = mdev->mbase + offset;
|
||||
|
||||
if (msg->id >= MBOX_MSG_MAX) {
|
||||
@ -1560,6 +1593,7 @@ end:
|
||||
offset = mbox->rx_start + msg->next_msgoff;
|
||||
mdev->msgs_acked++;
|
||||
}
|
||||
mw->mbox_wrk_up[devid].up_num_msgs = 0;
|
||||
|
||||
otx2_mbox_reset(mbox, devid);
|
||||
}
|
||||
@ -1697,14 +1731,28 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
|
||||
mbox = &mw->mbox;
|
||||
mdev = &mbox->dev[i];
|
||||
hdr = mdev->mbase + mbox->rx_start;
|
||||
if (hdr->num_msgs)
|
||||
queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
|
||||
|
||||
/*The hdr->num_msgs is set to zero immediately in the interrupt
|
||||
* handler to ensure that it holds a correct value next time
|
||||
* when the interrupt handler is called.
|
||||
* pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
|
||||
* pf>mbox.up_num_msgs holds the data for use in
|
||||
* pfaf_mbox_up_handler.
|
||||
*/
|
||||
|
||||
if (hdr->num_msgs) {
|
||||
mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
|
||||
hdr->num_msgs = 0;
|
||||
queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
|
||||
}
|
||||
mbox = &mw->mbox_up;
|
||||
mdev = &mbox->dev[i];
|
||||
hdr = mdev->mbase + mbox->rx_start;
|
||||
if (hdr->num_msgs)
|
||||
if (hdr->num_msgs) {
|
||||
mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
|
||||
hdr->num_msgs = 0;
|
||||
queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2316,18 +2364,6 @@ static int rvu_enable_sriov(struct rvu *rvu)
|
||||
if (vfs > chans)
|
||||
vfs = chans;
|
||||
|
||||
/* AF's VFs work in pairs and talk over consecutive loopback channels.
|
||||
* Thus we want to enable maximum even number of VFs. In case
|
||||
* odd number of VFs are available then the last VF on the list
|
||||
* remains disabled.
|
||||
*/
|
||||
if (vfs & 0x1) {
|
||||
dev_warn(&pdev->dev,
|
||||
"Number of VFs should be even. Enabling %d out of %d.\n",
|
||||
vfs - 1, vfs);
|
||||
vfs--;
|
||||
}
|
||||
|
||||
if (!vfs)
|
||||
return 0;
|
||||
|
||||
@ -2432,6 +2468,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
|
||||
rvu_reset_all_blocks(rvu);
|
||||
|
||||
rvu_setup_hw_capabilities(rvu);
|
||||
|
||||
err = rvu_setup_hw_resources(rvu);
|
||||
if (err)
|
||||
goto err_release_regions;
|
||||
@ -2456,6 +2494,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (err)
|
||||
goto err_irq;
|
||||
|
||||
/* Initialize debugfs */
|
||||
rvu_dbg_init(rvu);
|
||||
|
||||
return 0;
|
||||
err_irq:
|
||||
rvu_unregister_interrupts(rvu);
|
||||
@ -2482,6 +2523,7 @@ static void rvu_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct rvu *rvu = pci_get_drvdata(pdev);
|
||||
|
||||
rvu_dbg_exit(rvu);
|
||||
rvu_unregister_interrupts(rvu);
|
||||
rvu_flr_wq_destroy(rvu);
|
||||
rvu_cgx_exit(rvu);
|
||||
|
@ -35,9 +35,36 @@
|
||||
#define RVU_PFVF_FUNC_SHIFT 0
|
||||
#define RVU_PFVF_FUNC_MASK 0x3FF
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct dump_ctx {
|
||||
int lf;
|
||||
int id;
|
||||
bool all;
|
||||
};
|
||||
|
||||
struct rvu_debugfs {
|
||||
struct dentry *root;
|
||||
struct dentry *cgx_root;
|
||||
struct dentry *cgx;
|
||||
struct dentry *lmac;
|
||||
struct dentry *npa;
|
||||
struct dentry *nix;
|
||||
struct dentry *npc;
|
||||
struct dump_ctx npa_aura_ctx;
|
||||
struct dump_ctx npa_pool_ctx;
|
||||
struct dump_ctx nix_cq_ctx;
|
||||
struct dump_ctx nix_rq_ctx;
|
||||
struct dump_ctx nix_sq_ctx;
|
||||
int npa_qsize_id;
|
||||
int nix_qsize_id;
|
||||
};
|
||||
#endif
|
||||
|
||||
struct rvu_work {
|
||||
struct work_struct work;
|
||||
struct rvu *rvu;
|
||||
int num_msgs;
|
||||
int up_num_msgs;
|
||||
};
|
||||
|
||||
struct rsrc_bmap {
|
||||
@ -99,6 +126,7 @@ struct npc_mcam {
|
||||
u16 lprio_start;
|
||||
u16 hprio_count;
|
||||
u16 hprio_end;
|
||||
u16 rx_miss_act_cntr; /* Counter for RX MISS action */
|
||||
};
|
||||
|
||||
/* Structure for per RVU func info ie PF/VF */
|
||||
@ -151,15 +179,20 @@ struct rvu_pfvf {
|
||||
struct mcam_entry entry;
|
||||
int rxvlan_index;
|
||||
bool rxvlan;
|
||||
|
||||
bool cgx_in_use; /* this PF/VF using CGX? */
|
||||
int cgx_users; /* number of cgx users - used only by PFs */
|
||||
};
|
||||
|
||||
struct nix_txsch {
|
||||
struct rsrc_bmap schq;
|
||||
u8 lvl;
|
||||
#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0)
|
||||
#define NIX_TXSCHQ_FREE BIT_ULL(1)
|
||||
#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0)
|
||||
#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
|
||||
#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
|
||||
#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
|
||||
#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16))
|
||||
u32 *pfvf_map;
|
||||
};
|
||||
|
||||
@ -193,6 +226,21 @@ struct nix_hw {
|
||||
struct nix_lso lso;
|
||||
};
|
||||
|
||||
/* RVU block's capabilities or functionality,
|
||||
* which vary by silicon version/skew.
|
||||
*/
|
||||
struct hw_cap {
|
||||
/* Transmit side supported functionality */
|
||||
u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
|
||||
u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
|
||||
u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
|
||||
u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
|
||||
bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
|
||||
bool nix_shaping; /* Is shaping and coloring supported */
|
||||
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
|
||||
bool nix_rx_multicast; /* Rx packet replication support */
|
||||
};
|
||||
|
||||
struct rvu_hwinfo {
|
||||
u8 total_pfs; /* MAX RVU PFs HW supports */
|
||||
u16 total_vfs; /* Max RVU VFs HW supports */
|
||||
@ -204,7 +252,7 @@ struct rvu_hwinfo {
|
||||
u8 sdp_links;
|
||||
u8 npc_kpus; /* No of parser units */
|
||||
|
||||
|
||||
struct hw_cap cap;
|
||||
struct rvu_block block[BLK_COUNT]; /* Block info */
|
||||
struct nix_hw *nix0;
|
||||
struct npc_pkind pkind;
|
||||
@ -261,8 +309,13 @@ struct rvu {
|
||||
struct workqueue_struct *cgx_evh_wq;
|
||||
spinlock_t cgx_evq_lock; /* cgx event queue lock */
|
||||
struct list_head cgx_evq_head; /* cgx event queue head */
|
||||
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
|
||||
|
||||
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct rvu_debugfs rvu_dbg;
|
||||
#endif
|
||||
};
|
||||
|
||||
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
|
||||
@ -285,7 +338,8 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
|
||||
return readq(rvu->pfreg_base + offset);
|
||||
}
|
||||
|
||||
static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
|
||||
/* Silicon revisions */
|
||||
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
|
||||
{
|
||||
struct pci_dev *pdev = rvu->pdev;
|
||||
|
||||
@ -293,6 +347,14 @@ static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
|
||||
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
|
||||
}
|
||||
|
||||
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
|
||||
{
|
||||
struct pci_dev *pdev = rvu->pdev;
|
||||
|
||||
return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
|
||||
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
|
||||
}
|
||||
|
||||
/* Function Prototypes
|
||||
* RVU
|
||||
*/
|
||||
@ -342,52 +404,25 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
|
||||
*lmac_id = (map & 0xF);
|
||||
}
|
||||
|
||||
#define M(_name, _id, fn_name, req, rsp) \
|
||||
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
|
||||
MBOX_MESSAGES
|
||||
#undef M
|
||||
|
||||
int rvu_cgx_init(struct rvu *rvu);
|
||||
int rvu_cgx_exit(struct rvu *rvu);
|
||||
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
|
||||
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
|
||||
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
|
||||
struct cgx_stats_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
|
||||
struct cgx_mac_addr_set_or_get *req,
|
||||
struct cgx_mac_addr_set_or_get *rsp);
|
||||
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
|
||||
struct cgx_mac_addr_set_or_get *req,
|
||||
struct cgx_mac_addr_set_or_get *rsp);
|
||||
int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
|
||||
struct cgx_link_info_msg *rsp);
|
||||
int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
|
||||
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
|
||||
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
|
||||
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
|
||||
int rxtxflag, u64 *stat);
|
||||
/* NPA APIs */
|
||||
int rvu_npa_init(struct rvu *rvu);
|
||||
void rvu_npa_freemem(struct rvu *rvu);
|
||||
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
|
||||
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
|
||||
struct npa_aq_enq_req *req,
|
||||
struct npa_aq_enq_rsp *rsp);
|
||||
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
|
||||
struct hwctx_disable_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
|
||||
struct npa_lf_alloc_req *req,
|
||||
struct npa_lf_alloc_rsp *rsp);
|
||||
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
|
||||
struct npa_aq_enq_rsp *rsp);
|
||||
|
||||
/* NIX APIs */
|
||||
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
|
||||
@ -397,55 +432,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
|
||||
void rvu_nix_freemem(struct rvu *rvu);
|
||||
int rvu_get_nixlf_count(struct rvu *rvu);
|
||||
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
|
||||
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
||||
struct nix_lf_alloc_req *req,
|
||||
struct nix_lf_alloc_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
|
||||
struct nix_aq_enq_req *req,
|
||||
struct nix_aq_enq_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
|
||||
struct hwctx_disable_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
|
||||
struct nix_txsch_alloc_req *req,
|
||||
struct nix_txsch_alloc_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
|
||||
struct nix_txsch_free_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
|
||||
struct nix_txschq_config *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
|
||||
struct nix_vtag_config *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
|
||||
struct nix_rss_flowkey_cfg *req,
|
||||
struct nix_rss_flowkey_cfg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
|
||||
struct nix_set_mac_addr *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
|
||||
struct nix_mark_format_cfg *req,
|
||||
struct nix_mark_format_cfg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
|
||||
struct nix_lso_format_cfg *req,
|
||||
struct nix_lso_format_cfg_rsp *rsp);
|
||||
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
|
||||
|
||||
/* NPC APIs */
|
||||
int rvu_npc_init(struct rvu *rvu);
|
||||
@ -460,45 +447,25 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
|
||||
int nixlf, u64 chan);
|
||||
void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
|
||||
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
|
||||
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
|
||||
int group, int alg_idx, int mcam_index);
|
||||
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
|
||||
struct npc_mcam_alloc_entry_req *req,
|
||||
struct npc_mcam_alloc_entry_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
|
||||
struct npc_mcam_free_entry_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
|
||||
struct npc_mcam_write_entry_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
|
||||
struct npc_mcam_ena_dis_entry_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
|
||||
struct npc_mcam_ena_dis_entry_req *req,
|
||||
struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
|
||||
struct npc_mcam_shift_entry_req *req,
|
||||
struct npc_mcam_shift_entry_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
|
||||
struct npc_mcam_alloc_counter_req *req,
|
||||
struct npc_mcam_alloc_counter_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
|
||||
struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
|
||||
struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
|
||||
struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
|
||||
struct npc_mcam_oper_counter_req *req,
|
||||
struct npc_mcam_oper_counter_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
|
||||
struct npc_mcam_alloc_and_write_entry_req *req,
|
||||
struct npc_mcam_alloc_and_write_entry_rsp *rsp);
|
||||
int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
|
||||
struct npc_get_kex_cfg_rsp *rsp);
|
||||
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
|
||||
int blkaddr, int *alloc_cnt,
|
||||
int *enable_cnt);
|
||||
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
|
||||
int blkaddr, int *alloc_cnt,
|
||||
int *enable_cnt);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
void rvu_dbg_init(struct rvu *rvu);
|
||||
void rvu_dbg_exit(struct rvu *rvu);
|
||||
#else
|
||||
static inline void rvu_dbg_init(struct rvu *rvu) {}
|
||||
static inline void rvu_dbg_exit(struct rvu *rvu) {}
|
||||
#endif
|
||||
#endif /* RVU_H */
|
||||
|
@ -14,6 +14,7 @@
|
||||
|
||||
#include "rvu.h"
|
||||
#include "cgx.h"
|
||||
#include "rvu_reg.h"
|
||||
|
||||
struct cgx_evq_entry {
|
||||
struct list_head evq_node;
|
||||
@ -40,12 +41,25 @@ MBOX_UP_CGX_MESSAGES
|
||||
#undef M
|
||||
|
||||
/* Returns bitmap of mapped PFs */
|
||||
static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
|
||||
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
|
||||
{
|
||||
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
|
||||
}
|
||||
|
||||
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
|
||||
static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
|
||||
{
|
||||
unsigned long pfmap;
|
||||
|
||||
pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
|
||||
|
||||
/* Assumes only one pf mapped to a cgx lmac port */
|
||||
if (!pfmap)
|
||||
return -ENODEV;
|
||||
else
|
||||
return find_first_bit(&pfmap, 16);
|
||||
}
|
||||
|
||||
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
|
||||
{
|
||||
return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
|
||||
}
|
||||
@ -294,6 +308,8 @@ int rvu_cgx_init(struct rvu *rvu)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
mutex_init(&rvu->cgx_cfg_lock);
|
||||
|
||||
/* Ensure event handler registration is completed, before
|
||||
* we turn on the links
|
||||
*/
|
||||
@ -334,6 +350,24 @@ int rvu_cgx_exit(struct rvu *rvu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
|
||||
{
|
||||
u8 cgx_id, lmac_id;
|
||||
void *cgxd;
|
||||
|
||||
if (!is_pf_cgxmapped(rvu, pf))
|
||||
return;
|
||||
|
||||
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
|
||||
cgxd = rvu_cgx_pdata(cgx_id, rvu);
|
||||
|
||||
/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
|
||||
if (enable)
|
||||
cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
|
||||
else
|
||||
cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
|
||||
}
|
||||
|
||||
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
|
||||
{
|
||||
int pf = rvu_get_pf(pcifunc);
|
||||
@ -562,3 +596,94 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
|
||||
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
|
||||
* from its VFs as well. ie. NIX rx/tx counters at the CGX port level
|
||||
*/
|
||||
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
|
||||
int index, int rxtxflag, u64 *stat)
|
||||
{
|
||||
struct rvu_block *block;
|
||||
int blkaddr;
|
||||
u16 pcifunc;
|
||||
int pf, lf;
|
||||
|
||||
if (!cgxd || !rvu)
|
||||
return -EINVAL;
|
||||
|
||||
pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
|
||||
if (pf < 0)
|
||||
return pf;
|
||||
|
||||
/* Assumes LF of a PF and all of its VF belongs to the same
|
||||
* NIX block
|
||||
*/
|
||||
pcifunc = pf << RVU_PFVF_PF_SHIFT;
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
|
||||
if (blkaddr < 0)
|
||||
return 0;
|
||||
block = &rvu->hw->block[blkaddr];
|
||||
|
||||
*stat = 0;
|
||||
for (lf = 0; lf < block->lf.max; lf++) {
|
||||
/* Check if a lf is attached to this PF or one of its VFs */
|
||||
if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
|
||||
~RVU_PFVF_FUNC_MASK)))
|
||||
continue;
|
||||
if (rxtxflag == NIX_STATS_RX)
|
||||
*stat += rvu_read64(rvu, blkaddr,
|
||||
NIX_AF_LFX_RX_STATX(lf, index));
|
||||
else
|
||||
*stat += rvu_read64(rvu, blkaddr,
|
||||
NIX_AF_LFX_TX_STATX(lf, index));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
|
||||
{
|
||||
struct rvu_pfvf *parent_pf, *pfvf;
|
||||
int cgx_users, err = 0;
|
||||
|
||||
if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
|
||||
return 0;
|
||||
|
||||
parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
|
||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
|
||||
mutex_lock(&rvu->cgx_cfg_lock);
|
||||
|
||||
if (start && pfvf->cgx_in_use)
|
||||
goto exit; /* CGX is already started hence nothing to do */
|
||||
if (!start && !pfvf->cgx_in_use)
|
||||
goto exit; /* CGX is already stopped hence nothing to do */
|
||||
|
||||
if (start) {
|
||||
cgx_users = parent_pf->cgx_users;
|
||||
parent_pf->cgx_users++;
|
||||
} else {
|
||||
parent_pf->cgx_users--;
|
||||
cgx_users = parent_pf->cgx_users;
|
||||
}
|
||||
|
||||
/* Start CGX when first of all NIXLFs is started.
|
||||
* Stop CGX when last of all NIXLFs is stopped.
|
||||
*/
|
||||
if (!cgx_users) {
|
||||
err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
|
||||
start);
|
||||
if (err) {
|
||||
dev_err(rvu->dev, "Unable to %s CGX\n",
|
||||
start ? "start" : "stop");
|
||||
/* Revert the usage count in case of error */
|
||||
parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
|
||||
: parent_pf->cgx_users + 1;
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
pfvf->cgx_in_use = start;
|
||||
exit:
|
||||
mutex_unlock(&rvu->cgx_cfg_lock);
|
||||
return err;
|
||||
}
|
||||
|
1711
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
Normal file
1711
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -52,8 +52,8 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
|
||||
struct npa_aq_enq_rsp *rsp)
|
||||
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
|
||||
struct npa_aq_enq_rsp *rsp)
|
||||
{
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
u16 pcifunc = req->hdr.pcifunc;
|
||||
@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
|
||||
return err;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
|
||||
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
|
||||
{
|
||||
struct npa_aq_enq_req lock_ctx_req;
|
||||
int err;
|
||||
|
||||
if (req->op != NPA_AQ_INSTOP_INIT)
|
||||
return 0;
|
||||
|
||||
memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
|
||||
lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
|
||||
lock_ctx_req.ctype = req->ctype;
|
||||
lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
|
||||
lock_ctx_req.aura_id = req->aura_id;
|
||||
err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
|
||||
if (err)
|
||||
dev_err(rvu->dev,
|
||||
"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
|
||||
req->hdr.pcifunc,
|
||||
(req->ctype == NPA_AQ_CTYPE_AURA) ?
|
||||
"Aura" : "Pool", req->aura_id);
|
||||
return err;
|
||||
}
|
||||
|
||||
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
|
||||
struct npa_aq_enq_req *req,
|
||||
struct npa_aq_enq_rsp *rsp)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = rvu_npa_aq_enq_inst(rvu, req, rsp);
|
||||
if (!err)
|
||||
err = npa_lf_hwctx_lockdown(rvu, req);
|
||||
return err;
|
||||
}
|
||||
#else
|
||||
|
||||
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
|
||||
struct npa_aq_enq_req *req,
|
||||
struct npa_aq_enq_rsp *rsp)
|
||||
{
|
||||
return rvu_npa_aq_enq_inst(rvu, req, rsp);
|
||||
}
|
||||
#endif
|
||||
|
||||
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
|
||||
struct hwctx_disable_req *req,
|
||||
@ -289,6 +327,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
|
||||
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
|
||||
return NPA_AF_ERR_PARAM;
|
||||
|
||||
if (req->way_mask)
|
||||
req->way_mask &= 0xFFFF;
|
||||
|
||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
|
||||
if (!pfvf->npalf || blkaddr < 0)
|
||||
@ -345,7 +386,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
|
||||
/* Clear way partition mask and set aura offset to '0' */
|
||||
cfg &= ~(BIT_ULL(34) - 1);
|
||||
/* Set aura size & enable caching of contexts */
|
||||
cfg |= (req->aura_sz << 16) | BIT_ULL(34);
|
||||
cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
|
||||
|
||||
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
|
||||
|
||||
/* Configure aura HW context's base */
|
||||
@ -353,7 +395,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
|
||||
(u64)pfvf->aura_ctx->iova);
|
||||
|
||||
/* Enable caching of qints hw context */
|
||||
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
|
||||
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
|
||||
BIT_ULL(36) | req->way_mask << 20);
|
||||
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
|
||||
(u64)pfvf->npa_qints_ctx->iova);
|
||||
|
||||
@ -422,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
|
||||
/* Do not bypass NDC cache */
|
||||
cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
|
||||
cfg &= ~0x03DULL;
|
||||
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
|
||||
/* Disable caching of stack pages */
|
||||
cfg |= 0x10ULL;
|
||||
#endif
|
||||
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
|
||||
|
||||
/* Result structure can be followed by Aura/Pool context at
|
||||
|
@ -120,6 +120,31 @@ static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
}
|
||||
}
|
||||
|
||||
static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
int blkaddr, int index)
|
||||
{
|
||||
int bank = npc_get_bank(mcam, index);
|
||||
int actbank = bank;
|
||||
|
||||
index &= (mcam->banksize - 1);
|
||||
for (; bank < (actbank + mcam->banks_per_entry); bank++) {
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
|
||||
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
|
||||
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
|
||||
rvu_write64(rvu, blkaddr,
|
||||
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void npc_get_keyword(struct mcam_entry *entry, int idx,
|
||||
u64 *cam0, u64 *cam1)
|
||||
{
|
||||
@ -211,6 +236,12 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
actindex = index;
|
||||
index &= (mcam->banksize - 1);
|
||||
|
||||
/* Disable before mcam entry update */
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
|
||||
|
||||
/* Clear mcam entry to avoid writes being suppressed by NPC */
|
||||
npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
|
||||
|
||||
/* CAM1 takes the comparison value and
|
||||
* CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
|
||||
* CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
|
||||
@ -251,8 +282,6 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
/* Enable the entry */
|
||||
if (enable)
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
|
||||
else
|
||||
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
|
||||
}
|
||||
|
||||
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
|
||||
@ -354,8 +383,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
|
||||
NIX_INTF_RX, &entry, true);
|
||||
|
||||
/* add VLAN matching, setup action and save entry back for later */
|
||||
entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
|
||||
entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
|
||||
entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
|
||||
entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
|
||||
|
||||
entry.vtag_action = VTAG0_VALID_BIT |
|
||||
FIELD_PREP(VTAG0_TYPE_MASK, 0) |
|
||||
@ -448,68 +477,75 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
|
||||
{
|
||||
struct npc_mcam *mcam = &rvu->hw->mcam;
|
||||
struct mcam_entry entry = { {0} };
|
||||
struct rvu_hwinfo *hw = rvu->hw;
|
||||
struct nix_rx_action action;
|
||||
#ifdef MCAST_MCE
|
||||
struct rvu_pfvf *pfvf;
|
||||
#endif
|
||||
int blkaddr, index;
|
||||
|
||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
|
||||
if (blkaddr < 0)
|
||||
return;
|
||||
|
||||
/* Only PF can add a bcast match entry */
|
||||
if (pcifunc & RVU_PFVF_FUNC_MASK)
|
||||
/* Skip LBK VFs */
|
||||
if (is_afvf(pcifunc))
|
||||
return;
|
||||
#ifdef MCAST_MCE
|
||||
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
|
||||
#endif
|
||||
|
||||
/* If pkt replication is not supported,
|
||||
* then only PF is allowed to add a bcast match entry.
|
||||
*/
|
||||
if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
|
||||
return;
|
||||
|
||||
/* Get 'pcifunc' of PF device */
|
||||
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
|
||||
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
|
||||
nixlf, NIXLF_BCAST_ENTRY);
|
||||
|
||||
        /* Check for L2B bit and LMAC channel
         * NOTE: The MKEX default profile (a reduced version intended to
         * accommodate more capability while ignoring a few bits) is a
         * stop-gap approach.
         * We care for L2B, which per the HRM's NPC_PARSE_KEX_S sits at
         * BIT_POS[25], so it is moved to BIT_POS[13]; ERRCODE and ERRLEV
         * are ignored, as otherwise we'd lose out on capability features
         * needed for CoS (from an ODP PoV), e.g. VLAN, DSCP.
         *
         * Reduced layout of the MKEX default profile -
         * It includes the following (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD):
         *
         * BIT_POS[31:28] : LD
         * BIT_POS[27:24] : LC
         * BIT_POS[23:20] : LB
         * BIT_POS[19:16] : LA
         * BIT_POS[15:12] : L3B, L3M, L2B, L2M
         * BIT_POS[11:00] : CHAN
         *
         */
        /* Match ingress channel */
        entry.kw[0] = chan;
        entry.kw_mask[0] = 0xfffull;

        /* Match broadcast MAC address.
         * DMAC is extracted at 0th bit of PARSE_KEX::KW1
         */
        entry.kw[0] = BIT_ULL(13) | chan;
        entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
        entry.kw[1] = 0xffffffffffffull;
        entry.kw_mask[1] = 0xffffffffffffull;

        *(u64 *)&action = 0x00;
#ifdef MCAST_MCE
        /* Early silicon doesn't support pkt replication,
         * so install entry with UCAST action, so that PF
         * receives all broadcast packets.
         */
        action.op = NIX_RX_ACTIONOP_MCAST;
        action.pf_func = pcifunc;
        action.index = pfvf->bcast_mce_idx;
#else
        action.op = NIX_RX_ACTIONOP_UCAST;
        action.pf_func = pcifunc;
#endif
        if (!hw->cap.nix_rx_multicast) {
                /* Early silicon doesn't support pkt replication,
                 * so install entry with UCAST action, so that PF
                 * receives all broadcast packets.
                 */
                action.op = NIX_RX_ACTIONOP_UCAST;
                action.pf_func = pcifunc;
        } else {
                pfvf = rvu_get_pfvf(rvu, pcifunc);
                action.index = pfvf->bcast_mce_idx;
                action.op = NIX_RX_ACTIONOP_MCAST;
        }

        entry.action = *(u64 *)&action;
        npc_config_mcam_entry(rvu, mcam, blkaddr, index,
                              NIX_INTF_RX, &entry, true);
}

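Putting the pieces together: the installed broadcast entry matches the ingress channel in KW0[11:0] and an all-ones DMAC in KW1[47:0], then either unicasts to the PF (silicon without replication support) or multicasts via the PF's MCE list. A user-space sketch of the key construction, with a hypothetical channel number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t chan = 0x800;  /* hypothetical CGX LMAC channel */
        uint64_t kw[2], kw_mask[2];

        /* KW0[11:0]: ingress channel (CHAN occupies BIT_POS[11:00]
         * in the reduced MKEX layout described above).
         */
        kw[0]      = chan;
        kw_mask[0] = 0xfffULL;

        /* KW1[47:0]: DMAC, extracted at bit 0 of PARSE_KEX::KW1.
         * ff:ff:ff:ff:ff:ff selects broadcast frames.
         */
        kw[1]      = 0xffffffffffffULL;
        kw_mask[1] = 0xffffffffffffULL;

        printf("kw0=%#llx/%#llx kw1=%#llx/%#llx\n",
               (unsigned long long)kw[0], (unsigned long long)kw_mask[0],
               (unsigned long long)kw[1], (unsigned long long)kw_mask[1]);
        return 0;
}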
void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int blkaddr, index;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
        if (blkaddr < 0)
                return;

        /* Get 'pcifunc' of PF device */
        pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;

        index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
        npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
}

void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
                                    int group, int alg_idx, int mcam_index)
{
@ -704,8 +740,7 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
        /* Layer B: Stacked VLAN (STAG|QinQ) */
        /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
        cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
        SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);

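For reference, KEX_LD_CFG() packs the extract descriptor written per (intf, lid, ltype, ld). A sketch of the packing as we read it from the driver's helper macro (consult the HRM for the authoritative register format), decoding the Layer B config above:

#include <stdint.h>
#include <stdio.h>

/* Assumed field packing of the driver's KEX_LD_CFG() helper; this is
 * a sketch, not the authoritative NPC_AF_INTFX_LIDX_LTX_LDX_CFG layout.
 */
#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs)   \
        (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) |  \
         ((flags_ena) << 6) | ((key_ofs) & 0x3F))

int main(void)
{
        /* "CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32]":
         * bytesm1 = 3 -> extract 4 bytes
         * hdr_ofs = 4 -> starting 4 bytes into the layer header
         * ena     = 1 -> extractor enabled
         * key_ofs = 4 -> placed at key byte 4, i.e. KW0 bits 63:32
         */
        uint64_t cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);

        printf("cfg = %#llx\n", (unsigned long long)cfg);
        return 0;
}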
        /* Layer C: IPv4 */
        /* SIP+DIP: 8 bytes, KW2[63:0] */
@ -806,11 +841,11 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
        /* Compare with mkex mod_param name string */
        if (mcam_kex->mkex_sign == MKEX_SIGN &&
            !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
                /* Due to an errata (35786) in A0 pass silicon,
                /* Due to an errata (35786) in A0/B0 pass silicon,
                 * parse nibble enable configuration has to be
                 * identical for both Rx and Tx interfaces.
                 */
                if (is_rvu_9xxx_A0(rvu) &&
                if (is_rvu_96xx_B0(rvu) &&
                    mcam_kex->keyx_cfg[NIX_INTF_RX] !=
                    mcam_kex->keyx_cfg[NIX_INTF_TX])
                        goto load_default;
@ -1064,6 +1099,13 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
        mcam->hprio_count = mcam->lprio_count;
        mcam->hprio_end = mcam->hprio_count;

        /* Reserve last counter for MCAM RX miss action which is set to
         * drop pkt. This way we will know how many pkts didn't match
         * any MCAM entry.
         */
        mcam->counters.max--;
        mcam->rx_miss_act_cntr = mcam->counters.max;

        /* Allocate bitmap for managing MCAM counters and memory
         * for saving counter to RVU PFFUNC allocation mapping.
         */
@ -1101,6 +1143,7 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
        struct npc_pkind *pkind = &rvu->hw->pkind;
        struct npc_mcam *mcam = &rvu->hw->mcam;
        u64 keyz = NPC_MCAM_KEY_X2;
        int blkaddr, entry, bank, err;
        u64 cfg, nibble_ena;
@ -1143,7 +1186,7 @@ int rvu_npc_init(struct rvu *rvu)

        /* Config Inner IPV4 NPC layer info */
        rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
                    (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
                    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);

        /* Enable below for Rx pkts.
         * - Outer IPv4 header checksum validation.
@ -1165,7 +1208,7 @@ int rvu_npc_init(struct rvu *rvu)
        /* Due to an errata (35786) in A0 pass silicon, parse nibble enable
         * configuration has to be identical for both Rx and Tx interfaces.
         */
        if (!is_rvu_9xxx_A0(rvu))
        if (!is_rvu_96xx_B0(rvu))
                nibble_ena = (1ULL << 19) - 1;
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
                    ((keyz & 0x3) << 32) | nibble_ena);
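NPC_AF_INTFX_KEX_CFG packs the MCAM key size together with the per-nibble parse-extract enables, which is why the errata forces Rx and Tx to agree on nibble_ena. A sketch of the value computed above (treating NPC_MCAM_KEY_X2 as 1 is our assumption about the enum encoding):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t keyz = 1;              /* NPC_MCAM_KEY_X2, assumed encoding */
        uint64_t nibble_ena = (1ULL << 19) - 1;  /* enable parse nibbles 0..18 */

        /* Key size sits in bits 33:32, nibble enables in the low bits,
         * mirroring the rvu_write64() above.
         */
        uint64_t cfg = ((keyz & 0x3) << 32) | nibble_ena;

        printf("KEX_CFG = %#llx\n", (unsigned long long)cfg);
        return 0;
}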
@ -1183,9 +1226,13 @@ int rvu_npc_init(struct rvu *rvu)
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
                    NIX_TX_ACTIONOP_UCAST_DEFAULT);

        /* If MCAM lookup doesn't result in a match, drop the received packet */
        /* If MCAM lookup doesn't result in a match, drop the received packet.
         * And map this action to a counter to count dropped pkts.
         */
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
                    NIX_RX_ACTIONOP_DROP);
        rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
                    BIT_ULL(9) | mcam->rx_miss_act_cntr);

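The write above maps the RX miss action to the counter reserved in npc_mcam_rsrcs_init(), so packets dropped on MCAM miss become countable (e.g. by the new debugfs code). A user-space sketch of how that register value is formed, with a hypothetical counter pool size:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
        uint64_t max_cntrs = 512;       /* hypothetical HW counter count */
        uint64_t rx_miss_act_cntr;

        /* Reserve the highest counter for the RX miss action, exactly
         * as npc_mcam_rsrcs_init() does with mcam->counters.max.
         */
        max_cntrs--;
        rx_miss_act_cntr = max_cntrs;

        /* Value written to NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX):
         * bit 9 enables stat collection, low bits select the counter.
         */
        printf("MISS_STAT_ACT = %#llx\n",
               (unsigned long long)(BIT_ULL(9) | rx_miss_act_cntr));
        return 0;
}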
        return 0;
}
@ -1200,6 +1247,44 @@ void rvu_npc_freemem(struct rvu *rvu)
        mutex_destroy(&mcam->lock);
}

void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
                                       int blkaddr, int *alloc_cnt,
                                       int *enable_cnt)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int entry;

        *alloc_cnt = 0;
        *enable_cnt = 0;

        for (entry = 0; entry < mcam->bmap_entries; entry++) {
                if (mcam->entry2pfvf_map[entry] == pcifunc) {
                        (*alloc_cnt)++;
                        if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
                                (*enable_cnt)++;
                }
        }
}

void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
                                         int blkaddr, int *alloc_cnt,
                                         int *enable_cnt)
{
        struct npc_mcam *mcam = &rvu->hw->mcam;
        int cntr;

        *alloc_cnt = 0;
        *enable_cnt = 0;

        for (cntr = 0; cntr < mcam->counters.max; cntr++) {
                if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
                        (*alloc_cnt)++;
                        if (mcam->cntr_refcnt[cntr])
                                (*enable_cnt)++;
                }
        }
}

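These two helpers exist so the new debugfs code can report per-PF/VF MCAM utilization. A hypothetical caller, sketched in the same kernel style (the function name and output format are ours, not rvu_debugfs.c's):

/* Hypothetical debugfs-style caller (sketch only): print MCAM entry
 * and counter utilization for one PF/VF into a seq_file.
 */
static void print_mcam_usage(struct rvu *rvu, u16 pcifunc, int blkaddr,
                             struct seq_file *s)
{
        int alloc = 0, ena = 0;

        rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
                                          &alloc, &ena);
        seq_printf(s, "entries : allocated %d, enabled %d\n", alloc, ena);

        rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
                                            &alloc, &ena);
        seq_printf(s, "counters: allocated %d, in use %d\n", alloc, ena);
}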
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
                                 u16 pcifunc, int entry)
{
@ -246,6 +246,7 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a)  (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a)              (0x700 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS       (0x750)
#define NIX_AF_PSE_CHANNEL_LEVEL        (0x800)
#define NIX_AF_PSE_SHAPER_CFG           (0x810)
#define NIX_AF_TX_EXPR_CREDIT           (0x830)
@ -435,7 +436,6 @@
#define CPT_AF_LF_RST                   (0x44000)
#define CPT_AF_BLK_RST                  (0x46000)

#define NDC_AF_BLK_RST                  (0x002F0)
#define NPC_AF_BLK_RST                  (0x00040)

/* NPC */
@ -499,4 +499,30 @@
#define NPC_AF_DBG_DATAX(a)             (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a)           (0x3001800 | (a) << 4)

/* NDC */
#define NDC_AF_CONST                    (0x00000)
#define NDC_AF_CLK_EN                   (0x00020)
#define NDC_AF_CTL                      (0x00030)
#define NDC_AF_BANK_CTL                 (0x00040)
#define NDC_AF_BANK_CTL_DONE            (0x00048)
#define NDC_AF_INTR                     (0x00058)
#define NDC_AF_INTR_W1S                 (0x00060)
#define NDC_AF_INTR_ENA_W1S             (0x00068)
#define NDC_AF_INTR_ENA_W1C             (0x00070)
#define NDC_AF_ACTIVE_PC                (0x00078)
#define NDC_AF_BP_TEST_ENABLE           (0x001F8)
#define NDC_AF_BP_TEST(a)               (0x00200 | (a) << 3)
#define NDC_AF_BLK_RST                  (0x002F0)
#define NDC_PRIV_AF_INT_CFG             (0x002F8)
#define NDC_AF_HASHX(a)                 (0x00300 | (a) << 3)
#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
                (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \
                (0x00D00 | (a) << 5 | (b) << 4 | (c) << 3)
#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \
                (0x00E00 | (a) << 5 | (b) << 4 | (c) << 3)
#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \
                (0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a)          (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a)         (0x01100 | (a) << 3)
#endif /* RVU_REG_H */
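The PORTX/BANKX performance counters are what make NDC cache behavior observable from debugfs; the macros encode the port and transaction selectors into the low offset bits. A user-space sketch of the offset arithmetic, reusing the defines verbatim (the exact meaning of the b/c selectors is our assumption):

#include <stdio.h>

/* Offset packing copied from the defines above. */
#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
        (0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
#define NDC_AF_BANKX_HIT_PC(a)  (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)

int main(void)
{
        /* e.g. port 0; b and c presumably select request type and
         * read/write (our assumption, not confirmed by the defines).
         */
        printf("REQ_PC  port 0: %#x\n",
               NDC_AF_PORTX_RTX_RWX_REQ_PC(0, 0, 0));
        printf("HIT_PC  bank 1: %#x\n", NDC_AF_BANKX_HIT_PC(1));
        printf("MISS_PC bank 1: %#x\n", NDC_AF_BANKX_MISS_PC(1));
        return 0;
}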
@ -13,22 +13,22 @@

/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
        BLKADDR_RVUM            = 0x0ULL,
        BLKADDR_LMT             = 0x1ULL,
        BLKADDR_MSIX            = 0x2ULL,
        BLKADDR_NPA             = 0x3ULL,
        BLKADDR_NIX0            = 0x4ULL,
        BLKADDR_NIX1            = 0x5ULL,
        BLKADDR_NPC             = 0x6ULL,
        BLKADDR_SSO             = 0x7ULL,
        BLKADDR_SSOW            = 0x8ULL,
        BLKADDR_TIM             = 0x9ULL,
        BLKADDR_CPT0            = 0xaULL,
        BLKADDR_CPT1            = 0xbULL,
        BLKADDR_NDC0            = 0xcULL,
        BLKADDR_NDC1            = 0xdULL,
        BLKADDR_NDC2            = 0xeULL,
        BLK_COUNT               = 0xfULL,
        BLKADDR_RVUM            = 0x0ULL,
        BLKADDR_LMT             = 0x1ULL,
        BLKADDR_MSIX            = 0x2ULL,
        BLKADDR_NPA             = 0x3ULL,
        BLKADDR_NIX0            = 0x4ULL,
        BLKADDR_NIX1            = 0x5ULL,
        BLKADDR_NPC             = 0x6ULL,
        BLKADDR_SSO             = 0x7ULL,
        BLKADDR_SSOW            = 0x8ULL,
        BLKADDR_TIM             = 0x9ULL,
        BLKADDR_CPT0            = 0xaULL,
        BLKADDR_CPT1            = 0xbULL,
        BLKADDR_NDC_NIX0_RX     = 0xcULL,
        BLKADDR_NDC_NIX0_TX     = 0xdULL,
        BLKADDR_NDC_NPA0        = 0xeULL,
        BLK_COUNT               = 0xfULL,
};

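The rename gives the three NDC instances names that reflect their fixed roles: NIX0 Rx context caching, NIX0 Tx context caching, and NPA context caching. A hypothetical helper in the debugfs spirit (the function name and label strings are ours) that turns a block address into a printable label:

#include <stdio.h>

/* Values mirror enum rvu_block_addr_e above. */
enum {
        BLKADDR_NDC_NIX0_RX = 0xc,
        BLKADDR_NDC_NIX0_TX = 0xd,
        BLKADDR_NDC_NPA0    = 0xe,
};

/* Hypothetical helper: map an NDC block address to a readable name. */
static const char *ndc_blk_name(int blkaddr)
{
        switch (blkaddr) {
        case BLKADDR_NDC_NIX0_RX: return "NDC-NIX0-RX";
        case BLKADDR_NDC_NIX0_TX: return "NDC-NIX0-TX";
        case BLKADDR_NDC_NPA0:    return "NDC-NPA0";
        default:                  return "unknown";
        }
}

int main(void)
{
        printf("0xc -> %s\n", ndc_blk_name(BLKADDR_NDC_NIX0_RX));
        return 0;
}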
/* RVU Block Type Enumeration */
@ -474,9 +474,9 @@ struct nix_cq_ctx_s {
        u64 ena                 : 1;
        u64 drop_ena            : 1;
        u64 drop                : 8;
        u64 dp                  : 8;
        u64 bp                  : 8;
#else
        u64 dp                  : 8;
        u64 bp                  : 8;
        u64 drop                : 8;
        u64 drop_ena            : 1;
        u64 ena                 : 1;