octeontx2-af: Support configurable NDC cache way_mask
Each of the NIX/NPA LFs can choose which ways of their respective NDC caches should be used to cache their contexts. This enables flexible configurations such as disabling caching for an LF, limiting its contexts to a certain set of ways, etc. Separate way_mask for NIX-TX and NIX-RX is not supported. Signed-off-by: Geetha sowjanya <gakula@marvell.com> Signed-off-by: Sunil Goutham <sgoutham@marvell.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
561e8752a1
commit
ee1e75915f
@ -361,6 +361,7 @@ struct npa_lf_alloc_req {
|
|||||||
int node;
|
int node;
|
||||||
int aura_sz; /* No of auras */
|
int aura_sz; /* No of auras */
|
||||||
u32 nr_pools; /* No of pools */
|
u32 nr_pools; /* No of pools */
|
||||||
|
u64 way_mask;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct npa_lf_alloc_rsp {
|
struct npa_lf_alloc_rsp {
|
||||||
@ -451,6 +452,7 @@ struct nix_lf_alloc_req {
|
|||||||
u16 npa_func;
|
u16 npa_func;
|
||||||
u16 sso_func;
|
u16 sso_func;
|
||||||
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
|
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
|
||||||
|
u64 way_mask;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct nix_lf_alloc_rsp {
|
struct nix_lf_alloc_rsp {
|
||||||
|
@ -378,7 +378,8 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
|
|||||||
|
|
||||||
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
|
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
|
||||||
struct rvu_pfvf *pfvf, int nixlf,
|
struct rvu_pfvf *pfvf, int nixlf,
|
||||||
int rss_sz, int rss_grps, int hwctx_size)
|
int rss_sz, int rss_grps, int hwctx_size,
|
||||||
|
u64 way_mask)
|
||||||
{
|
{
|
||||||
int err, grp, num_indices;
|
int err, grp, num_indices;
|
||||||
|
|
||||||
@ -398,7 +399,8 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
|
|||||||
/* Config full RSS table size, enable RSS and caching */
|
/* Config full RSS table size, enable RSS and caching */
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
|
||||||
BIT_ULL(36) | BIT_ULL(4) |
|
BIT_ULL(36) | BIT_ULL(4) |
|
||||||
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
|
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
|
||||||
|
way_mask << 20);
|
||||||
/* Config RSS group offset and sizes */
|
/* Config RSS group offset and sizes */
|
||||||
for (grp = 0; grp < rss_grps; grp++)
|
for (grp = 0; grp < rss_grps; grp++)
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
|
||||||
@ -741,6 +743,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
|||||||
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
|
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
|
||||||
return NIX_AF_ERR_PARAM;
|
return NIX_AF_ERR_PARAM;
|
||||||
|
|
||||||
|
if (req->way_mask)
|
||||||
|
req->way_mask &= 0xFFFF;
|
||||||
|
|
||||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
|
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
|
||||||
if (!pfvf->nixlf || blkaddr < 0)
|
if (!pfvf->nixlf || blkaddr < 0)
|
||||||
@ -806,7 +811,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
|||||||
(u64)pfvf->rq_ctx->iova);
|
(u64)pfvf->rq_ctx->iova);
|
||||||
|
|
||||||
/* Set caching and queue count in HW */
|
/* Set caching and queue count in HW */
|
||||||
cfg = BIT_ULL(36) | (req->rq_cnt - 1);
|
cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
|
||||||
|
|
||||||
/* Alloc NIX SQ HW context memory and config the base */
|
/* Alloc NIX SQ HW context memory and config the base */
|
||||||
@ -821,7 +826,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
|||||||
|
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
|
||||||
(u64)pfvf->sq_ctx->iova);
|
(u64)pfvf->sq_ctx->iova);
|
||||||
cfg = BIT_ULL(36) | (req->sq_cnt - 1);
|
|
||||||
|
cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
|
||||||
|
|
||||||
/* Alloc NIX CQ HW context memory and config the base */
|
/* Alloc NIX CQ HW context memory and config the base */
|
||||||
@ -836,13 +842,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
|||||||
|
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
|
||||||
(u64)pfvf->cq_ctx->iova);
|
(u64)pfvf->cq_ctx->iova);
|
||||||
cfg = BIT_ULL(36) | (req->cq_cnt - 1);
|
|
||||||
|
cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
|
||||||
|
|
||||||
/* Initialize receive side scaling (RSS) */
|
/* Initialize receive side scaling (RSS) */
|
||||||
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
|
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
|
||||||
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
|
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
|
||||||
req->rss_sz, req->rss_grps, hwctx_size);
|
req->rss_grps, hwctx_size, req->way_mask);
|
||||||
if (err)
|
if (err)
|
||||||
goto free_mem;
|
goto free_mem;
|
||||||
|
|
||||||
@ -856,7 +863,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
|||||||
|
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
|
||||||
(u64)pfvf->cq_ints_ctx->iova);
|
(u64)pfvf->cq_ints_ctx->iova);
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
|
|
||||||
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
|
||||||
|
BIT_ULL(36) | req->way_mask << 20);
|
||||||
|
|
||||||
/* Alloc memory for QINT's HW contexts */
|
/* Alloc memory for QINT's HW contexts */
|
||||||
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
|
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
|
||||||
@ -868,7 +877,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
|
|||||||
|
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
|
||||||
(u64)pfvf->nix_qints_ctx->iova);
|
(u64)pfvf->nix_qints_ctx->iova);
|
||||||
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
|
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
|
||||||
|
BIT_ULL(36) | req->way_mask << 20);
|
||||||
|
|
||||||
/* Setup VLANX TPID's.
|
/* Setup VLANX TPID's.
|
||||||
* Use VLAN1 for 802.1Q
|
* Use VLAN1 for 802.1Q
|
||||||
|
@ -289,6 +289,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
|
|||||||
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
|
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
|
||||||
return NPA_AF_ERR_PARAM;
|
return NPA_AF_ERR_PARAM;
|
||||||
|
|
||||||
|
if (req->way_mask)
|
||||||
|
req->way_mask &= 0xFFFF;
|
||||||
|
|
||||||
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
pfvf = rvu_get_pfvf(rvu, pcifunc);
|
||||||
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
|
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
|
||||||
if (!pfvf->npalf || blkaddr < 0)
|
if (!pfvf->npalf || blkaddr < 0)
|
||||||
@ -345,7 +348,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
|
|||||||
/* Clear way partition mask and set aura offset to '0' */
|
/* Clear way partition mask and set aura offset to '0' */
|
||||||
cfg &= ~(BIT_ULL(34) - 1);
|
cfg &= ~(BIT_ULL(34) - 1);
|
||||||
/* Set aura size & enable caching of contexts */
|
/* Set aura size & enable caching of contexts */
|
||||||
cfg |= (req->aura_sz << 16) | BIT_ULL(34);
|
cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
|
||||||
|
|
||||||
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
|
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
|
||||||
|
|
||||||
/* Configure aura HW context's base */
|
/* Configure aura HW context's base */
|
||||||
@ -353,7 +357,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
|
|||||||
(u64)pfvf->aura_ctx->iova);
|
(u64)pfvf->aura_ctx->iova);
|
||||||
|
|
||||||
/* Enable caching of qints hw context */
|
/* Enable caching of qints hw context */
|
||||||
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
|
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
|
||||||
|
BIT_ULL(36) | req->way_mask << 20);
|
||||||
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
|
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
|
||||||
(u64)pfvf->npa_qints_ctx->iova);
|
(u64)pfvf->npa_qints_ctx->iova);
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user