crypto: octeontx2 - load microcode and create engine groups

CPT includes microcoded GigaCypher symmetric engines (SEs), IPsec
symmetric engines (IEs), and asymmetric engines (AEs).
Each engine receives CPT instructions from the engine groups it has
subscribed to. This patch loads microcode, configures three engine
groups (one for SEs, one for IEs, and one for AEs), and configures
all engines.

Signed-off-by: Suheil Chandran <schandran@marvell.com>
Signed-off-by: Lukasz Bartosik <lbartosik@marvell.com>
Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author: Srujana Challa, 2021-01-15 19:22:22 +05:30 (committed by Herbert Xu)
commit 43ac0b824f, parent fe16eceab0
8 changed files with 1655 additions and 2 deletions

drivers/crypto/marvell/octeontx2/Makefile

@@ -2,6 +2,6 @@
obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += octeontx2-cpt.o
octeontx2-cpt-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
otx2_cpt_mbox_common.o
otx2_cpt_mbox_common.o otx2_cptpf_ucode.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af

drivers/crypto/marvell/octeontx2/otx2_cpt_common.h

@@ -18,6 +18,37 @@
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
(((blk) << 20) | ((slot) << 12) | (offs))
#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES
enum otx2_cpt_eng_type {
OTX2_CPT_AE_TYPES = 1,
OTX2_CPT_SE_TYPES = 2,
OTX2_CPT_IE_TYPES = 3,
OTX2_CPT_MAX_ENG_TYPES,
};
/* Take mbox id from end of CPT mbox range in AF (range 0xA00 - 0xBFF) */
#define MBOX_MSG_GET_ENG_GRP_NUM 0xBFF
/*
* Message request and response to get the engine group number
* which has a given type of engines (SE, AE, IE) attached.
* These messages are only used between CPT PF <=> CPT VF.
*/
struct otx2_cpt_egrp_num_msg {
struct mbox_msghdr hdr;
u8 eng_type;
};
struct otx2_cpt_egrp_num_rsp {
struct mbox_msghdr hdr;
u8 eng_type;
u8 eng_grp_num;
};
static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
u64 offs, u64 val)
{
@@ -34,4 +65,15 @@ static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
struct pci_dev *pdev);
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
struct pci_dev *pdev, u64 reg, u64 *val);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val);
#endif /* __OTX2_CPT_COMMON_H */
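For context, a minimal VF-side sketch of how this request/response pair could be used once a VF driver exists (no VF driver is added by this commit; the helper name, the pfvf mailbox argument, and the reuse of otx2_cpt_send_mbox_msg() for the PF <=> VF mailbox are assumptions that mirror the PF-side allocation pattern in otx2_cpt_mbox_common.c):

/* Hypothetical example, not part of this patch. */
static int example_send_egrp_num_msg(struct otx2_mbox *pfvf_mbox,
				     struct pci_dev *pdev, u8 eng_type)
{
	struct otx2_cpt_egrp_num_msg *req;

	req = (struct otx2_cpt_egrp_num_msg *)
	      otx2_mbox_alloc_msg_rsp(pfvf_mbox, 0, sizeof(*req),
				      sizeof(struct otx2_cpt_egrp_num_rsp));
	if (req == NULL)
		return -EFAULT;

	req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	/* A real VF would also fill hdr.pcifunc with its own RVU id. */
	req->eng_type = eng_type;	/* e.g. OTX2_CPT_SE_TYPES */

	/*
	 * Send and wait; the otx2_cpt_egrp_num_rsp reply is delivered
	 * through the VF's own mailbox handler.
	 */
	return otx2_cpt_send_mbox_msg(pfvf_mbox, pdev);
}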

drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c

@@ -35,3 +35,80 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val)
{
struct cpt_rd_wr_reg_msg *reg_msg;
reg_msg = (struct cpt_rd_wr_reg_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
sizeof(*reg_msg));
if (reg_msg == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
reg_msg->hdr.pcifunc = 0;
reg_msg->is_write = 0;
reg_msg->reg_offset = reg;
reg_msg->ret_val = val;
return 0;
}
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val)
{
struct cpt_rd_wr_reg_msg *reg_msg;
reg_msg = (struct cpt_rd_wr_reg_msg *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*reg_msg),
sizeof(*reg_msg));
if (reg_msg == NULL) {
dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
return -EFAULT;
}
reg_msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
reg_msg->hdr.sig = OTX2_MBOX_REQ_SIG;
reg_msg->hdr.pcifunc = 0;
reg_msg->is_write = 1;
reg_msg->reg_offset = reg;
reg_msg->val = val;
return 0;
}
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val)
{
int ret;
ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val);
if (ret)
return ret;
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val)
{
int ret;
ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val);
if (ret)
return ret;
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
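The add_*()/send split above lets a caller queue several AF register accesses on the mailbox and flush them with a single exchange, while otx2_cpt_read_af_reg()/otx2_cpt_write_af_reg() wrap the common one-register case. A small usage sketch, assuming the otx2_cptpf_dev layout from otx2_cptpf.h (the function name is illustrative only):

/* Hypothetical example, not part of this patch. */
static int example_batched_af_reads(struct otx2_cptpf_dev *cptpf,
				    u64 *constants1, u64 *blk_rst)
{
	struct otx2_mbox *mbox = &cptpf->afpf_mbox;
	struct pci_dev *pdev = cptpf->pdev;
	int ret;

	ret = otx2_cpt_add_read_af_reg(mbox, pdev, CPT_AF_CONSTANTS1,
				       constants1);
	if (ret)
		return ret;
	ret = otx2_cpt_add_read_af_reg(mbox, pdev, CPT_AF_BLK_RST, blk_rst);
	if (ret)
		return ret;

	/*
	 * One mailbox exchange covers both queued reads; the AF replies
	 * are copied back into *constants1 and *blk_rst by
	 * process_afpf_mbox_msg() in otx2_cptpf_mbox.c.
	 */
	return otx2_cpt_send_af_reg_requests(mbox, pdev);
}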

drivers/crypto/marvell/octeontx2/otx2_cptpf.h

@@ -6,6 +6,7 @@
#define __OTX2_CPTPF_H
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
struct otx2_cptpf_dev;
struct otx2_cptvf_info {
@@ -27,6 +28,8 @@ struct otx2_cptpf_dev {
void __iomem *vfpf_mbox_base; /* VF-PF mbox start address */
struct pci_dev *pdev; /* PCI device handle */
struct otx2_cptvf_info vf[OTX2_CPT_MAX_VFS_NUM];
struct otx2_cpt_eng_grps eng_grps;/* Engine groups information */
/* AF <=> PF mbox */
struct otx2_mbox afpf_mbox;
struct work_struct afpf_mbox_work;

drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c

@@ -4,6 +4,7 @@
#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"
@@ -410,6 +411,59 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
return 0;
}
static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
int timeout = 10, ret;
u64 reg = 0;
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_BLK_RST, 0x1);
if (ret)
return ret;
do {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_BLK_RST, &reg);
if (ret)
return ret;
if (!((reg >> 63) & 0x1))
break;
usleep_range(10000, 20000);
if (timeout-- < 0)
return -EBUSY;
} while (1);
return ret;
}
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
union otx2_cptx_af_constants1 af_cnsts1 = {0};
int ret = 0;
/* Reset the CPT PF device */
ret = cptpf_device_reset(cptpf);
if (ret)
return ret;
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_CONSTANTS1, &af_cnsts1.u);
if (ret)
return ret;
cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;
/* Disable all cores */
ret = otx2_cpt_disable_all_cores(cptpf);
return ret;
}
static int cptpf_sriov_disable(struct pci_dev *pdev)
{
struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
@@ -446,6 +500,10 @@ static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
if (ret)
goto destroy_flr;
ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
if (ret)
goto disable_intr;
cptpf->enabled_vfs = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret)
@@ -543,8 +601,20 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
/* Initialize CPT PF device */
err = cptpf_device_init(cptpf);
if (err)
goto unregister_intr;
/* Initialize engine groups */
err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
if (err)
goto unregister_intr;
return 0;
unregister_intr:
cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
@@ -560,6 +630,8 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
return;
cptpf_sriov_disable(pdev);
/* Cleanup engine groups */
otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
/* Disable AF-PF mailbox interrupt */
cptpf_disable_afpf_mbox_intr(cptpf);
/* Destroy AF-PF mbox */

drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c

@@ -35,6 +35,29 @@ static int forward_to_af(struct otx2_cptpf_dev *cptpf,
return 0;
}
static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req)
{
struct otx2_cpt_egrp_num_msg *grp_req;
struct otx2_cpt_egrp_num_rsp *rsp;
grp_req = (struct otx2_cpt_egrp_num_msg *)req;
rsp = (struct otx2_cpt_egrp_num_rsp *)
otx2_mbox_alloc_msg(&cptpf->vfpf_mbox, vf->vf_id, sizeof(*rsp));
if (!rsp)
return -ENOMEM;
rsp->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = req->pcifunc;
rsp->eng_type = grp_req->eng_type;
rsp->eng_grp_num = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
grp_req->eng_type);
return 0;
}
static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
struct otx2_cptvf_info *vf,
struct mbox_msghdr *req, int size)
@@ -45,7 +68,15 @@ static int cptpf_handle_vf_req(struct otx2_cptpf_dev *cptpf,
if (req->sig != OTX2_MBOX_REQ_SIG)
goto inval_msg;
return forward_to_af(cptpf, vf, req, size);
switch (req->id) {
case MBOX_MSG_GET_ENG_GRP_NUM:
err = handle_msg_get_eng_grp_num(cptpf, vf, req);
break;
default:
err = forward_to_af(cptpf, vf, req, size);
break;
}
return err;
inval_msg:
otx2_reply_invalid_msg(&cptpf->vfpf_mbox, vf->vf_id, 0, req->id);
@@ -148,6 +179,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *msg)
{
struct device *dev = &cptpf->pdev->dev;
struct cpt_rd_wr_reg_msg *rsp_rd_wr;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
@@ -164,6 +196,18 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
RVU_PFVF_PF_MASK;
break;
case MBOX_MSG_CPT_RD_WR_REGISTER:
rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
if (msg->rc) {
dev_err(dev, "Reg %llx rd/wr(%d) failed %d\n",
rsp_rd_wr->reg_offset, rsp_rd_wr->is_write,
msg->rc);
return;
}
if (!rsp_rd_wr->is_write)
*rsp_rd_wr->ret_val = rsp_rd_wr->val;
break;
default:
dev_err(dev,
"Unsupported msg %d received.\n", msg->id);

drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c (new file; diff suppressed because it is too large)

drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h

@@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0-only
* Copyright (C) 2020 Marvell.
*/
#ifndef __OTX2_CPTPF_UCODE_H
#define __OTX2_CPTPF_UCODE_H
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/module.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
/*
* On the OcteonTX2 platform the IPSec ucode can use both IE and SE engines,
* therefore IE and SE engines can be attached to the same engine group.
*/
#define OTX2_CPT_MAX_ETYPES_PER_GRP 2
/* CPT ucode signature size */
#define OTX2_CPT_UCODE_SIGN_LEN 256
/* Microcode version string length */
#define OTX2_CPT_UCODE_VER_STR_SZ 44
/* Maximum number of supported engines/cores on OcteonTX2 platform */
#define OTX2_CPT_MAX_ENGINES 128
#define OTX2_CPT_ENGS_BITMASK_LEN BITS_TO_LONGS(OTX2_CPT_MAX_ENGINES)
/* Microcode types */
enum otx2_cpt_ucode_type {
OTX2_CPT_AE_UC_TYPE = 1, /* AE-MAIN */
OTX2_CPT_SE_UC_TYPE1 = 20,/* SE-MAIN - combination of 21 and 22 */
OTX2_CPT_SE_UC_TYPE2 = 21,/* Fast Path IPSec + AirCrypto */
OTX2_CPT_SE_UC_TYPE3 = 22,/*
* Hash + HMAC + FlexiCrypto + RNG +
* Full Feature IPSec + AirCrypto + Kasumi
*/
OTX2_CPT_IE_UC_TYPE1 = 30, /* IE-MAIN - combination of 31 and 32 */
OTX2_CPT_IE_UC_TYPE2 = 31, /* Fast Path IPSec */
OTX2_CPT_IE_UC_TYPE3 = 32, /*
* Hash + HMAC + FlexiCrypto + RNG +
* Full Feature IPSec
*/
};
struct otx2_cpt_bitmap {
unsigned long bits[OTX2_CPT_ENGS_BITMASK_LEN];
int size;
};
struct otx2_cpt_engines {
int type;
int count;
};
/* Microcode version number */
struct otx2_cpt_ucode_ver_num {
u8 nn;
u8 xx;
u8 yy;
u8 zz;
};
struct otx2_cpt_ucode_hdr {
struct otx2_cpt_ucode_ver_num ver_num;
u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
__be32 code_length;
u32 padding[3];
};
struct otx2_cpt_ucode {
u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/*
* ucode version in readable
* format
*/
struct otx2_cpt_ucode_ver_num ver_num;/* ucode version number */
char filename[OTX2_CPT_NAME_LENGTH];/* ucode filename */
dma_addr_t dma; /* phys address of ucode image */
void *va; /* virt address of ucode image */
u32 size; /* ucode image size */
int type; /* ucode image type SE, IE, AE or SE+IE */
};
struct otx2_cpt_uc_info_t {
struct list_head list;
struct otx2_cpt_ucode ucode;/* microcode information */
const struct firmware *fw;
};
/* Maximum and current number of engines available for all engine groups */
struct otx2_cpt_engs_available {
int max_se_cnt;
int max_ie_cnt;
int max_ae_cnt;
int se_cnt;
int ie_cnt;
int ae_cnt;
};
/* Engines reserved to an engine group */
struct otx2_cpt_engs_rsvd {
int type; /* engine type */
int count; /* number of engines attached */
int offset; /* constant offset of engine type in the bitmap */
unsigned long *bmap; /* attached engines bitmap */
struct otx2_cpt_ucode *ucode; /* ucode used by these engines */
};
struct otx2_cpt_mirror_info {
int is_ena; /*
* is mirroring enabled, it is set only for engine
* group which mirrors another engine group
*/
int idx; /*
* index of engine group which is mirrored by this
* group, set only for engine group which mirrors
* another group
*/
int ref_count; /*
* number of times this engine group is mirrored by
* other groups, this is set only for engine group
* which is mirrored by other group(s)
*/
};
struct otx2_cpt_eng_grp_info {
struct otx2_cpt_eng_grps *g; /* pointer to engine_groups structure */
/* engines attached */
struct otx2_cpt_engs_rsvd engs[OTX2_CPT_MAX_ETYPES_PER_GRP];
/* ucodes information */
struct otx2_cpt_ucode ucode[OTX2_CPT_MAX_ETYPES_PER_GRP];
/* engine group mirroring information */
struct otx2_cpt_mirror_info mirror;
int idx; /* engine group index */
bool is_enabled; /*
* is engine group enabled, engine group is enabled
* when it has engines attached and ucode loaded
*/
};
struct otx2_cpt_eng_grps {
struct otx2_cpt_eng_grp_info grp[OTX2_CPT_MAX_ENGINE_GROUPS];
struct otx2_cpt_engs_available avail;
void *obj; /* device specific data */
int engs_num; /* total number of engines supported */
u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */
bool is_grps_created; /* are the engine groups already created */
};
struct otx2_cptpf_dev;
int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps);
void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps);
int otx2_cpt_create_eng_grps(struct pci_dev *pdev,
struct otx2_cpt_eng_grps *eng_grps);
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf);
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type);
#endif /* __OTX2_CPTPF_UCODE_H */
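The heavy lifting (requesting the firmware images, parsing these headers, and attaching engines to the three groups) lives in otx2_cptpf_ucode.c, whose diff is suppressed above. As a rough, self-contained sketch of how the otx2_cpt_ucode_hdr layout can be consumed, not the suppressed code itself (the firmware name is caller-supplied, and the interpretation of code_length as 16-bit words is an assumption carried over from the older OcteonTX driver):

/* Hypothetical example, not part of this patch. */
#include <linux/firmware.h>
#include <linux/string.h>
#include "otx2_cptpf_ucode.h"	/* structures and constants shown above */

static int example_inspect_ucode(struct device *dev, const char *fw_name)
{
	char ver_str[OTX2_CPT_UCODE_VER_STR_SZ + 1];
	const struct otx2_cpt_ucode_hdr *hdr;
	const struct firmware *fw;
	u32 code_len;
	int ret;

	ret = request_firmware(&fw, fw_name, dev);
	if (ret)
		return ret;

	if (fw->size < sizeof(*hdr) + OTX2_CPT_UCODE_SIGN_LEN) {
		ret = -EINVAL;
		goto out;
	}
	hdr = (const struct otx2_cpt_ucode_hdr *)fw->data;

	/* ver_str is not guaranteed to be NUL terminated in the image. */
	memcpy(ver_str, hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = '\0';

	/* Assumption: code_length counts 16-bit words. */
	code_len = be32_to_cpu(hdr->code_length) * 2;
	if (!code_len || fw->size < round_up(code_len, 16) + sizeof(*hdr) +
				    OTX2_CPT_UCODE_SIGN_LEN) {
		ret = -EINVAL;
		goto out;
	}

	dev_info(dev, "ucode %s, ver %d.%d.%d.%d, %u bytes of code\n",
		 ver_str, hdr->ver_num.nn, hdr->ver_num.xx,
		 hdr->ver_num.yy, hdr->ver_num.zz, code_len);
out:
	release_firmware(fw);
	return ret;
}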