crypto: qat - fix configuration of iov threads

The number of AE2FUNC_MAP registers differs between QAT devices (c62x,
c3xxx and dh895xcc), although the logic and the register offsets are the
same across devices.

This patch separates the logic that configures the iov threads into a
common function that takes as input the number of AE2FUNC_MAP registers
supported by a device. A new configure_iov_threads() hook is added to the
adf_hw_device_data structure of each device and implemented as a thin
wrapper that calls the common function with the appropriate register
counts.

The configure-iov-threads logic is added to a new file,
adf_gen2_hw_data.c, which is going to contain code shared across QAT GEN2
devices.

Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Fiona Trahe <fiona.trahe@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit c4e8428673 (parent 70b9bd3929)
Author: Giovanni Cabiddu, 2020-10-12 21:38:20 +01:00; committed by Herbert Xu
11 files changed, 115 insertions(+), 59 deletions(-)
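In outline, each device installs a thin wrapper into adf_hw_device_data that forwards its own register counts to the shared GEN2 helper, and the common SR-IOV code only ever calls the hook. Below is a minimal, standalone C sketch of that callback pattern (the types and the wrapper name c3xxx_configure_iov_threads are simplified stand-ins, not the kernel code; the register read/modify/write loop is replaced by a printf):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the QAT structures (illustration only) */
struct adf_accel_dev;

struct adf_hw_device_data {
        /* per-device hook, as added to adf_hw_device_data by this patch */
        void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
                                      bool enable);
};

/* shared GEN2 helper: identical logic, register counts passed in */
static void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
                                  int num_a_regs, int num_b_regs)
{
        (void)accel_dev;
        printf("%s valid bits in %d group A and %d group B regs\n",
               enable ? "set" : "clear", num_a_regs, num_b_regs);
}

/* hypothetical c3xxx wrapper forwarding its device-specific counts (48, 6) */
static void c3xxx_configure_iov_threads(struct adf_accel_dev *accel_dev,
                                        bool enable)
{
        adf_gen2_cfg_iov_thds(accel_dev, enable, 48, 6);
}

int main(void)
{
        struct adf_hw_device_data hw_data = {
                .configure_iov_threads = c3xxx_configure_iov_threads,
        };

        /* what adf_enable_sriov()/adf_disable_sriov() do through the hook */
        hw_data.configure_iov_threads(NULL, true);
        hw_data.configure_iov_threads(NULL, false);
        return 0;
}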

drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c

@@ -3,6 +3,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
#include "adf_c3xxx_hw_data.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -171,6 +172,13 @@ static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
        return 0;
}

static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
        adf_gen2_cfg_iov_thds(accel_dev, enable,
                              ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS,
                              ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS);
}

void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class = &c3xxx_class;
@@ -199,6 +207,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->fw_mmp_name = ADF_C3XXX_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;

drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h

@@ -31,6 +31,10 @@
#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_C3XXX_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
/* Firmware Binary */
#define ADF_C3XXX_FW "qat_c3xxx.bin"
#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"

drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c

@@ -3,6 +3,7 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include <adf_gen2_hw_data.h>
#include "adf_c62x_hw_data.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -181,6 +182,13 @@ static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
        return 0;
}

static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
        adf_gen2_cfg_iov_thds(accel_dev, enable,
                              ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS,
                              ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS);
}

void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class = &c62x_class;
@@ -209,6 +217,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->fw_mmp_name = ADF_C62X_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;

drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h

@@ -32,6 +32,10 @@
#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_C62X_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
/* Firmware Binary */
#define ADF_C62X_FW "qat_c62x.bin"
#define ADF_C62X_MMP "qat_c62x_mmp.bin"

drivers/crypto/qat/qat_common/Makefile

@@ -10,6 +10,7 @@ intel_qat-objs := adf_cfg.o \
        adf_transport.o \
        adf_admin.o \
        adf_hw_arbiter.o \
        adf_gen2_hw_data.o \
        qat_crypto.o \
        qat_algs.o \
        qat_asym_algs.o \

drivers/crypto/qat/qat_common/adf_accel_devices.h

@@ -125,6 +125,8 @@ struct adf_hw_device_data {
        void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
                                const u32 **cfg);
        void (*disable_iov)(struct adf_accel_dev *accel_dev);
        void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
                                      bool enable);
        void (*enable_ints)(struct adf_accel_dev *accel_dev);
        int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
        void (*reset_device)(struct adf_accel_dev *accel_dev);

drivers/crypto/qat/qat_common/adf_gen2_hw_data.c

@@ -0,0 +1,38 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_gen2_hw_data.h"
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
void __iomem *pmisc_addr;
struct adf_bar *pmisc;
int pmisc_id, i;
u32 reg;
pmisc_id = hw_data->get_misc_bar_id(hw_data);
pmisc = &GET_BARS(accel_dev)[pmisc_id];
pmisc_addr = pmisc->virt_addr;
/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
for (i = 0; i < num_a_regs; i++) {
reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
if (enable)
reg |= AE2FUNCTION_MAP_VALID;
else
reg &= ~AE2FUNCTION_MAP_VALID;
WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
}
/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
for (i = 0; i < num_b_regs; i++) {
reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
if (enable)
reg |= AE2FUNCTION_MAP_VALID;
else
reg &= ~AE2FUNCTION_MAP_VALID;
WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
}
}
EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);

drivers/crypto/qat/qat_common/adf_gen2_hw_data.h

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2020 Intel Corporation */
#ifndef ADF_GEN2_HW_DATA_H_
#define ADF_GEN2_HW_DATA_H_
#include "adf_accel_devices.h"
/* AE to function map */
#define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
#define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
#define AE2FUNCTION_MAP_REG_SIZE 4
#define AE2FUNCTION_MAP_VALID BIT(7)
#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \
ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index))
#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_A_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index), value)
#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \
ADF_CSR_RD(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index))
#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
ADF_CSR_WR(pmisc_bar_addr, AE2FUNCTION_MAP_B_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index), value)
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
#endif
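Each macro resolves to a fixed offset in the PMISC BAR: group A register 2, for instance, sits at 0x3A400 + 0x190 + 4 * 2 = 0x3A598, and the valid flag is bit 7 (0x80). A standalone sketch of that arithmetic (user-space C; the ADF_CSR_RD/WR accessors and the actual BAR mapping are intentionally left out):

#include <stdio.h>

/* Offsets as defined in adf_gen2_hw_data.h above */
#define AE2FUNCTION_MAP_A_OFFSET        (0x3A400 + 0x190)
#define AE2FUNCTION_MAP_B_OFFSET        (0x3A400 + 0x310)
#define AE2FUNCTION_MAP_REG_SIZE        4
#define AE2FUNCTION_MAP_VALID           (1u << 7)       /* BIT(7) */

int main(void)
{
        /* PMISC BAR offsets of group A register 2 and group B register 0 */
        unsigned int a2 = AE2FUNCTION_MAP_A_OFFSET + AE2FUNCTION_MAP_REG_SIZE * 2;
        unsigned int b0 = AE2FUNCTION_MAP_B_OFFSET + AE2FUNCTION_MAP_REG_SIZE * 0;
        unsigned int reg = 0;

        printf("group A reg 2: 0x%X\n", a2);    /* 0x3A598 */
        printf("group B reg 0: 0x%X\n", b0);    /* 0x3A710 */

        /* read-modify-write of the valid bit, as in adf_gen2_cfg_iov_thds() */
        reg |= AE2FUNCTION_MAP_VALID;           /* enable:  reg == 0x80 */
        reg &= ~AE2FUNCTION_MAP_VALID;          /* disable: reg == 0x00 */
        printf("valid bit cleared: 0x%X\n", reg);
        return 0;
}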

drivers/crypto/qat/qat_common/adf_sriov.c

@@ -10,31 +10,6 @@
static struct workqueue_struct *pf2vf_resp_wq;

#define ME2FUNCTION_MAP_A_OFFSET        (0x3A400 + 0x190)
#define ME2FUNCTION_MAP_A_NUM_REGS      96
#define ME2FUNCTION_MAP_B_OFFSET        (0x3A400 + 0x310)
#define ME2FUNCTION_MAP_B_NUM_REGS      12
#define ME2FUNCTION_MAP_REG_SIZE        4
#define ME2FUNCTION_MAP_VALID           BIT(7)

#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index) \
        ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * index)
#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
        ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * index, value)
#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index) \
        ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * index)
#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
        ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET + \
                   ME2FUNCTION_MAP_REG_SIZE * index, value)

struct adf_pf2vf_resp {
        struct work_struct pf2vf_resp_work;
        struct adf_accel_vf_info *vf_info;
@@ -68,12 +43,8 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
        struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
        int totalvfs = pci_sriov_get_totalvfs(pdev);
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        struct adf_accel_vf_info *vf_info;
        int i;
        u32 reg;

        for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
             i++, vf_info++) {
@@ -90,19 +61,8 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
                                     DEFAULT_RATELIMIT_BURST);
        }

        /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
        for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
                reg |= ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
        }

        /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
        for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
                reg |= ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
        }

        /* Set Valid bits in AE Thread to PCIe Function Mapping */
        hw_data->configure_iov_threads(accel_dev, true);

        /* Enable VF to PF interrupts for all VFs */
        adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
@@ -127,12 +87,8 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
        struct adf_accel_vf_info *vf;
        u32 reg;
        int i;

        if (!accel_dev->pf.vf_info)
@@ -145,19 +101,8 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
        /* Disable VF to PF interrupts */
        adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);

        /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
        for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
                reg &= ~ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
        }

        /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
        for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
                reg &= ~ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
        }

        /* Clear Valid bits in AE Thread to PCIe Function Mapping */
        hw_data->configure_iov_threads(accel_dev, false);

        for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
                tasklet_disable(&vf->vf2pf_bh_tasklet);

drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c

@@ -3,6 +3,7 @@
#include <adf_accel_devices.h>
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_gen2_hw_data.h>
#include "adf_dh895xcc_hw_data.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -180,6 +181,13 @@ static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
        return 0;
}

static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
        adf_gen2_cfg_iov_thds(accel_dev, enable,
                              ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
                              ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
}

void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
        hw_data->dev_class = &dh895xcc_class;
@@ -208,6 +216,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
        hw_data->init_admin_comms = adf_init_admin_comms;
        hw_data->exit_admin_comms = adf_exit_admin_comms;
        hw_data->configure_iov_threads = configure_iov_threads;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->send_admin_init = adf_send_admin_init;
        hw_data->init_arb = adf_init_arb;

drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h

@@ -36,6 +36,11 @@
#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
#define ADF_DH895XCC_VINTMSK_OFFSET(i) (0x3A000 + 0x200 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
/* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin"
#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"