bnxt_en: update all firmware calls to use the new APIs

The conversion follows this general pattern for most of the calls:

1. The input message is changed from a stack variable initialized
using bnxt_hwrm_cmd_hdr_init() to a pointer allocated and initialized
using hwrm_req_init().

2. If we don't need to read the firmware response, the hwrm_send_message()
call is replaced with hwrm_req_send().

3. If we need to read the firmware response, the mutex lock is replaced
by hwrm_req_hold() to hold the response.  When the response is read, the
mutex unlock is replaced by hwrm_req_drop().  A sketch of the full
pattern follows this list.
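
A minimal sketch of steps 1-3, using a hypothetical HWRM_EXAMPLE_QCFG
message (the message type, struct names and example_field are
placeholders for illustration, not real driver symbols; the
hwrm_req_*() calls are the new APIs introduced by this series):

    static int bnxt_hwrm_example_qcfg(struct bnxt *bp, u16 *val)
    {
        struct hwrm_example_qcfg_output *resp;
        struct hwrm_example_qcfg_input *req;
        int rc;

        /* step 1: allocate and initialize the request */
        rc = hwrm_req_init(bp, req, HWRM_EXAMPLE_QCFG);
        if (rc)
            return rc;

        /* step 3: hold the response buffer (was mutex_lock()) */
        resp = hwrm_req_hold(bp, req);
        /* step 2: send the request */
        rc = hwrm_req_send(bp, req);
        if (!rc)
            *val = le16_to_cpu(resp->example_field);

        /* step 3: release request and response (was mutex_unlock()) */
        hwrm_req_drop(bp, req);
        return rc;
    }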

If additional DMA buffers are needed for firmware response data,
hwrm_req_dma_slice() is used instead of calling dma_alloc_coherent(),
as sketched below.
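
A corresponding sketch of the DMA slice pattern (again with placeholder
message and field names; dest and len are assumed to come from the
caller).  The slice is carved out of the request mapping, so it is
released together with the request by hwrm_req_drop():

    static int bnxt_hwrm_example_read(struct bnxt *bp, void *dest, u32 len)
    {
        struct hwrm_example_read_input *req;
        dma_addr_t dma_handle;
        u8 *buf;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_EXAMPLE_READ);
        if (rc)
            return rc;

        /* replaces dma_alloc_coherent(); freed with the request */
        buf = hwrm_req_dma_slice(bp, req, len, &dma_handle);
        if (!buf) {
            hwrm_req_drop(bp, req);
            return -ENOMEM;
        }

        req->host_dest_addr = cpu_to_le64(dma_handle);
        hwrm_req_hold(bp, req);    /* hold on to the slice */
        rc = hwrm_req_send(bp, req);
        if (!rc)
            memcpy(dest, buf, len);
        hwrm_req_drop(bp, req);    /* also releases the slice */
        return rc;
    }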

Some minor refactoring is also done while doing these conversions.

v2: Fix uninitialized variable warnings in __bnxt_hwrm_get_tx_rings()
and bnxt_approve_mac()

Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit bbf33d1d98 (parent 3c10ed497f)
Author:    Edwin Peer <edwin.peer@broadcom.com>
Date:      2021-08-29 03:35:04 -04:00
Committer: David S. Miller
9 changed files with 1955 additions and 1540 deletions

File diff suppressed because it is too large.


@@ -39,38 +39,43 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_cfg_input req = {0};
struct hwrm_queue_pri2cos_cfg_input *req;
u8 *pri2cos;
int i;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG);
if (rc)
return rc;
req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
pri2cos = &req.pri0_cos_queue_id;
pri2cos = &req->pri0_cos_queue_id;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 qidx;
req.enables |= cpu_to_le32(
req->enables |= cpu_to_le32(
QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = bp->q_info[qidx].queue_id;
}
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_pri2cos_qcfg_input req = {0};
int rc = 0;
struct hwrm_queue_pri2cos_qcfg_output *resp;
struct hwrm_queue_pri2cos_qcfg_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i;
@@ -84,23 +89,26 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
ets->prio_tc[i] = tc;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
u8 max_tc)
{
struct hwrm_queue_cos2bw_cfg_input req = {0};
struct hwrm_queue_cos2bw_cfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
void *data;
int i;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
u8 qidx = bp->tc_to_qidx[i];
req.enables |= cpu_to_le32(
req->enables |= cpu_to_le32(
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
qidx);
@@ -121,30 +129,32 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (qidx == 0) {
req.queue_id0 = cos2bw.queue_id;
req.unused_0 = 0;
req->queue_id0 = cos2bw.queue_id;
req->unused_0 = 0;
}
}
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_cos2bw_qcfg_input req = {0};
struct hwrm_queue_cos2bw_qcfg_output *resp;
struct hwrm_queue_cos2bw_qcfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
void *data;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
@@ -168,7 +178,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
ets->tc_tx_bw[tc] = cos2bw.bw_weight;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return 0;
}
@@ -230,11 +240,12 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
struct hwrm_queue_pfcenable_cfg_input req = {0};
struct hwrm_queue_pfcenable_cfg_input *req;
struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
bool need_q_remap = false;
int rc;
if (!my_ets)
return -EINVAL;
@@ -267,38 +278,43 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
if (need_q_remap)
bnxt_queue_remap(bp, tc_mask);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG);
if (rc)
return rc;
req->flags = cpu_to_le32(pri_mask);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_pfcenable_qcfg_input req = {0};
struct hwrm_queue_pfcenable_qcfg_output *resp;
struct hwrm_queue_pfcenable_qcfg_input *req;
u8 pri_mask;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
pri_mask = le32_to_cpu(resp->flags);
pfc->pfc_en = pri_mask;
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return 0;
}
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
bool add)
{
struct hwrm_fw_set_structured_data_input set = {0};
struct hwrm_fw_get_structured_data_input get = {0};
struct hwrm_fw_set_structured_data_input *set;
struct hwrm_fw_get_structured_data_input *get;
struct hwrm_struct_data_dcbx_app *fw_app;
struct hwrm_struct_hdr *data;
dma_addr_t mapping;
@@ -308,19 +324,26 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
if (bp->hwrm_spec_code < 0x10601)
return 0;
rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
if (rc)
return rc;
hwrm_req_hold(bp, get);
hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
GFP_KERNEL);
if (!data)
return -ENOMEM;
data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
if (!data) {
rc = -ENOMEM;
goto set_app_exit;
}
bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
get.dest_data_addr = cpu_to_le64(mapping);
get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
get.count = 0;
rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
get->dest_data_addr = cpu_to_le64(mapping);
get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
get->count = 0;
rc = hwrm_req_send(bp, get);
if (rc)
goto set_app_exit;
@@ -366,44 +389,49 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
data->len = cpu_to_le16(sizeof(*fw_app) * n);
data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
set.src_data_addr = cpu_to_le64(mapping);
set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set.hdr_cnt = 1;
rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA);
if (rc)
goto set_app_exit;
set->src_data_addr = cpu_to_le64(mapping);
set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set->hdr_cnt = 1;
rc = hwrm_req_send(bp, set);
set_app_exit:
dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
hwrm_req_drop(bp, get); /* dropping get request and associated slice */
return rc;
}
static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_dscp_qcaps_input req = {0};
struct hwrm_queue_dscp_qcaps_output *resp;
struct hwrm_queue_dscp_qcaps_input *req;
int rc;
bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS);
if (rc)
return rc;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
if (bp->max_dscp_value < 0x3f)
bp->max_dscp_value = 0;
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
bool add)
{
struct hwrm_queue_dscp2pri_cfg_input req = {0};
struct hwrm_queue_dscp2pri_cfg_input *req;
struct bnxt_dscp2pri_entry *dscp2pri;
dma_addr_t mapping;
int rc;
@@ -411,23 +439,25 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
if (bp->hwrm_spec_code < 0x10800)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
&mapping, GFP_KERNEL);
if (!dscp2pri)
return -ENOMEM;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG);
if (rc)
return rc;
req.src_data_addr = cpu_to_le64(mapping);
dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
if (!dscp2pri) {
hwrm_req_drop(bp, req);
return -ENOMEM;
}
req->src_data_addr = cpu_to_le64(mapping);
dscp2pri->dscp = app->protocol;
if (add)
dscp2pri->mask = 0x3f;
else
dscp2pri->mask = 0;
dscp2pri->pri = app->priority;
req.entry_cnt = cpu_to_le16(1);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
mapping);
req->entry_cnt = cpu_to_le16(1);
rc = hwrm_req_send(bp, req);
return rc;
}


@@ -355,28 +355,34 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
union devlink_param_value *nvm_cfg_ver)
{
struct hwrm_nvm_get_variable_input req = {0};
struct hwrm_nvm_get_variable_input *req;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
&data_dma_addr, GFP_KERNEL);
if (!data)
return -ENOMEM;
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
if (rc)
return rc;
req.dest_data_addr = cpu_to_le64(data_dma_addr);
req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
if (!data) {
rc = -ENOMEM;
goto exit;
}
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
hwrm_req_hold(bp, req);
req->dest_data_addr = cpu_to_le64(data_dma_addr);
req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
rc = hwrm_req_send_silent(bp, req);
if (!rc)
bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
BNXT_NVM_CFG_VER_BITS,
BNXT_NVM_CFG_VER_BYTES);
dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
exit:
hwrm_req_drop(bp, req);
return rc;
}
@@ -563,17 +569,20 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
int idx = 0, rc, i;
/* Get/Set NVM CFG parameter is supported only on PFs */
if (BNXT_VF(bp))
if (BNXT_VF(bp)) {
hwrm_req_drop(bp, req);
return -EPERM;
}
for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
if (nvm_params[i].id == param_id) {
@@ -582,18 +591,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
}
if (i == ARRAY_SIZE(nvm_params))
if (i == ARRAY_SIZE(nvm_params)) {
hwrm_req_drop(bp, req);
return -EOPNOTSUPP;
}
if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
&data_dma_addr, GFP_KERNEL);
if (!data)
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
if (!data) {
hwrm_req_drop(bp, req);
return -ENOMEM;
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
@@ -602,26 +615,24 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_send_message_silent(bp, msg, msg_len,
HWRM_CMD_TIMEOUT);
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
} else {
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
rc = -EOPNOTSUPP;
}
}
dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
hwrm_req_drop(bp, req);
if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
return rc;
@@ -630,14 +641,16 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct hwrm_nvm_get_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_get_variable_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
if (!rc)
if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
if (rc)
return rc;
rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
ctx->val.vbool = !ctx->val.vbool;
return rc;
@@ -646,15 +659,18 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct hwrm_nvm_set_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE);
if (rc)
return rc;
if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
ctx->val.vbool = !ctx->val.vbool;
return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,


@@ -1366,7 +1366,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
struct pcie_ctx_hw_stats *hw_pcie_stats;
struct hwrm_pcie_qstats_input req = {0};
struct hwrm_pcie_qstats_input *req;
struct bnxt *bp = netdev_priv(dev);
dma_addr_t hw_pcie_stats_addr;
int rc;
@@ -1377,18 +1377,21 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev,
sizeof(*hw_pcie_stats),
&hw_pcie_stats_addr, GFP_KERNEL);
if (!hw_pcie_stats)
if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
return;
hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
&hw_pcie_stats_addr);
if (!hw_pcie_stats) {
hwrm_req_drop(bp, req);
return;
}
regs->version = 1;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
hwrm_req_hold(bp, req); /* hold on to slice */
req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
rc = hwrm_req_send(bp, req);
if (!rc) {
__le64 *src = (__le64 *)hw_pcie_stats;
u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
@@ -1397,9 +1400,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
dst[i] = le64_to_cpu(src[i]);
}
mutex_unlock(&bp->hwrm_cmd_lock);
dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats,
hw_pcie_stats_addr);
hwrm_req_drop(bp, req);
}
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1979,7 +1980,7 @@ static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
static int bnxt_set_fecparam(struct net_device *dev,
struct ethtool_fecparam *fecparam)
{
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_input *req;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info;
u32 new_cfg, fec = fecparam->fec;
@@ -2011,9 +2012,11 @@ static int bnxt_set_fecparam(struct net_device *dev,
}
apply_fec:
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
if (rc)
return rc;
req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
rc = hwrm_req_send(bp, req);
/* update current settings */
if (!rc) {
mutex_lock(&bp->link_lock);
@@ -2107,19 +2110,22 @@ static u32 bnxt_get_link(struct net_device *dev)
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
{
struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_nvm_get_dev_info_input req = {0};
struct hwrm_nvm_get_dev_info_output *resp;
struct hwrm_nvm_get_dev_info_input *req;
int rc;
if (BNXT_VF(bp))
return -EOPNOTSUPP;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
if (rc)
return rc;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
memcpy(nvm_dev_info, resp, sizeof(*resp));
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
@@ -2132,77 +2138,67 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length);
static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
u32 dir_item_len, const u8 *data,
size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_write_input *req;
int rc;
struct hwrm_nvm_write_input req = {0};
dma_addr_t dma_handle;
u8 *kmem = NULL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
if (rc)
return rc;
req.dir_type = cpu_to_le16(dir_type);
req.dir_ordinal = cpu_to_le16(dir_ordinal);
req.dir_ext = cpu_to_le16(dir_ext);
req.dir_attr = cpu_to_le16(dir_attr);
req.dir_item_length = cpu_to_le32(dir_item_len);
if (data_len && data) {
req.dir_data_length = cpu_to_le32(data_len);
dma_addr_t dma_handle;
u8 *kmem;
kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
GFP_KERNEL);
if (!kmem)
kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
if (!kmem) {
hwrm_req_drop(bp, req);
return -ENOMEM;
memcpy(kmem, data, data_len);
req.host_src_addr = cpu_to_le64(dma_handle);
}
rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
if (kmem)
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
req->dir_data_length = cpu_to_le32(data_len);
memcpy(kmem, data, data_len);
req->host_src_addr = cpu_to_le64(dma_handle);
}
hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT);
req->dir_type = cpu_to_le16(dir_type);
req->dir_ordinal = cpu_to_le16(dir_ordinal);
req->dir_ext = cpu_to_le16(dir_ext);
req->dir_attr = cpu_to_le16(dir_attr);
req->dir_item_length = cpu_to_le32(dir_item_len);
rc = hwrm_req_send(bp, req);
if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
const u8 *data, size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr,
0, data, data_len);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
u8 self_reset, u8 flags)
{
struct hwrm_fw_reset_input req = {0};
struct bnxt *bp = netdev_priv(dev);
struct hwrm_fw_reset_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
if (rc)
return rc;
req.embedded_proc_type = proc_type;
req.selfrst_status = self_reset;
req.flags = flags;
req->embedded_proc_type = proc_type;
req->selfrst_status = self_reset;
req->flags = flags;
if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
rc = hwrm_send_message_silent(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
rc = hwrm_req_send_silent(bp, req);
} else {
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
if (rc == -EACCES)
bnxt_print_admin_err(bp);
}
@@ -2340,7 +2336,7 @@ static int bnxt_flash_firmware(struct net_device *dev,
return -EINVAL;
}
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw_data, fw_size);
0, 0, 0, fw_data, fw_size);
if (rc == 0) /* Firmware update successful */
rc = bnxt_firmware_reset(dev, dir_type);
@@ -2393,7 +2389,7 @@ static int bnxt_flash_microcode(struct net_device *dev,
return -EINVAL;
}
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw_data, fw_size);
0, 0, 0, fw_data, fw_size);
return rc;
}
@@ -2459,7 +2455,7 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw->data, fw->size);
0, 0, 0, fw->data, fw->size);
release_firmware(fw);
return rc;
}
@@ -2471,21 +2467,23 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
u32 install_type)
{
struct hwrm_nvm_install_update_input install = {0};
struct hwrm_nvm_install_update_output resp = {0};
struct hwrm_nvm_modify_input modify = {0};
struct hwrm_nvm_install_update_input *install;
struct hwrm_nvm_install_update_output *resp;
struct hwrm_nvm_modify_input *modify;
struct bnxt *bp = netdev_priv(dev);
bool defrag_attempted = false;
dma_addr_t dma_handle;
u8 *kmem = NULL;
u32 modify_len;
u32 item_len;
int rc = 0;
u16 index;
int rc;
bnxt_hwrm_fw_set_time(bp);
bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
if (rc)
return rc;
/* Try allocating a large DMA buffer first. Older fw will
* cause excessive NVRAM erases when using small blocks.
@@ -2493,22 +2491,33 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
modify_len = roundup_pow_of_two(fw->size);
modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
while (1) {
kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len,
&dma_handle, GFP_KERNEL);
kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
if (!kmem && modify_len > PAGE_SIZE)
modify_len /= 2;
else
break;
}
if (!kmem)
if (!kmem) {
hwrm_req_drop(bp, modify);
return -ENOMEM;
}
modify.host_src_addr = cpu_to_le64(dma_handle);
rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
if (rc) {
hwrm_req_drop(bp, modify);
return rc;
}
bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT);
hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT);
hwrm_req_hold(bp, modify);
modify->host_src_addr = cpu_to_le64(dma_handle);
resp = hwrm_req_hold(bp, install);
if ((install_type & 0xffff) == 0)
install_type >>= 16;
install.install_type = cpu_to_le32(install_type);
install->install_type = cpu_to_le32(install_type);
do {
u32 copied = 0, len = modify_len;
@@ -2528,76 +2537,69 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
break;
}
modify.dir_idx = cpu_to_le16(index);
modify->dir_idx = cpu_to_le16(index);
if (fw->size > modify_len)
modify.flags = BNXT_NVM_MORE_FLAG;
modify->flags = BNXT_NVM_MORE_FLAG;
while (copied < fw->size) {
u32 balance = fw->size - copied;
if (balance <= modify_len) {
len = balance;
if (copied)
modify.flags |= BNXT_NVM_LAST_FLAG;
modify->flags |= BNXT_NVM_LAST_FLAG;
}
memcpy(kmem, fw->data + copied, len);
modify.len = cpu_to_le32(len);
modify.offset = cpu_to_le32(copied);
rc = hwrm_send_message(bp, &modify, sizeof(modify),
FLASH_PACKAGE_TIMEOUT);
modify->len = cpu_to_le32(len);
modify->offset = cpu_to_le32(copied);
rc = hwrm_req_send(bp, modify);
if (rc)
goto pkg_abort;
copied += len;
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message_silent(bp, &install, sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
rc = hwrm_req_send_silent(bp, install);
if (defrag_attempted) {
/* We have tried to defragment already in the previous
* iteration. Return with the result for INSTALL_UPDATE
*/
mutex_unlock(&bp->hwrm_cmd_lock);
break;
}
if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags =
install->flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = _hwrm_send_message_silent(bp, &install,
sizeof(install),
INSTALL_PACKAGE_TIMEOUT);
memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp));
rc = hwrm_req_send_silent(bp, install);
if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
/* FW has cleared NVM area, driver will create
* UPDATE directory and try the flash again
*/
defrag_attempted = true;
install.flags = 0;
rc = __bnxt_flash_nvram(bp->dev,
install->flags = 0;
rc = bnxt_flash_nvram(bp->dev,
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
0, 0, item_len, NULL,
0);
0, 0, item_len, NULL, 0);
} else if (rc) {
netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
}
} else if (rc) {
netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
} while (defrag_attempted && !rc);
pkg_abort:
dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle);
if (resp.result) {
hwrm_req_drop(bp, modify);
hwrm_req_drop(bp, install);
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp.result, (int)resp.problem_item);
(s8)resp->result, (int)resp->problem_item);
rc = -ENOPKG;
}
if (rc == -EACCES)
@@ -2643,20 +2645,22 @@ static int bnxt_flash_device(struct net_device *dev,
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
struct hwrm_nvm_get_dir_info_output *output;
struct hwrm_nvm_get_dir_info_input *req;
struct bnxt *bp = netdev_priv(dev);
int rc;
struct hwrm_nvm_get_dir_info_input req = {0};
struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
output = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
*entries = le32_to_cpu(output->entries);
*length = le32_to_cpu(output->entry_length);
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
@@ -2682,7 +2686,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
u8 *buf;
size_t buflen;
dma_addr_t dma_handle;
struct hwrm_nvm_get_dir_entries_input req = {0};
struct hwrm_nvm_get_dir_entries_input *req;
rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
if (rc != 0)
@@ -2700,20 +2704,23 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
len -= 2;
memset(data, 0xff, len);
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
if (rc)
return rc;
buflen = dir_entries * entry_length;
buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
GFP_KERNEL);
buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
if (!buf) {
netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
(unsigned)buflen);
hwrm_req_drop(bp, req);
return -ENOMEM;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
req.host_dest_addr = cpu_to_le64(dma_handle);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->host_dest_addr = cpu_to_le64(dma_handle);
hwrm_req_hold(bp, req); /* hold the slice */
rc = hwrm_req_send(bp, req);
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
hwrm_req_drop(bp, req);
return rc;
}
@@ -2724,28 +2731,31 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
int rc;
u8 *buf;
dma_addr_t dma_handle;
struct hwrm_nvm_read_input req = {0};
struct hwrm_nvm_read_input *req;
if (!length)
return -EINVAL;
buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
GFP_KERNEL);
rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
if (rc)
return rc;
buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
if (!buf) {
netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
(unsigned)length);
hwrm_req_drop(bp, req);
return -ENOMEM;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
req.host_dest_addr = cpu_to_le64(dma_handle);
req.dir_idx = cpu_to_le16(index);
req.offset = cpu_to_le32(offset);
req.len = cpu_to_le32(length);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->host_dest_addr = cpu_to_le64(dma_handle);
req->dir_idx = cpu_to_le16(index);
req->offset = cpu_to_le32(offset);
req->len = cpu_to_le32(length);
hwrm_req_hold(bp, req); /* hold the slice */
rc = hwrm_req_send(bp, req);
if (rc == 0)
memcpy(data, buf, length);
dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
hwrm_req_drop(bp, req);
return rc;
}
@@ -2753,20 +2763,23 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length)
{
struct hwrm_nvm_find_dir_entry_output *output;
struct hwrm_nvm_find_dir_entry_input *req;
struct bnxt *bp = netdev_priv(dev);
int rc;
struct hwrm_nvm_find_dir_entry_input req = {0};
struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
req.enables = 0;
req.dir_idx = 0;
req.dir_type = cpu_to_le16(type);
req.dir_ordinal = cpu_to_le16(ordinal);
req.dir_ext = cpu_to_le16(ext);
req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
if (rc)
return rc;
req->enables = 0;
req->dir_idx = 0;
req->dir_type = cpu_to_le16(type);
req->dir_ordinal = cpu_to_le16(ordinal);
req->dir_ext = cpu_to_le16(ext);
req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
output = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (rc == 0) {
if (index)
*index = le16_to_cpu(output->dir_idx);
@@ -2775,7 +2788,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
if (data_length)
*data_length = le32_to_cpu(output->dir_data_length);
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
@@ -2870,12 +2883,16 @@ static int bnxt_get_eeprom(struct net_device *dev,
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
struct hwrm_nvm_erase_dir_entry_input *req;
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_erase_dir_entry_input req = {0};
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
req.dir_idx = cpu_to_le16(index);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
if (rc)
return rc;
req->dir_idx = cpu_to_le16(index);
return hwrm_req_send(bp, req);
}
static int bnxt_set_eeprom(struct net_device *dev,
@@ -2915,7 +2932,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
ordinal = eeprom->offset >> 16;
attr = eeprom->offset & 0xffff;
return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
eeprom->len);
}
@@ -3003,31 +3020,33 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
u16 page_number, u16 start_addr,
u16 data_length, u8 *buf)
{
struct hwrm_port_phy_i2c_read_input req = {0};
struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
struct hwrm_port_phy_i2c_read_output *output;
struct hwrm_port_phy_i2c_read_input *req;
int rc, byte_offset = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
req.i2c_slave_addr = i2c_addr;
req.page_number = cpu_to_le16(page_number);
req.port_id = cpu_to_le16(bp->pf.port_id);
rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
if (rc)
return rc;
output = hwrm_req_hold(bp, req);
req->i2c_slave_addr = i2c_addr;
req->page_number = cpu_to_le16(page_number);
req->port_id = cpu_to_le16(bp->pf.port_id);
do {
u16 xfer_size;
xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
data_length -= xfer_size;
req.page_offset = cpu_to_le16(start_addr + byte_offset);
req.data_length = xfer_size;
req.enables = cpu_to_le32(start_addr + byte_offset ?
req->page_offset = cpu_to_le16(start_addr + byte_offset);
req->data_length = xfer_size;
req->enables = cpu_to_le32(start_addr + byte_offset ?
PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
if (!rc)
memcpy(buf + byte_offset, output->data, xfer_size);
mutex_unlock(&bp->hwrm_cmd_lock);
byte_offset += xfer_size;
} while (!rc && data_length > 0);
hwrm_req_drop(bp, req);
return rc;
}
@@ -3136,13 +3155,13 @@ static int bnxt_nway_reset(struct net_device *dev)
static int bnxt_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct hwrm_port_led_cfg_input req = {0};
struct hwrm_port_led_cfg_input *req;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_pf_info *pf = &bp->pf;
struct bnxt_led_cfg *led_cfg;
u8 led_state;
__le16 duration;
int i;
int rc, i;
if (!bp->num_leds || BNXT_VF(bp))
return -EOPNOTSUPP;
@@ -3156,27 +3175,35 @@ static int bnxt_set_phys_id(struct net_device *dev,
} else {
return -EINVAL;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
req.port_id = cpu_to_le16(pf->port_id);
req.num_leds = bp->num_leds;
led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
if (rc)
return rc;
req->port_id = cpu_to_le16(pf->port_id);
req->num_leds = bp->num_leds;
led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
for (i = 0; i < bp->num_leds; i++, led_cfg++) {
req.enables |= BNXT_LED_DFLT_ENABLES(i);
req->enables |= BNXT_LED_DFLT_ENABLES(i);
led_cfg->led_id = bp->leds[i].led_id;
led_cfg->led_state = led_state;
led_cfg->led_blink_on = duration;
led_cfg->led_blink_off = duration;
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
struct hwrm_selftest_irq_input req = {0};
struct hwrm_selftest_irq_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
if (rc)
return rc;
req->cmpl_ring = cpu_to_le16(cmpl_ring);
return hwrm_req_send(bp, req);
}
static int bnxt_test_irq(struct bnxt *bp)
@@ -3196,31 +3223,37 @@ static int bnxt_test_irq(struct bnxt *bp)
static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
struct hwrm_port_mac_cfg_input req = {0};
struct hwrm_port_mac_cfg_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
if (rc)
return rc;
req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
if (enable)
req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
else
req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
return hwrm_req_send(bp, req);
}
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_port_phy_qcaps_input req = {0};
struct hwrm_port_phy_qcaps_output *resp;
struct hwrm_port_phy_qcaps_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
if (rc)
return rc;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
@@ -3255,7 +3288,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
req->force_link_speed = cpu_to_le16(fw_speed);
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
req->flags = 0;
req->force_link_speed = cpu_to_le16(0);
return rc;
@@ -3263,21 +3296,29 @@ static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
if (rc)
return rc;
/* prevent bnxt_disable_an_for_lpbk() from consuming the request */
hwrm_req_hold(bp, req);
if (enable) {
bnxt_disable_an_for_lpbk(bp, &req);
bnxt_disable_an_for_lpbk(bp, req);
if (ext)
req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
else
req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
} else {
req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
}
req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
rc = hwrm_req_send(bp, req);
hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -3395,17 +3436,21 @@ static int bnxt_run_loopback(struct bnxt *bp)
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_selftest_exec_input req = {0};
struct hwrm_selftest_exec_output *resp;
struct hwrm_selftest_exec_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
resp->test_success = 0;
req.flags = test_mask;
rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
if (rc)
return rc;
hwrm_req_timeout(bp, req, bp->test_info->timeout);
req->flags = test_mask;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
*test_results = resp->test_success;
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
@@ -3564,32 +3609,34 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return 0;
}
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
struct hwrm_dbg_cmn_input *cmn_req = msg;
__le16 *seq_ptr = msg + info->seq_off;
struct hwrm_dbg_cmn_output *cmn_resp;
u16 seq = 0, len, segs_off;
void *resp = cmn_resp;
dma_addr_t dma_handle;
void *dma_buf, *resp;
int rc, off = 0;
void *dma_buf;
dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
GFP_KERNEL);
if (!dma_buf)
dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
if (!dma_buf) {
hwrm_req_drop(bp, msg);
return -ENOMEM;
}
hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
cmn_resp = hwrm_req_hold(bp, msg);
resp = cmn_resp;
segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
total_segments);
cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
mutex_lock(&bp->hwrm_cmd_lock);
while (1) {
*seq_ptr = cpu_to_le16(seq);
rc = _hwrm_send_message(bp, msg, msg_len,
HWRM_COREDUMP_TIMEOUT);
rc = hwrm_req_send(bp, msg);
if (rc)
break;
@@ -3633,26 +3680,27 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
seq++;
off += len;
}
mutex_unlock(&bp->hwrm_cmd_lock);
dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
hwrm_req_drop(bp, msg);
return rc;
}
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
struct bnxt_coredump *coredump)
{
struct hwrm_dbg_coredump_list_input req = {0};
struct bnxt_hwrm_dbg_dma_info info = {NULL};
struct hwrm_dbg_coredump_list_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
if (rc)
return rc;
info.dma_len = COREDUMP_LIST_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
data_len);
rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc) {
coredump->data = info.dest_buf;
coredump->data_size = info.dest_buf_size;
@@ -3664,26 +3712,34 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input req = {0};
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
req.component_id = cpu_to_le16(component_id);
req.segment_id = cpu_to_le16(segment_id);
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
if (rc)
return rc;
return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
u16 segment_id, u32 *seg_len,
void *buf, u32 buf_len, u32 offset)
{
struct hwrm_dbg_coredump_retrieve_input req = {0};
struct hwrm_dbg_coredump_retrieve_input *req;
struct bnxt_hwrm_dbg_dma_info info = {NULL};
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
req.component_id = cpu_to_le16(component_id);
req.segment_id = cpu_to_le16(segment_id);
rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
if (rc)
return rc;
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
@@ -3696,7 +3752,7 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
info.seg_start = offset;
}
rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
if (!rc)
*seg_len = info.dest_buf_size;
@@ -3975,8 +4031,8 @@ static int bnxt_get_ts_info(struct net_device *dev,
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_selftest_qlist_input req = {0};
struct hwrm_selftest_qlist_output *resp;
struct hwrm_selftest_qlist_input *req;
struct bnxt_test_info *test_info;
struct net_device *dev = bp->dev;
int i, rc;
@@ -3988,19 +4044,22 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
test_info = bp->test_info;
if (!test_info) {
test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
if (!test_info)
return;
bp->test_info = test_info;
}
if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
return;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (rc)
goto ethtool_init_exit;
test_info = bp->test_info;
if (!test_info)
test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
if (!test_info)
goto ethtool_init_exit;
bp->test_info = test_info;
bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
if (bp->num_tests > BNXT_MAX_TEST)
bp->num_tests = BNXT_MAX_TEST;
@@ -4034,7 +4093,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
}
ethtool_init_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
}
static void bnxt_get_eth_phy_stats(struct net_device *dev,


@@ -86,24 +86,28 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
{
struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_port_ts_query_input req = {0};
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1);
req.flags = cpu_to_le32(flags);
rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY);
if (rc)
return rc;
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
*ts = le64_to_cpu(resp->ptp_msg_ts);
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
@@ -144,14 +148,17 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct hwrm_port_mac_cfg_input req = {0};
struct hwrm_port_mac_cfg_input *req;
struct bnxt *bp = ptp->bp;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
req.ptp_freq_adj_ppb = cpu_to_le32(ppb);
req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
if (rc)
return rc;
req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
rc = hwrm_req_send(ptp->bp, req);
if (rc)
netdev_err(ptp->bp->dev,
"ptp adjfreq failed. rc = %d\n", rc);
@@ -187,7 +194,7 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
{
struct hwrm_func_ptp_pin_cfg_input req = {0};
struct hwrm_func_ptp_pin_cfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u8 state = usage != BNXT_PPS_PIN_NONE;
u8 *pin_state, *pin_usg;
@@ -199,18 +206,21 @@ static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
return -EOPNOTSUPP;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_CFG, -1, -1);
rc = hwrm_req_init(ptp->bp, req, HWRM_FUNC_PTP_PIN_CFG);
if (rc)
return rc;
enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2);
req.enables = cpu_to_le32(enables);
req->enables = cpu_to_le32(enables);
pin_state = &req.pin0_state;
pin_usg = &req.pin0_usage;
pin_state = &req->pin0_state;
pin_usg = &req->pin0_usage;
*(pin_state + (pin * 2)) = state;
*(pin_usg + (pin * 2)) = usage;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(ptp->bp, req);
if (rc)
return rc;
@@ -222,12 +232,16 @@ static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
{
struct hwrm_func_ptp_cfg_input req = {0};
struct hwrm_func_ptp_cfg_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
req.enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
req.ptp_pps_event = event;
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
if (rc)
return rc;
req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
req->ptp_pps_event = event;
return hwrm_req_send(bp, req);
}
void bnxt_ptp_reapply_pps(struct bnxt *bp)
@@ -278,7 +292,7 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
struct ptp_clock_request *rq)
{
struct hwrm_func_ptp_cfg_input req = {0};
struct hwrm_func_ptp_cfg_input *req;
struct bnxt *bp = ptp->bp;
struct timespec64 ts;
u64 target_ns, delta;
@@ -293,20 +307,22 @@ static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
if (rc)
return rc;
enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD |
FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP |
FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE;
req.enables = cpu_to_le16(enables);
req.ptp_pps_event = 0;
req.ptp_freq_adj_dll_source = 0;
req.ptp_freq_adj_dll_phase = 0;
req.ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
req.ptp_freq_adj_ext_up = 0;
req.ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
req->enables = cpu_to_le16(enables);
req->ptp_pps_event = 0;
req->ptp_freq_adj_dll_source = 0;
req->ptp_freq_adj_dll_phase = 0;
req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
req->ptp_freq_adj_ext_up = 0;
req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
@@ -363,11 +379,15 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct hwrm_port_mac_cfg_input req = {0};
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct hwrm_port_mac_cfg_input *req;
u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
if (ptp->rx_filter)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
else
@@ -376,11 +396,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
req.flags = cpu_to_le32(flags);
req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
req->flags = cpu_to_le32(flags);
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -631,11 +651,10 @@ static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin,
return -EOPNOTSUPP;
}
/* bp->hwrm_cmd_lock held by the caller */
static int bnxt_ptp_pps_init(struct bnxt *bp)
{
struct hwrm_func_ptp_pin_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_ptp_pin_qcfg_input req = {0};
struct hwrm_func_ptp_pin_qcfg_output *resp;
struct hwrm_func_ptp_pin_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct ptp_clock_info *ptp_info;
struct bnxt_pps *pps_info;
@@ -643,11 +662,16 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
u32 i, rc;
/* Query current/default PIN CFG */
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_QCFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG);
if (rc)
return rc;
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc || !resp->num_pins)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc || !resp->num_pins) {
hwrm_req_drop(bp, req);
return -EOPNOTSUPP;
}
ptp_info = &ptp->ptp_info;
pps_info = &ptp->pps_info;
@@ -656,8 +680,10 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
ptp_info->pin_config = kcalloc(ptp_info->n_pins,
sizeof(*ptp_info->pin_config),
GFP_KERNEL);
if (!ptp_info->pin_config)
if (!ptp_info->pin_config) {
hwrm_req_drop(bp, req);
return -ENOMEM;
}
/* Report the TSIO capability to kernel */
pin_usg = &resp->pin0_usage;
@@ -675,6 +701,7 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
pps_info->pins[i].usage = *pin_usg;
}
hwrm_req_drop(bp, req);
/* Only 1 each of ext_ts and per_out pins is available in HW */
ptp_info->n_ext_ts = 1;


@ -27,21 +27,26 @@
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
struct bnxt_vf_info *vf, u16 event_id)
{
struct hwrm_fwd_async_event_cmpl_input req = {0};
struct hwrm_fwd_async_event_cmpl_input *req;
struct hwrm_async_event_cmpl *async_cmpl;
int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
if (rc)
goto exit;
if (vf)
req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
else
/* broadcast this async event to all VFs */
req.encap_async_event_target_id = cpu_to_le16(0xffff);
async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
req->encap_async_event_target_id = cpu_to_le16(0xffff);
async_cmpl =
(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
exit:
if (rc)
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
rc);
@@ -63,10 +68,10 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vf_info *vf;
struct hwrm_func_cfg_input *req;
bool old_setting = false;
struct bnxt_vf_info *vf;
u32 func_flags;
int rc;
@@ -90,36 +95,38 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
/*TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
req.flags = cpu_to_le32(func_flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->flags = cpu_to_le32(func_flags);
rc = hwrm_req_send(bp, req);
if (!rc) {
if (setting)
vf->flags |= BNXT_VF_SPOOFCHK;
else
vf->flags &= ~BNXT_VF_SPOOFCHK;
}
}
return rc;
}
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
if (rc)
return rc;
}
req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
vf->func_qcfg_flags = le16_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
hwrm_req_drop(bp, req);
return rc;
}
bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
@ -133,18 +140,22 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
struct hwrm_func_cfg_input req = {0};
struct hwrm_func_cfg_input *req;
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (rc)
return rc;
req->fid = cpu_to_le16(vf->fw_fid);
if (vf->flags & BNXT_VF_TRUST)
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
return hwrm_req_send(bp, req);
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@ -204,8 +215,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
int rc;
@ -221,19 +232,23 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
}
vf = &bp->pf.vf[vf_id];
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (rc)
return rc;
memcpy(vf->mac_addr, mac, ETH_ALEN);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->fid = cpu_to_le16(vf->fw_fid);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
return hwrm_req_send(bp, req);
}
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
u16 vlan_tag;
int rc;
@ -259,21 +274,23 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (vlan_tag == vf->vlan)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
req.dflt_vlan = cpu_to_le16(vlan_tag);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
rc = hwrm_req_send(bp, req);
if (!rc)
vf->vlan = vlan_tag;
}
return rc;
}
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
int max_tx_rate)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
u32 pf_link_speed;
int rc;
@ -297,17 +314,19 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
}
if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req.max_bw = cpu_to_le32(max_tx_rate);
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
req.min_bw = cpu_to_le32(min_tx_rate);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
FUNC_CFG_REQ_ENABLES_MIN_BW);
req->max_bw = cpu_to_le32(max_tx_rate);
req->min_bw = cpu_to_le32(min_tx_rate);
rc = hwrm_req_send(bp, req);
if (!rc) {
vf->min_tx_rate = min_tx_rate;
vf->max_tx_rate = max_tx_rate;
}
}
return rc;
}
@ -359,21 +378,22 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
int i, rc = 0;
struct hwrm_func_vf_resc_free_input *req;
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_func_vf_resc_free_input req = {0};
int i, rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
hwrm_req_hold(bp, req);
for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
req.vf_id = cpu_to_le16(i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
req->vf_id = cpu_to_le16(i);
rc = hwrm_req_send(bp, req);
if (rc)
break;
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
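
Holding a request does more than pin the response buffer: as the loop above relies on, it also keeps the request itself valid across repeated sends, which replaces the old idiom of taking hwrm_cmd_lock around a loop of _hwrm_send_message() calls. A sketch of that pattern with a hypothetical per-ID free message:

static int bnxt_hwrm_foo_free_all(struct bnxt *bp, u16 first, u16 count)
{
	struct hwrm_foo_free_input *req;	/* hypothetical message type */
	int i, rc;

	rc = hwrm_req_init(bp, req, HWRM_FOO_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);	/* keep req valid across repeated sends */
	for (i = 0; i < count; i++) {
		req->id = cpu_to_le16(first + i);	/* patch, then resend */
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req);
	return rc;
}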
@ -447,51 +467,55 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
struct hwrm_func_buf_rgtr_input req = {0};
struct hwrm_func_buf_rgtr_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
if (rc)
return rc;
req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
struct hwrm_func_cfg_input req = {0};
struct hwrm_func_cfg_input *req;
struct bnxt_vf_info *vf;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (rc)
return rc;
vf = &bp->pf.vf[vf_id];
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
req->fid = cpu_to_le16(vf->fw_fid);
if (is_valid_ether_addr(vf->mac_addr)) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
}
if (vf->vlan) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
req.dflt_vlan = cpu_to_le16(vf->vlan);
req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
req->dflt_vlan = cpu_to_le16(vf->vlan);
}
if (vf->max_tx_rate) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
FUNC_CFG_REQ_ENABLES_MIN_BW);
req->max_bw = cpu_to_le32(vf->max_tx_rate);
req->min_bw = cpu_to_le32(vf->min_tx_rate);
}
if (vf->flags & BNXT_VF_TRUST)
req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
/* Only called by PF to reserve resources for VFs, returns actual number of
@ -499,7 +523,7 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
*/
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
struct hwrm_func_vf_resource_cfg_input req = {0};
struct hwrm_func_vf_resource_cfg_input *req;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
@ -508,7 +532,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
u16 vf_msix = 0;
u16 vf_rss;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
if (rc)
return rc;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
@ -527,21 +553,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
min = 0;
req.min_rsscos_ctx = cpu_to_le16(min);
req->min_rsscos_ctx = cpu_to_le16(min);
}
if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
req.min_cmpl_rings = cpu_to_le16(min);
req.min_tx_rings = cpu_to_le16(min);
req.min_rx_rings = cpu_to_le16(min);
req.min_l2_ctxs = cpu_to_le16(min);
req.min_vnics = cpu_to_le16(min);
req.min_stat_ctx = cpu_to_le16(min);
req->min_cmpl_rings = cpu_to_le16(min);
req->min_tx_rings = cpu_to_le16(min);
req->min_rx_rings = cpu_to_le16(min);
req->min_l2_ctxs = cpu_to_le16(min);
req->min_vnics = cpu_to_le16(min);
req->min_stat_ctx = cpu_to_le16(min);
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
req.min_hw_ring_grps = cpu_to_le16(min);
req->min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@ -551,56 +577,57 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_ring_grps /= num_vfs;
vf_rss /= num_vfs;
req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.min_tx_rings = cpu_to_le16(vf_tx_rings);
req.min_rx_rings = cpu_to_le16(vf_rx_rings);
req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.min_vnics = cpu_to_le16(vf_vnics);
req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req.min_rsscos_ctx = cpu_to_le16(vf_rss);
req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req->min_tx_rings = cpu_to_le16(vf_tx_rings);
req->min_rx_rings = cpu_to_le16(vf_rx_rings);
req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->min_vnics = cpu_to_le16(vf_vnics);
req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->min_rsscos_ctx = cpu_to_le16(vf_rss);
}
req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.max_tx_rings = cpu_to_le16(vf_tx_rings);
req.max_rx_rings = cpu_to_le16(vf_rx_rings);
req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req.max_rsscos_ctx = cpu_to_le16(vf_rss);
req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
req->max_tx_rings = cpu_to_le16(vf_tx_rings);
req->max_rx_rings = cpu_to_le16(vf_rx_rings);
req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->max_vnics = cpu_to_le16(vf_vnics);
req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->max_rsscos_ctx = cpu_to_le16(vf_rss);
if (bp->flags & BNXT_FLAG_CHIP_P5)
req.max_msix = cpu_to_le16(vf_msix / num_vfs);
req->max_msix = cpu_to_le16(vf_msix / num_vfs);
mutex_lock(&bp->hwrm_cmd_lock);
hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
if (reset)
__bnxt_set_vf_params(bp, i);
req.vf_id = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
req->vf_id = cpu_to_le16(pf->first_vf_id + i);
rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = pf->first_vf_id + i;
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (pf->active_vfs) {
u16 n = pf->active_vfs;
hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
n;
hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
hw_resc->max_hw_ring_grps -=
le16_to_cpu(req->min_hw_ring_grps) * n;
hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
hw_resc->max_rsscos_ctxs -=
le16_to_cpu(req->min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
hw_resc->max_irqs -= vf_msix * n;
rc = pf->active_vfs;
}
hwrm_req_drop(bp, req);
return rc;
}
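
Note that the accounting block above reads the req->min_* fields back after the send loop. That is only safe because the request is still held at that point; hwrm_req_drop() is deferred to the end of the function precisely so the request body remains readable. Compressed to its essentials with a hypothetical field:

static u16 bnxt_hwrm_foo_cfg_readback(struct bnxt *bp)
{
	struct hwrm_foo_cfg_input *req;	/* hypothetical message type */
	u16 val = 0;

	if (hwrm_req_init(bp, req, HWRM_FOO_CFG))
		return 0;

	hwrm_req_hold(bp, req);
	if (!hwrm_req_send(bp, req))
		val = le16_to_cpu(req->foo_field);	/* req valid: still held */
	hwrm_req_drop(bp, req);	/* req must not be touched after this */
	return val;
}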
@ -609,15 +636,18 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
*/
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
struct hwrm_func_cfg_input *req;
int total_vf_tx_rings = 0;
u16 vf_ring_grps;
u32 mtu, i;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
if (rc)
return rc;
/* Remaining rings are distributed equally amongst VFs for now */
vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
@ -633,7 +663,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
FUNC_CFG_REQ_ENABLES_MRU |
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
@ -645,38 +675,37 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
req.mru = cpu_to_le16(mtu);
req.admin_mtu = cpu_to_le16(mtu);
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
req.num_rsscos_ctxs = cpu_to_le16(1);
req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
req.num_tx_rings = cpu_to_le16(vf_tx_rings);
req.num_rx_rings = cpu_to_le16(vf_rx_rings);
req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req.num_l2_ctxs = cpu_to_le16(4);
req->num_rsscos_ctxs = cpu_to_le16(1);
req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
req->num_tx_rings = cpu_to_le16(vf_tx_rings);
req->num_rx_rings = cpu_to_le16(vf_rx_rings);
req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->num_l2_ctxs = cpu_to_le16(4);
req.num_vnics = cpu_to_le16(vf_vnics);
req->num_vnics = cpu_to_le16(vf_vnics);
/* FIXME spec currently uses 1 bit for stats ctx */
req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
mutex_lock(&bp->hwrm_cmd_lock);
hwrm_req_hold(bp, req);
for (i = 0; i < num_vfs; i++) {
int vf_tx_rsvd = vf_tx_rings;
req.fid = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
req->fid = cpu_to_le16(pf->first_vf_id + i);
rc = hwrm_req_send(bp, req);
if (rc)
break;
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = le16_to_cpu(req.fid);
pf->vf[i].fw_fid = le16_to_cpu(req->fid);
rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
&vf_tx_rsvd);
if (rc)
break;
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
if (pf->active_vfs) {
hw_resc->max_tx_rings -= total_vf_tx_rings;
hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@ -894,23 +923,24 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
void *encap_resp, __le64 encap_resp_addr,
__le16 encap_resp_cpr, u32 msg_size)
{
int rc = 0;
struct hwrm_fwd_resp_input req = {0};
struct hwrm_fwd_resp_input *req;
int rc;
if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
req.encap_resp_len = cpu_to_le16(msg_size);
req.encap_resp_addr = encap_resp_addr;
req.encap_resp_cmpl_ring = encap_resp_cpr;
memcpy(req.encap_resp, encap_resp, msg_size);
req->target_id = cpu_to_le16(vf->fw_fid);
req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
req->encap_resp_len = cpu_to_le16(msg_size);
req->encap_resp_addr = encap_resp_addr;
req->encap_resp_cmpl_ring = encap_resp_cpr;
memcpy(req->encap_resp, encap_resp, msg_size);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
return rc;
@ -919,19 +949,21 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
u32 msg_size)
{
int rc = 0;
struct hwrm_reject_fwd_resp_input req = {0};
struct hwrm_reject_fwd_resp_input *req;
int rc;
if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
if (!rc) {
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
req->target_id = cpu_to_le16(vf->fw_fid);
req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
return rc;
@ -940,19 +972,21 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
u32 msg_size)
{
int rc = 0;
struct hwrm_exec_fwd_resp_input req = {0};
struct hwrm_exec_fwd_resp_input *req;
int rc;
if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
if (!rc) {
/* Set the new target id */
req.target_id = cpu_to_le16(vf->fw_fid);
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
req->target_id = cpu_to_le16(vf->fw_fid);
req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
return rc;
@ -1119,7 +1153,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
struct hwrm_func_vf_cfg_input req = {0};
struct hwrm_func_vf_cfg_input *req;
int rc = 0;
if (!BNXT_VF(bp))
@ -1130,10 +1164,16 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
rc = -EADDRNOTAVAIL;
goto mac_done;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
if (rc)
goto mac_done;
req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
if (!strict)
hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
rc = hwrm_req_send(bp, req);
mac_done:
if (rc && strict) {
rc = -EADDRNOTAVAIL;
@ -1146,15 +1186,17 @@ mac_done:
void bnxt_update_vf_mac(struct bnxt *bp)
{
struct hwrm_func_qcaps_input req = {0};
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
bool inform_pf = false;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
req.fid = cpu_to_le16(0xffff);
if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
return;
mutex_lock(&bp->hwrm_cmd_lock);
if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
req->fid = cpu_to_le16(0xffff);
resp = hwrm_req_hold(bp, req);
if (hwrm_req_send(bp, req))
goto update_vf_mac_exit;
/* Store MAC address from the firmware. There are 2 cases:
@ -1177,7 +1219,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
if (is_valid_ether_addr(bp->vf.mac_addr))
memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
if (inform_pf)
bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}


@ -503,16 +503,18 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
struct hwrm_cfa_flow_free_input req = { 0 };
struct hwrm_cfa_flow_free_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE);
if (!rc) {
if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
req.ext_flow_handle = flow_node->ext_flow_handle;
req->ext_flow_handle = flow_node->ext_flow_handle;
else
req.flow_handle = flow_node->flow_handle;
req->flow_handle = flow_node->flow_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
@ -588,20 +590,22 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
struct bnxt_tc_actions *actions = &flow->actions;
struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
struct hwrm_cfa_flow_alloc_input req = { 0 };
struct hwrm_cfa_flow_alloc_output *resp;
struct hwrm_cfa_flow_alloc_input *req;
u16 flow_flags = 0, action_flags = 0;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC);
if (rc)
return rc;
req.src_fid = cpu_to_le16(flow->src_fid);
req.ref_flow_handle = ref_flow_handle;
req->src_fid = cpu_to_le16(flow->src_fid);
req->ref_flow_handle = ref_flow_handle;
if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac,
ETH_ALEN);
memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac,
ETH_ALEN);
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
@ -616,71 +620,71 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
/* L3 source rewrite */
req.nat_ip_address[0] =
req->nat_ip_address[0] =
actions->nat.l3.ipv4.saddr.s_addr;
/* L4 source port */
if (actions->nat.l4.ports.sport)
req.nat_port =
req->nat_port =
actions->nat.l4.ports.sport;
} else {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
/* L3 destination rewrite */
req.nat_ip_address[0] =
req->nat_ip_address[0] =
actions->nat.l3.ipv4.daddr.s_addr;
/* L4 destination port */
if (actions->nat.l4.ports.dport)
req.nat_port =
req->nat_port =
actions->nat.l4.ports.dport;
}
netdev_dbg(bp->dev,
"req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
req.nat_ip_address, actions->nat.src_xlate,
req.nat_port);
"req->nat_ip_address: %pI4 src_xlate: %d req->nat_port: %x\n",
req->nat_ip_address, actions->nat.src_xlate,
req->nat_port);
} else {
if (actions->nat.src_xlate) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
/* L3 source rewrite */
memcpy(req.nat_ip_address,
memcpy(req->nat_ip_address,
actions->nat.l3.ipv6.saddr.s6_addr32,
sizeof(req.nat_ip_address));
sizeof(req->nat_ip_address));
/* L4 source port */
if (actions->nat.l4.ports.sport)
req.nat_port =
req->nat_port =
actions->nat.l4.ports.sport;
} else {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
/* L3 destination rewrite */
memcpy(req.nat_ip_address,
memcpy(req->nat_ip_address,
actions->nat.l3.ipv6.daddr.s6_addr32,
sizeof(req.nat_ip_address));
sizeof(req->nat_ip_address));
/* L4 destination port */
if (actions->nat.l4.ports.dport)
req.nat_port =
req->nat_port =
actions->nat.l4.ports.dport;
}
netdev_dbg(bp->dev,
"req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
req.nat_ip_address, actions->nat.src_xlate,
req.nat_port);
"req->nat_ip_address: %pI6 src_xlate: %d req->nat_port: %x\n",
req->nat_ip_address, actions->nat.src_xlate,
req->nat_port);
}
}
if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
req.tunnel_handle = tunnel_handle;
req->tunnel_handle = tunnel_handle;
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
}
req.ethertype = flow->l2_key.ether_type;
req.ip_proto = flow->l4_key.ip_proto;
req->ethertype = flow->l2_key.ether_type;
req->ip_proto = flow->l4_key.ip_proto;
if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN);
memcpy(req->smac, flow->l2_key.smac, ETH_ALEN);
}
if (flow->l2_key.num_vlans > 0) {
@ -689,7 +693,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
* in outer_vlan_tci when num_vlans is 1 (which is
* always the case in TC.)
*/
req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
req->outer_vlan_tci = flow->l2_key.inner_vlan_tci;
}
/* If all IP and L4 fields are wildcarded then this is an L2 flow */
@ -702,68 +706,67 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
req.ip_dst_mask_len =
req->ip_dst[0] = l3_key->ipv4.daddr.s_addr;
req->ip_dst_mask_len =
inet_mask_len(l3_mask->ipv4.daddr.s_addr);
req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
req.ip_src_mask_len =
req->ip_src[0] = l3_key->ipv4.saddr.s_addr;
req->ip_src_mask_len =
inet_mask_len(l3_mask->ipv4.saddr.s_addr);
} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
sizeof(req.ip_dst));
req.ip_dst_mask_len =
memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32,
sizeof(req->ip_dst));
req->ip_dst_mask_len =
ipv6_mask_len(&l3_mask->ipv6.daddr);
memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
sizeof(req.ip_src));
req.ip_src_mask_len =
memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32,
sizeof(req->ip_src));
req->ip_src_mask_len =
ipv6_mask_len(&l3_mask->ipv6.saddr);
}
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
req.l4_src_port = flow->l4_key.ports.sport;
req.l4_src_port_mask = flow->l4_mask.ports.sport;
req.l4_dst_port = flow->l4_key.ports.dport;
req.l4_dst_port_mask = flow->l4_mask.ports.dport;
req->l4_src_port = flow->l4_key.ports.sport;
req->l4_src_port_mask = flow->l4_mask.ports.sport;
req->l4_dst_port = flow->l4_key.ports.dport;
req->l4_dst_port_mask = flow->l4_mask.ports.dport;
} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
/* l4 ports serve as type/code when ip_proto is ICMP */
req.l4_src_port = htons(flow->l4_key.icmp.type);
req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
req.l4_dst_port = htons(flow->l4_key.icmp.code);
req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
req->l4_src_port = htons(flow->l4_key.icmp.type);
req->l4_src_port_mask = htons(flow->l4_mask.icmp.type);
req->l4_dst_port = htons(flow->l4_key.icmp.code);
req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
}
req.flags = cpu_to_le16(flow_flags);
req->flags = cpu_to_le16(flow_flags);
if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
} else {
if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
req.dst_fid = cpu_to_le16(actions->dst_fid);
req->dst_fid = cpu_to_le16(actions->dst_fid);
}
if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
req->l2_rewrite_vlan_tci = actions->push_vlan_tci;
memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN);
memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN);
}
if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
action_flags |=
CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
/* Rewrite config with tpid = 0 implies vlan pop */
req.l2_rewrite_vlan_tpid = 0;
memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
req->l2_rewrite_vlan_tpid = 0;
memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN);
memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN);
}
}
req.action_flags = cpu_to_le16(action_flags);
req->action_flags = cpu_to_le16(action_flags);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
resp = bnxt_get_hwrm_resp_addr(bp, &req);
/* CFA_FLOW_ALLOC response interpretation:
* fw with fw with
* 16-bit 64-bit
@ -779,7 +782,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
flow_node->flow_id = resp->flow_id;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
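
hwrm_req_send_silent() above suppresses the error logging that a plain hwrm_req_send() would produce for a request that can legitimately fail. The same effect is available conditionally through hwrm_req_flags(), as bnxt_approve_mac() does with BNXT_HWRM_CTX_SILENT for its non-strict case. A sketch, again with a hypothetical message:

static int bnxt_hwrm_foo_try(struct bnxt *bp, bool quiet)
{
	struct hwrm_foo_input *req;	/* hypothetical message type */
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FOO);
	if (rc)
		return rc;

	if (quiet)	/* same effect as calling hwrm_req_send_silent() */
		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	return hwrm_req_send(bp, req);
}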
@ -789,67 +792,69 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
__le32 ref_decap_handle,
__le32 *decap_filter_handle)
{
struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
struct hwrm_cfa_decap_filter_alloc_output *resp;
struct ip_tunnel_key *tun_key = &flow->tun_key;
struct hwrm_cfa_decap_filter_alloc_input *req;
u32 enables = 0;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC);
if (rc)
goto exit;
req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
/* tunnel_id is wrongly defined in hsi defn. as __le32 */
req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
ether_addr_copy(req.dst_macaddr, l2_info->dmac);
ether_addr_copy(req->dst_macaddr, l2_info->dmac);
}
if (l2_info->num_vlans) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
req.t_ivlan_vid = l2_info->inner_vlan_tci;
req->t_ivlan_vid = l2_info->inner_vlan_tci;
}
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
req.ethertype = htons(ETH_P_IP);
req->ethertype = htons(ETH_P_IP);
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
req.src_ipaddr[0] = tun_key->u.ipv4.src;
req->ip_addr_type =
CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req->dst_ipaddr[0] = tun_key->u.ipv4.dst;
req->src_ipaddr[0] = tun_key->u.ipv4.src;
}
if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
req.dst_port = tun_key->tp_dst;
req->dst_port = tun_key->tp_dst;
}
/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
* is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
*/
req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
req.enables = cpu_to_le32(enables);
req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
req->enables = cpu_to_le32(enables);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
resp = bnxt_get_hwrm_resp_addr(bp, &req);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc)
*decap_filter_handle = resp->decap_filter_id;
} else {
hwrm_req_drop(bp, req);
exit:
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@ -857,13 +862,14 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
__le32 decap_filter_handle)
{
struct hwrm_cfa_decap_filter_free_input req = { 0 };
struct hwrm_cfa_decap_filter_free_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
req.decap_filter_id = decap_filter_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE);
if (!rc) {
req->decap_filter_id = decap_filter_handle;
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
@ -875,18 +881,18 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
struct bnxt_tc_l2_key *l2_info,
__le32 *encap_record_handle)
{
struct hwrm_cfa_encap_record_alloc_input req = { 0 };
struct hwrm_cfa_encap_record_alloc_output *resp;
struct hwrm_cfa_encap_data_vxlan *encap =
(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
struct hwrm_cfa_encap_record_alloc_input *req;
struct hwrm_cfa_encap_data_vxlan *encap;
struct hwrm_vxlan_ipv4_hdr *encap_ipv4;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC);
if (rc)
goto exit;
encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data;
req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
ether_addr_copy(encap->src_mac_addr, l2_info->smac);
if (l2_info->num_vlans) {
@ -895,6 +901,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
encap->ovlan_tpid = l2_info->inner_vlan_tpid;
}
encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
encap_ipv4->ttl = encap_key->ttl;
@ -906,15 +913,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
encap->dst_port = encap_key->tp_dst;
encap->vni = tunnel_id_to_key32(encap_key->tun_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
resp = bnxt_get_hwrm_resp_addr(bp, &req);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc)
*encap_record_handle = resp->encap_record_id;
} else {
hwrm_req_drop(bp, req);
exit:
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@ -922,13 +928,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
static int hwrm_cfa_encap_record_free(struct bnxt *bp,
__le32 encap_record_handle)
{
struct hwrm_cfa_encap_record_free_input req = { 0 };
struct hwrm_cfa_encap_record_free_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
req.encap_record_id = encap_record_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE);
if (!rc) {
req->encap_record_id = encap_record_handle;
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
@ -1674,14 +1681,20 @@ static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
struct bnxt_tc_stats_batch stats_batch[])
{
struct hwrm_cfa_flow_stats_input req = { 0 };
struct hwrm_cfa_flow_stats_output *resp;
__le16 *req_flow_handles = &req.flow_handle_0;
__le32 *req_flow_ids = &req.flow_id_0;
struct hwrm_cfa_flow_stats_input *req;
__le16 *req_flow_handles;
__le32 *req_flow_ids;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
req.num_flows = cpu_to_le16(num_flows);
rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS);
if (rc)
goto exit;
req_flow_handles = &req->flow_handle_0;
req_flow_ids = &req->flow_id_0;
req->num_flows = cpu_to_le16(num_flows);
for (i = 0; i < num_flows; i++) {
struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
@ -1689,13 +1702,12 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
&req_flow_handles[i], &req_flow_ids[i]);
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
__le64 *resp_packets;
__le64 *resp_bytes;
resp = bnxt_get_hwrm_resp_addr(bp, &req);
resp_packets = &resp->packet_0;
resp_bytes = &resp->byte_0;
@ -1705,10 +1717,11 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
stats_batch[i].hw_stats.bytes =
le64_to_cpu(resp_bytes[i]);
}
} else {
netdev_info(bp->dev, "error rc=%d\n", rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
exit:
if (rc)
netdev_info(bp->dev, "error rc=%d\n", rc);
return rc;
}


@ -238,27 +238,33 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct output *resp;
struct input *req;
u32 resp_len;
int rc;
if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
return -EBUSY;
mutex_lock(&bp->hwrm_cmd_lock);
req = fw_msg->msg;
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
fw_msg->timeout);
if (!rc) {
struct output *resp = bp->hwrm_cmd_resp_addr;
u32 len = le16_to_cpu(resp->resp_len);
rc = hwrm_req_init(bp, req, 0 /* don't care */);
if (rc)
return rc;
if (fw_msg->resp_max_len < len)
len = fw_msg->resp_max_len;
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
return rc;
memcpy(fw_msg->resp, resp, len);
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
resp_len = le16_to_cpu(resp->resp_len);
if (resp_len) {
if (fw_msg->resp_max_len < resp_len)
resp_len = fw_msg->resp_max_len;
memcpy(fw_msg->resp, resp, resp_len);
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
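
bnxt_send_msg() is the odd one out: the ULP hands over a fully formed HWRM message, so the request body created by hwrm_req_init() is simply overwritten with hwrm_req_replace(), and the caller's timeout is applied with hwrm_req_timeout(). Reduced to the pattern itself, under the same assumptions as the code above:

static int bnxt_send_prebuilt(struct bnxt *bp, void *msg, u32 msg_len,
			      unsigned int timeout)
{
	struct input *req;
	int rc;

	/* opcode is a placeholder: hwrm_req_replace() overwrites the body */
	rc = hwrm_req_init(bp, req, 0);
	if (rc)
		return rc;

	rc = hwrm_req_replace(bp, req, msg, msg_len);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, timeout);	/* override default timeout */
	return hwrm_req_send(bp, req);
}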


@ -28,38 +28,40 @@
static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
u16 *tx_cfa_action, u16 *rx_cfa_code)
{
struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_vfr_alloc_input req = { 0 };
struct hwrm_cfa_vfr_alloc_output *resp;
struct hwrm_cfa_vfr_alloc_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
req.vf_id = cpu_to_le16(vf_idx);
sprintf(req.vfr_name, "vfr%d", vf_idx);
rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC);
if (!rc) {
req->vf_id = cpu_to_le16(vf_idx);
sprintf(req->vfr_name, "vfr%d", vf_idx);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
*tx_cfa_action, *rx_cfa_code);
} else {
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
}
if (rc)
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
}
static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
{
struct hwrm_cfa_vfr_free_input req = { 0 };
struct hwrm_cfa_vfr_free_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
sprintf(req.vfr_name, "vfr%d", vf_idx);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE);
if (!rc) {
sprintf(req->vfr_name, "vfr%d", vf_idx);
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
@ -68,17 +70,18 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
u16 *max_mtu)
{
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
u16 mtu;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
mtu = le16_to_cpu(resp->max_mtu_configured);
if (!mtu)
@ -86,7 +89,7 @@ static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
else
*max_mtu = mtu;
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}