Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Saeed Mahameed says:

====================
mlx5-next 2022-02-22

The following PR includes updates to the mlx5-next branch:

Headlines:
==========

1) Jakub cleans up unused static inline functions.

2) I did some low-level firmware command interface return status changes to
   provide the caller with full visibility on the error/status returned by
   the firmware.

3) Use the new command interface in RDMA DEVX use cases, to avoid flooding
   dmesg with errors from some "expected" user-error-prone use cases.

4) Moshe also uses the new command interface to grab the specific error code
   from the MFRL register command, to provide the exact reason why SW reset
   couldn't be performed internally in FW.

5) From Mark Bloch: Lag, drop packets in hardware when possible.

   In active-backup mode the inactive interface's packets are dropped by the
   bond device. In switchdev, where TC rules are offloaded to the FDB, this
   can lead to packets being hit in the FDB where, without offload, they
   would have been dropped before reaching TC rules in the kernel.

   Create a drop rule to make sure packets on inactive ports are dropped
   before reaching the FDB.

   Listen on NETDEV_CHANGEUPPER / NETDEV_CHANGEINFODATA events and record
   the inactive state and offload accordingly.

* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Add clarification on sync reset failure
  net/mlx5: Add reset_state field to MFRL register
  RDMA/mlx5: Use new command interface API
  net/mlx5: cmdif, Refactor error handling and reporting of async commands
  net/mlx5: Use mlx5_cmd_do() in core create_{cq,dct}
  net/mlx5: cmdif, Add new api for command execution
  net/mlx5: cmdif, cmd_check refactoring
  net/mlx5: cmdif, Return value improvements
  net/mlx5: Lag, offload active-backup drops to hardware
  net/mlx5: Lag, record inactive state of bond device
  net/mlx5: Lag, don't use magic numbers for ports
  net/mlx5: Lag, use local variable already defined to access E-Switch
  net/mlx5: E-switch, add drop rule support to ingress ACL
  net/mlx5: E-switch, remove special uplink ingress ACL handling
  net/mlx5: E-Switch, reserve and use same uplink metadata across ports
  net/mlx5: Add ability to insert to specific flow group
  mlx5: remove unused static inlines
====================

Link: https://lore.kernel.org/r/20220223233930.319301-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit f2b77012dd
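The recurring pattern in this series is the new mlx5_cmd_do() return convention: a negative driver/delivery error means the command never executed, 0 means firmware executed it and set a good outbox status, and -EREMOTEIO means firmware executed it but the outbox status is bad and must be inspected by the caller. A minimal caller-side sketch of that convention, modeled on the DEVX handlers changed below — the wrapper function itself is hypothetical, not part of this series:

	/* Hypothetical caller; only the mlx5_cmd_do() semantics are
	 * taken from this series.
	 */
	static int example_cmd(struct mlx5_core_dev *mdev,
			       void *in, int in_sz, void *out, int out_sz)
	{
		int err = mlx5_cmd_do(mdev, in, in_sz, out, out_sz);

		if (err == -EREMOTEIO) {
			/* Executed by FW, but outbox.status != MLX5_CMD_STAT_OK.
			 * The outbox holds the FW status and syndrome, so the
			 * caller can consume or forward it (as DEVX does for
			 * "expected" user errors) without flooding dmesg.
			 */
			return err;
		}
		if (err)
			return err; /* driver or delivery error, outbox invalid */

		return 0; /* success, outbox.status == MLX5_CMD_STAT_OK */
	}

Callers that want the old translate-and-log behavior keep using mlx5_cmd_exec(), which after this series is simply mlx5_cmd_do() followed by mlx5_cmd_check().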
@@ -1055,7 +1055,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
 	int cmd_out_len = uverbs_attr_get_len(attrs,
 					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
 	void *cmd_out;
-	int err;
+	int err, err2;
 	int uid;
 
 	c = devx_ufile2uctx(attrs);
@@ -1076,14 +1076,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
 		return PTR_ERR(cmd_out);
 
 	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
-	err = mlx5_cmd_exec(dev->mdev, cmd_in,
+	err = mlx5_cmd_do(dev->mdev, cmd_in,
 			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
 			    cmd_out, cmd_out_len);
-	if (err)
+	if (err && err != -EREMOTEIO)
 		return err;
 
-	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
-			      cmd_out_len);
+	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
+			      cmd_out_len);
+
+	return err2 ?: err;
 }
 
 static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
@@ -1457,7 +1459,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
 	struct devx_obj *obj;
 	u16 obj_type = 0;
-	int err;
+	int err, err2 = 0;
 	int uid;
 	u32 obj_id;
 	u16 opcode;
@@ -1497,15 +1499,18 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 	    !is_apu_cq(dev, cmd_in)) {
 		obj->flags |= DEVX_OBJ_FLAGS_CQ;
 		obj->core_cq.comp = devx_cq_comp;
-		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
+		err = mlx5_create_cq(dev->mdev, &obj->core_cq,
 					  cmd_in, cmd_in_len, cmd_out,
 					  cmd_out_len);
 	} else {
-		err = mlx5_cmd_exec(dev->mdev, cmd_in,
-				    cmd_in_len,
-				    cmd_out, cmd_out_len);
+		err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
+				  cmd_out, cmd_out_len);
 	}
 
+	if (err == -EREMOTEIO)
+		err2 = uverbs_copy_to(attrs,
+				      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+				      cmd_out, cmd_out_len);
 	if (err)
 		goto obj_free;
 
@@ -1548,7 +1553,7 @@ obj_destroy:
 			     sizeof(out));
 obj_free:
 	kfree(obj);
-	return err;
+	return err2 ?: err;
 }
 
 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
@@ -1563,7 +1568,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
 		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
 	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
 	void *cmd_out;
-	int err;
+	int err, err2;
 	int uid;
 
 	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
@@ -1586,14 +1591,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
 	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
 	devx_set_umem_valid(cmd_in);
 
-	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
+	err = mlx5_cmd_do(mdev->mdev, cmd_in,
 			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
 			    cmd_out, cmd_out_len);
-	if (err)
+	if (err && err != -EREMOTEIO)
 		return err;
 
-	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
-			      cmd_out, cmd_out_len);
+	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+			      cmd_out, cmd_out_len);
+
+	return err2 ?: err;
 }
 
 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
@@ -1607,7 +1614,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
 	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
 		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
 	void *cmd_out;
-	int err;
+	int err, err2;
 	int uid;
 	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
 
@@ -1629,14 +1636,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
 		return PTR_ERR(cmd_out);
 
 	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
-	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
+	err = mlx5_cmd_do(mdev->mdev, cmd_in,
 			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
 			    cmd_out, cmd_out_len);
-	if (err)
+	if (err && err != -EREMOTEIO)
 		return err;
 
-	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
-			      cmd_out, cmd_out_len);
+	err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+			      cmd_out, cmd_out_len);
+
+	return err2 ?: err;
 }
 
 struct devx_async_event_queue {

@@ -140,6 +140,19 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
 }
 
+static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
+{
+	if (status == -ENXIO) /* core driver is not available */
+		return;
+
+	mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+	if (status != -EREMOTEIO) /* driver specific failure */
+		return;
+
+	/* Failed in FW, print cmd out failure details */
+	mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
+}
+
 static void create_mkey_callback(int status, struct mlx5_async_work *context)
 {
 	struct mlx5_ib_mr *mr =
@@ -149,7 +162,7 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 	unsigned long flags;
 
 	if (status) {
-		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+		create_mkey_warn(dev, status, mr->out);
 		kfree(mr);
 		spin_lock_irqsave(&ent->lock, flags);
 		ent->pending--;

@@ -4465,6 +4465,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
 					   sizeof(out));
+		err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
 		if (err)
 			return err;
 		resp.dctn = qp->dct.mdct.mqp.qpn;

@@ -220,7 +220,7 @@ int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
 	init_completion(&dct->drained);
 	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
 
-	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+	err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
 	if (err)
 		return err;
 

@@ -190,10 +190,10 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
 
 	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
-		return -EINVAL;
+		return -EHWPOISON;
 
 	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
-		return -EINVAL;
+		return -EHWPOISON;
 
 	return 0;
 }
@@ -259,12 +259,12 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
 
 	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
 	if (sig != 0xff)
-		return -EINVAL;
+		return -EHWPOISON;
 
 	for (i = 0; i < n && next; i++) {
 		err = verify_block_sig(next->buf);
 		if (err)
-			return err;
+			return -EHWPOISON;
 
 		next = next->next;
 	}
@@ -479,7 +479,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_ALLOC_SF:
 		*status = MLX5_DRIVER_STATUS_ABORTED;
 		*synd = MLX5_DRIVER_SYND;
-		return -EIO;
+		return -ENOLINK;
 	default:
 		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
 		return -EINVAL;
@@ -760,45 +760,73 @@ struct mlx5_ifc_mbox_in_bits {
 	u8	   reserved_at_40[0x40];
 };
 
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
 {
-	*status = MLX5_GET(mbox_out, out, status);
-	*syndrome = MLX5_GET(mbox_out, out, syndrome);
-}
+	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+	u8 status = MLX5_GET(mbox_out, out, status);
+
+	mlx5_core_err_rl(dev,
+			 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+			 mlx5_command_str(opcode), opcode, op_mod,
+			 cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
+}
+EXPORT_SYMBOL(mlx5_cmd_out_err);
 
-static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
 {
+	u16 opcode, op_mod;
 	u32 syndrome;
 	u8  status;
-	u16 opcode;
-	u16 op_mod;
 	u16 uid;
+	int err;
 
-	mlx5_cmd_mbox_status(out, &status, &syndrome);
-	if (!status)
-		return 0;
+	syndrome = MLX5_GET(mbox_out, out, syndrome);
+	status = MLX5_GET(mbox_out, out, status);
 
 	opcode = MLX5_GET(mbox_in, in, opcode);
 	op_mod = MLX5_GET(mbox_in, in, op_mod);
 	uid    = MLX5_GET(mbox_in, in, uid);
 
+	err = cmd_status_to_err(status);
+
 	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
-		mlx5_core_err_rl(dev,
-			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
-			mlx5_command_str(opcode), opcode, op_mod,
-			cmd_status_str(status), status, syndrome);
+		mlx5_cmd_out_err(dev, opcode, op_mod, out);
 	else
 		mlx5_core_dbg(dev,
-			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
-			mlx5_command_str(opcode),
-			opcode, op_mod,
-			cmd_status_str(status),
-			status,
-			syndrome);
-
-	return cmd_status_to_err(status);
+			      "%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+			      mlx5_command_str(opcode), opcode, op_mod, uid,
+			      cmd_status_str(status), status, syndrome, err);
+}
+
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
+{
+	/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
+	if (err == -ENXIO) {
+		u16 opcode = MLX5_GET(mbox_in, in, opcode);
+		u32 syndrome;
+		u8 status;
+
+		/* PCI Error, emulate command return status, for smooth reset */
+		err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
+		MLX5_SET(mbox_out, out, status, status);
+		MLX5_SET(mbox_out, out, syndrome, syndrome);
+		if (!err)
+			return 0;
+	}
+
+	/* driver or FW delivery error */
+	if (err != -EREMOTEIO && err)
+		return err;
+
+	/* check outbox status */
+	err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
+	if (err)
+		cmd_status_print(dev, in, out);
+
+	return err;
 }
+EXPORT_SYMBOL(mlx5_cmd_check);
 
 static void dump_command(struct mlx5_core_dev *dev,
 			 struct mlx5_cmd_work_ent *ent, int input)
 {
@@ -980,13 +1008,7 @@ static void cmd_work_handler(struct work_struct *work)
 
 	/* Skip sending command to fw if internal error */
 	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
-		u8 status = 0;
-		u32 drv_synd;
-
-		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
-		MLX5_SET(mbox_out, ent->out, status, status);
-		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
-
+		ent->ret = -ENXIO;
 		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
 		return;
 	}
@@ -1005,6 +1027,31 @@ static void cmd_work_handler(struct work_struct *work)
 	}
 }
 
+static int deliv_status_to_err(u8 status)
+{
+	switch (status) {
+	case MLX5_CMD_DELIVERY_STAT_OK:
+	case MLX5_DRIVER_STATUS_ABORTED:
+		return 0;
+	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
+	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
+		return -EBADR;
+	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
+	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
+	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
+		return -EFAULT; /* Bad address */
+	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
+	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
+	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
+	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
+		return -ENOMSG;
+	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
+		return -EIO;
+	default:
+		return -EINVAL;
+	}
+}
+
 static const char *deliv_status_to_str(u8 status)
 {
 	switch (status) {
@@ -1101,16 +1148,27 @@ out_err:
 /* Notes:
  * 1. Callback functions may not sleep
  * 2. page queue commands do not support asynchrous completion
+ *
+ * return value in case (!callback):
+ *	ret < 0 : Command execution couldn't be submitted by driver
+ *	ret > 0 : Command execution couldn't be performed by firmware
+ *	ret == 0: Command was executed by FW, Caller must check FW outbox status.
+ *
+ * return value in case (callback):
+ *	ret < 0 : Command execution couldn't be submitted by driver
+ *	ret == 0: Command will be submitted to FW for execution
+ *		   and the callback will be called for further status updates
  */
 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
 			   mlx5_cmd_cbk_t callback,
-			   void *context, int page_queue, u8 *status,
+			   void *context, int page_queue,
 			   u8 token, bool force_polling)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
 	struct mlx5_cmd_stats *stats;
+	u8 status = 0;
 	int err = 0;
 	s64 ds;
 	u16 op;
@@ -1141,12 +1199,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 		cmd_work_handler(&ent->work);
 	} else if (!queue_work(cmd->wq, &ent->work)) {
 		mlx5_core_warn(dev, "failed to queue work\n");
-		err = -ENOMEM;
+		err = -EALREADY;
 		goto out_free;
 	}
 
 	if (callback)
-		goto out; /* mlx5_cmd_comp_handler() will put(ent) */
+		return 0; /* mlx5_cmd_comp_handler() will put(ent) */
 
 	err = wait_func(dev, ent);
 	if (err == -ETIMEDOUT || err == -ECANCELED)
@@ -1164,12 +1222,11 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
 			   "fw exec time for %s is %lld nsec\n",
 			   mlx5_command_str(op), ds);
-	*status = ent->status;
 
 out_free:
+	status = ent->status;
 	cmd_ent_put(ent);
-out:
-	return err;
+	return err ? : status;
 }
 
 static ssize_t dbg_write(struct file *filp, const char __user *buf,
@@ -1612,15 +1669,15 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
 			ent->ts2 = ktime_get_ns();
 			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
 			dump_command(dev, ent, 0);
-			if (!ent->ret) {
+
+			if (vec & MLX5_TRIGGERED_CMD_COMP)
+				ent->ret = -ENXIO;
+
+			if (!ent->ret) { /* Command completed by FW */
 				if (!cmd->checksum_disabled)
 					ent->ret = verify_signature(ent);
-				else
-					ent->ret = 0;
-				if (vec & MLX5_TRIGGERED_CMD_COMP)
-					ent->status = MLX5_DRIVER_STATUS_ABORTED;
-				else
-					ent->status = ent->lay->status_own >> 1;
+
+				ent->status = ent->lay->status_own >> 1;
 
 				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
 					      ent->ret, deliv_status_to_str(ent->status), ent->status);
@@ -1638,21 +1695,18 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
 
 			callback = ent->callback;
 			context = ent->context;
-			err = ent->ret;
-			if (!err) {
+			err = ent->ret ? : ent->status;
+			if (err > 0) /* Failed in FW, command didn't execute */
+				err = deliv_status_to_err(err);
+
+			if (!err)
 				err = mlx5_copy_from_msg(ent->uout,
 							 ent->out,
 							 ent->uout_size);
 
-				err = err ? err : mlx5_cmd_check(dev,
-								 ent->in->first.data,
-								 ent->uout);
-			}
-
 			mlx5_free_cmd_msg(dev, ent->out);
 			free_msg(dev, ent->in);
 
-			err = err ? err : ent->status;
 			/* final consumer is done, release ent */
 			cmd_ent_put(ent);
 			callback(err, context);
@@ -1719,31 +1773,6 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
 		up(&cmd->sem);
 }
 
-static int status_to_err(u8 status)
-{
-	switch (status) {
-	case MLX5_CMD_DELIVERY_STAT_OK:
-	case MLX5_DRIVER_STATUS_ABORTED:
-		return 0;
-	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
-	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
-		return -EBADR;
-	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
-	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
-	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
-		return -EFAULT; /* Bad address */
-	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
-	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
-	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
-	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
-		return -ENOMSG;
-	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
-		return -EIO;
-	default:
-		return -EINVAL;
-	}
-}
-
 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
 				      gfp_t gfp)
 {
@@ -1787,27 +1816,23 @@ static int is_manage_pages(void *in)
 	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
 }
 
+/* Notes:
+ * 1. Callback functions may not sleep
+ * 2. Page queue commands do not support asynchrous completion
+ */
 static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		    int out_size, mlx5_cmd_cbk_t callback, void *context,
 		    bool force_polling)
 {
-	struct mlx5_cmd_msg *inb;
-	struct mlx5_cmd_msg *outb;
+	u16 opcode = MLX5_GET(mbox_in, in, opcode);
+	struct mlx5_cmd_msg *inb, *outb;
 	int pages_queue;
 	gfp_t gfp;
-	int err;
-	u8 status = 0;
-	u32 drv_synd;
-	u16 opcode;
 	u8 token;
+	int err;
 
-	opcode = MLX5_GET(mbox_in, in, opcode);
-	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
-		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
-		MLX5_SET(mbox_out, out, status, status);
-		MLX5_SET(mbox_out, out, syndrome, drv_synd);
-
-		return err;
-	}
+	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
+		return -ENXIO;
 
 	pages_queue = is_manage_pages(in);
 	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
@@ -1833,39 +1858,108 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 	}
 
 	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-			      pages_queue, &status, token, force_polling);
+			      pages_queue, token, force_polling);
+	if (callback)
+		return err;
+
+	if (err > 0) /* Failed in FW, command didn't execute */
+		err = deliv_status_to_err(err);
+
 	if (err)
 		goto out_out;
 
-	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
-	if (status) {
-		err = status_to_err(status);
-		goto out_out;
-	}
-
-	if (!callback)
-		err = mlx5_copy_from_msg(out, outb, out_size);
+	/* command completed by FW */
+	err = mlx5_copy_from_msg(out, outb, out_size);
 
 out_out:
-	if (!callback)
-		mlx5_free_cmd_msg(dev, outb);
+	mlx5_free_cmd_msg(dev, outb);
 
 out_in:
-	if (!callback)
-		free_msg(dev, inb);
+	free_msg(dev, inb);
 	return err;
 }
 
+/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+static int cmd_status_err(int err, void *out)
+{
+	if (err) /* -EREMOTEIO is preserved */
+		return err == -EREMOTEIO ? -EIO : err;
+
+	if (MLX5_GET(mbox_out, out, status) != MLX5_CMD_STAT_OK)
+		return -EREMOTEIO;
+
+	return 0;
+}
+
+/**
+ * mlx5_cmd_do - Executes a fw command, wait for completion.
+ * Unlike mlx5_cmd_exec, this function will not translate or intercept
+ * outbox.status and will return -EREMOTEIO when
+ * outbox.status != MLX5_CMD_STAT_OK
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return:
+ * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
+ *              Caller must check FW outbox status.
+ * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
+ * < 0 : Command execution couldn't be performed by firmware or driver
+ */
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
+{
+	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+
+	return cmd_status_err(err, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_do);
+
+/**
+ * mlx5_cmd_exec - Executes a fw command, wait for completion
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ *          and outbox status is ok.
+ */
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		  int out_size)
 {
-	int err;
+	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);
 
-	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
-	return err ? : mlx5_cmd_check(dev, in, out);
+	return mlx5_cmd_check(dev, err, in, out);
 }
 EXPORT_SYMBOL(mlx5_cmd_exec);
 
+/**
+ * mlx5_cmd_exec_polling - Executes a fw command, poll for completion
+ * Needed for driver force teardown, when command completion EQ
+ * will not be available to complete the command
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ *          and outbox status is ok.
+ */
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+			  void *out, int out_size)
+{
+	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+
+	err = cmd_status_err(err, out);
+	return mlx5_cmd_check(dev, err, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
 void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
 			     struct mlx5_async_ctx *ctx)
 {
@@ -1894,8 +1988,10 @@ EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
 static void mlx5_cmd_exec_cb_handler(int status, void *_work)
 {
 	struct mlx5_async_work *work = _work;
-	struct mlx5_async_ctx *ctx = work->ctx;
+	struct mlx5_async_ctx *ctx;
 
+	ctx = work->ctx;
+	status = cmd_status_err(status, work->out);
 	work->user_callback(status, work);
 	if (atomic_dec_and_test(&ctx->num_inflight))
 		wake_up(&ctx->wait);
@@ -1909,6 +2005,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
 
 	work->ctx = ctx;
 	work->user_callback = callback;
+	work->out = out;
 	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
 		return -EIO;
 	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
@@ -1920,17 +2017,6 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
 }
 EXPORT_SYMBOL(mlx5_cmd_exec_cb);
 
-int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
-			  void *out, int out_size)
-{
-	int err;
-
-	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
-
-	return err ? : mlx5_cmd_check(dev, in, out);
-}
-EXPORT_SYMBOL(mlx5_cmd_exec_polling);
-
 static void destroy_msg_cache(struct mlx5_core_dev *dev)
 {
 	struct cmd_msg_cache *ch;

@@ -86,8 +86,9 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
 	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
 }
 
-int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			u32 *in, int inlen, u32 *out, int outlen)
+/* Callers must verify outbox status in case of err */
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+		   u32 *in, int inlen, u32 *out, int outlen)
 {
 	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
 			   c_eqn_or_apu_element);
@@ -101,7 +102,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 
 	memset(out, 0, outlen);
 	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
-	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+	err = mlx5_cmd_do(dev, in, inlen, out, outlen);
 	if (err)
 		return err;
 
@@ -148,6 +149,16 @@ err_cmd:
 	mlx5_cmd_exec_in(dev, destroy_cq, din);
 	return err;
 }
+EXPORT_SYMBOL(mlx5_create_cq);
+
+/* oubox is checked and err val is normalized */
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+			u32 *in, int inlen, u32 *out, int outlen)
+{
+	int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);
+
+	return mlx5_cmd_check(dev, err, in, out);
+}
 EXPORT_SYMBOL(mlx5_core_create_cq);
 
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)

@@ -100,15 +100,11 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
 	}
 
 	net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
-	err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
+	err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
 	if (err)
-		goto out;
+		return err;
 
-	err = mlx5_fw_reset_wait_reset_done(dev);
-out:
-	if (err)
-		NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
-	return err;
+	return mlx5_fw_reset_wait_reset_done(dev);
 }
 
 static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,

@@ -139,15 +139,6 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 	return true;
 }
 
-static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
-{
-#ifdef CONFIG_MLX5_EN_IPSEC
-	return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#else
-	return false;
-#endif
-}
-
 static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
 						  struct mlx5e_accel_tx_state *state)
 {

@@ -92,6 +92,7 @@ static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw,
 
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+	flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp;
 	vport->ingress.offloads.modify_metadata_rule =
 				mlx5_add_flow_rules(vport->ingress.acl,
 						    NULL, &flow_act, NULL, 0);
@@ -117,6 +118,36 @@ static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw,
 	vport->ingress.offloads.modify_metadata_rule = NULL;
 }
 
+static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
+						struct mlx5_vport *vport)
+{
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *flow_rule;
+	int err = 0;
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+	flow_act.fg = vport->ingress.offloads.drop_grp;
+	flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
+	if (IS_ERR(flow_rule)) {
+		err = PTR_ERR(flow_rule);
+		goto out;
+	}
+
+	vport->ingress.offloads.drop_rule = flow_rule;
+out:
+	return err;
+}
+
+static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw,
+						  struct mlx5_vport *vport)
+{
+	if (!vport->ingress.offloads.drop_rule)
+		return;
+
+	mlx5_del_flow_rules(vport->ingress.offloads.drop_rule);
+	vport->ingress.offloads.drop_rule = NULL;
+}
+
 static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw,
 					     struct mlx5_vport *vport)
 {
@@ -154,6 +185,7 @@ static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw,
 {
 	esw_acl_ingress_allow_rule_destroy(vport);
 	esw_acl_ingress_mod_metadata_destroy(esw, vport);
+	esw_acl_ingress_src_port_drop_destroy(esw, vport);
 }
 
 static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
@@ -170,10 +202,29 @@ static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw,
 	if (!flow_group_in)
 		return -ENOMEM;
 
+	if (vport->vport == MLX5_VPORT_UPLINK) {
+		/* This group can hold an FTE to drop all traffic.
+		 * Need in case LAG is enabled.
+		 */
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+		if (IS_ERR(g)) {
+			ret = PTR_ERR(g);
+			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+				 vport->vport, ret);
+			goto drop_err;
+		}
+		vport->ingress.offloads.drop_grp = g;
+		flow_index++;
+	}
+
 	if (esw_acl_ingress_prio_tag_enabled(esw, vport)) {
 		/* This group is to hold FTE to match untagged packets when prio_tag
 		 * is enabled.
 		 */
+		memset(flow_group_in, 0, inlen);
 		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
 					      flow_group_in, match_criteria);
 		MLX5_SET(create_flow_group_in, flow_group_in,
@@ -221,6 +272,11 @@ metadata_err:
 		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
 	}
 prio_tag_err:
+	if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+		vport->ingress.offloads.drop_grp = NULL;
+	}
+drop_err:
 	kvfree(flow_group_in);
 	return ret;
 }
@@ -236,6 +292,11 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
 		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
 		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
 	}
+
+	if (vport->ingress.offloads.drop_grp) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp);
+		vport->ingress.offloads.drop_grp = NULL;
+	}
 }
 
 int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
@@ -252,6 +313,8 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
 
 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
 		num_ftes++;
+	if (vport->vport == MLX5_VPORT_UPLINK)
+		num_ftes++;
 	if (esw_acl_ingress_prio_tag_enabled(esw, vport))
 		num_ftes++;
 
@@ -320,3 +383,27 @@ out:
 	vport->metadata = vport->default_metadata;
 	return err;
 }
+
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+	if (IS_ERR(vport)) {
+		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+		return PTR_ERR(vport);
+	}
+
+	return esw_acl_ingress_src_port_drop_create(esw, vport);
+}
+
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num)
+{
+	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+	if (WARN_ON_ONCE(IS_ERR(vport))) {
+		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+		return;
+	}
+
+	esw_acl_ingress_src_port_drop_destroy(esw, vport);
+}

@@ -6,6 +6,7 @@
 
 #include "eswitch.h"
 
+#ifdef CONFIG_MLX5_ESWITCH
 /* Eswitch acl egress external APIs */
 int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
 void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
@@ -25,5 +26,19 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vpor
 void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
 int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
 					   u32 metadata);
+void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);
+
+#else /* CONFIG_MLX5_ESWITCH */
+static void
+mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw,
+					     u16 vport_num)
+{}
+
+static int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw,
+						       u16 vport_num)
+{
+	return 0;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_ESWITCH_ACL_OFLD_H__ */

@@ -113,8 +113,11 @@ struct vport_ingress {
 			 * packet with metadata.
			 */
			struct mlx5_flow_group *metadata_allmatch_grp;
+			/* Optional group to add a drop all rule */
+			struct mlx5_flow_group *drop_grp;
			struct mlx5_modify_hdr *modify_metadata;
			struct mlx5_flow_handle *modify_metadata_rule;
+			struct mlx5_flow_handle *drop_rule;
		} offloads;
	};
 

@@ -2379,60 +2379,6 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
 	mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
 }
 
-static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master,
-					     struct mlx5_core_dev *slave)
-{
-	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {};
-	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
-	struct mlx5_eswitch *esw;
-	struct mlx5_flow_root_namespace *root;
-	struct mlx5_flow_namespace *ns;
-	struct mlx5_vport *vport;
-	int err;
-
-	MLX5_SET(set_flow_table_root_in, in, opcode,
-		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
-	MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL);
-	MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
-	MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK);
-
-	if (master) {
-		esw = master->priv.eswitch;
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
-		MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1);
-		MLX5_SET(set_flow_table_root_in, in, table_vport_number,
-			 MLX5_VPORT_UPLINK);
-
-		ns = mlx5_get_flow_vport_acl_namespace(master,
-						       MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-						       vport->index);
-		root = find_root(&ns->node);
-		mutex_lock(&root->chain_lock);
-
-		MLX5_SET(set_flow_table_root_in, in,
-			 table_eswitch_owner_vhca_id_valid, 1);
-		MLX5_SET(set_flow_table_root_in, in,
-			 table_eswitch_owner_vhca_id,
-			 MLX5_CAP_GEN(master, vhca_id));
-		MLX5_SET(set_flow_table_root_in, in, table_id,
-			 root->root_ft->id);
-	} else {
-		esw = slave->priv.eswitch;
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
-		ns = mlx5_get_flow_vport_acl_namespace(slave,
-						       MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-						       vport->index);
-		root = find_root(&ns->node);
-		mutex_lock(&root->chain_lock);
-		MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
-	}
-
-	err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
-	mutex_unlock(&root->chain_lock);
-
-	return err;
-}
-
 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
 				  struct mlx5_core_dev *slave)
 {
@@ -2614,15 +2560,10 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
 {
 	int err;
 
-	err = esw_set_uplink_slave_ingress_root(master_esw->dev,
-						slave_esw->dev);
-	if (err)
-		return -EINVAL;
-
 	err = esw_set_slave_root_fdb(master_esw->dev,
 				     slave_esw->dev);
 	if (err)
-		goto err_fdb;
+		return err;
 
 	err = esw_set_master_egress_rule(master_esw->dev,
 					 slave_esw->dev);
@@ -2634,9 +2575,6 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
 err_acl:
 	esw_set_slave_root_fdb(NULL, slave_esw->dev);
 
-err_fdb:
-	esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
-
 	return err;
 }
 
@@ -2645,7 +2583,6 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
 {
 	esw_unset_master_egress_rule(master_esw->dev);
 	esw_set_slave_root_fdb(NULL, slave_esw->dev);
-	esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
 }
 
 #define ESW_OFFLOADS_DEVCOM_PAIR	(0)
@@ -2842,6 +2779,19 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
 	return true;
 }
 
+#define MLX5_ESW_METADATA_RSVD_UPLINK 1
+
+/* Share the same metadata for uplink's. This is fine because:
+ * (a) In shared FDB mode (LAG) both uplink's are treated the
+ *     same and tagged with the same metadata.
+ * (b) In non shared FDB mode, packets from physical port0
+ *     cannot hit eswitch of PF1 and vice versa.
+ */
+static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
+{
+	return MLX5_ESW_METADATA_RSVD_UPLINK;
+}
+
 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
 {
 	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
@@ -2856,8 +2806,10 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
 		return 0;
 
 	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
-	/* Use only non-zero vport_id (1-4095) for all PF's */
-	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
+	/* Use only non-zero vport_id (2-4095) for all PF's */
+	id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
+			     MLX5_ESW_METADATA_RSVD_UPLINK + 1,
+			     vport_end_ida, GFP_KERNEL);
 	if (id < 0)
 		return 0;
 	id = (pf_num << ESW_VPORT_BITS) | id;
@@ -2875,7 +2827,11 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
 					     struct mlx5_vport *vport)
 {
-	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+	if (vport->vport == MLX5_VPORT_UPLINK)
+		vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
+	else
+		vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
+
 	vport->metadata = vport->default_metadata;
 	return vport->metadata ? 0 : -ENOSPC;
 }
@@ -2886,6 +2842,9 @@ static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
 	if (!vport->default_metadata)
 		return;
 
+	if (vport->vport == MLX5_VPORT_UPLINK)
+		return;
+
 	WARN_ON(vport->metadata != vport->default_metadata);
 	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
 }

@@ -1696,6 +1696,7 @@ static void free_match_list(struct match_list *head, bool ft_locked)
 static int build_match_list(struct match_list *match_head,
 			    struct mlx5_flow_table *ft,
 			    const struct mlx5_flow_spec *spec,
+			    struct mlx5_flow_group *fg,
 			    bool ft_locked)
 {
 	struct rhlist_head *tmp, *list;
@@ -1710,6 +1711,9 @@ static int build_match_list(struct match_list *match_head,
 	rhl_for_each_entry_rcu(g, tmp, list, hash) {
 		struct match_list *curr_match;
 
+		if (fg && fg != g)
+			continue;
+
 		if (unlikely(!tree_get_node(&g->node)))
 			continue;
 
@@ -1889,6 +1893,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (!check_valid_spec(spec))
 		return ERR_PTR(-EINVAL);
 
+	if (flow_act->fg && ft->autogroup.active)
+		return ERR_PTR(-EINVAL);
+
 	for (i = 0; i < dest_num; i++) {
 		if (!dest_is_valid(&dest[i], flow_act, ft))
 			return ERR_PTR(-EINVAL);
@@ -1898,7 +1905,7 @@ search_again_locked:
 	version = atomic_read(&ft->node.version);
 
 	/* Collect all fgs which has a matching match_criteria */
-	err = build_match_list(&match_head, ft, spec, take_write);
+	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
 	if (err) {
 		if (take_write)
 			up_write_ref_node(&ft->node, false);
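A hedged usage sketch for the new flow_act->fg field: a caller that created its own flow group can now pin a rule to that exact group instead of letting the core search every group with matching criteria. Here ft, fg, spec and dest are assumed to already exist, and the table must not use autogrouping (the hunk above rejects that combination).

	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_act.fg = fg;	/* insert into this specific group only */
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule))
		return PTR_ERR(rule);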
@@ -57,7 +57,8 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
 	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
 }
 
-static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
+			       u8 *reset_type, u8 *reset_state)
 {
 	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
 	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -71,25 +72,67 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *r
 	*reset_level = MLX5_GET(mfrl_reg, out, reset_level);
 	if (reset_type)
 		*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
+	if (reset_state)
+		*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
 
 	return 0;
 }
 
 int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
 {
-	return mlx5_reg_mfrl_query(dev, reset_level, reset_type);
+	return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
 }
 
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel)
+static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
+					     struct netlink_ext_ack *extack)
+{
+	u8 reset_state;
+
+	if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+		goto out;
+
+	switch (reset_state) {
+	case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
+	case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
+		NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
+		return -EBUSY;
+	case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
+		NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout");
+		return -ETIMEDOUT;
+	case MLX5_MFRL_REG_RESET_STATE_NACK:
+		NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
+		return -EPERM;
+	}
+
+out:
+	NL_SET_ERR_MSG_MOD(extack, "Sync reset failed");
+	return -EIO;
+}
+
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+				 struct netlink_ext_ack *extack)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
 	int err;
 
 	set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
-	err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true);
-	if (err)
-		clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
-	return err;
+
+	MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
+	MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
+	MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);
+	err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out),
+			      MLX5_REG_MFRL, 0, 1, false);
+	if (!err)
+		return 0;
+
+	clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+	if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
+		return mlx5_fw_reset_get_reset_state_err(dev, extack);
+
+	NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
+	return mlx5_cmd_check(dev, err, in, out);
 }
 
 int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
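For context, a sketch of how a devlink-reload style caller might use the reworked API. The function name here is illustrative, not part of this diff; the point is that the extack now carries the precise reset_state reason (busy, timeout, nack) back to userspace instead of a generic failure.

	/* Illustrative caller; the devlink wiring is assumed, not shown here */
	static int fw_activate_example(struct devlink *devlink,
				       struct netlink_ext_ack *extack)
	{
		struct mlx5_core_dev *dev = devlink_priv(devlink);

		/* On failure, extack holds "Sync reset was already triggered",
		 * "Sync reset got timeout", etc.
		 */
		return mlx5_fw_reset_set_reset_sync(dev, 0, extack);
	}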
@@ -9,7 +9,8 @@
 void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
 bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
 int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
-int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel);
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
+				 struct netlink_ext_ack *extack);
 int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
 
 int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
@@ -31,15 +31,22 @@
  */
 
 #include <linux/netdevice.h>
+#include <net/bonding.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/eswitch.h>
 #include <linux/mlx5/vport.h>
 #include "lib/devcom.h"
 #include "mlx5_core.h"
 #include "eswitch.h"
+#include "esw/acl/ofld.h"
 #include "lag.h"
 #include "mp.h"
 
+enum {
+	MLX5_LAG_EGRESS_PORT_1 = 1,
+	MLX5_LAG_EGRESS_PORT_2,
+};
+
 /* General purpose, use for short periods of time.
  * Beware of lock dependencies (preferably, no locks should be acquired
  * under it).
@@ -193,15 +200,71 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 	p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
 	       tracker->netdev_state[MLX5_LAG_P2].link_up;
 
-	*port1 = 1;
-	*port2 = 2;
+	*port1 = MLX5_LAG_EGRESS_PORT_1;
+	*port2 = MLX5_LAG_EGRESS_PORT_2;
 	if ((!p1en && !p2en) || (p1en && p2en))
 		return;
 
 	if (p1en)
-		*port2 = 1;
+		*port2 = MLX5_LAG_EGRESS_PORT_1;
 	else
-		*port1 = 2;
+		*port1 = MLX5_LAG_EGRESS_PORT_2;
+}
+
+static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
+{
+	return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop;
+}
+
+static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		if (!ldev->pf[i].has_drop)
+			continue;
+
+		mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
+							     MLX5_VPORT_UPLINK);
+		ldev->pf[i].has_drop = false;
+	}
+}
+
+static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
+				     struct lag_tracker *tracker)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+	struct mlx5_core_dev *inactive;
+	u8 v2p_port1, v2p_port2;
+	int inactive_idx;
+	int err;
+
+	/* First delete the current drop rule so there won't be any dropped
+	 * packets
+	 */
+	mlx5_lag_drop_rule_cleanup(ldev);
+
+	if (!ldev->tracker.has_inactive)
+		return;
+
+	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2);
+
+	if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) {
+		inactive = dev1;
+		inactive_idx = MLX5_LAG_P2;
+	} else {
+		inactive = dev0;
+		inactive_idx = MLX5_LAG_P1;
+	}
+
+	err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch,
+							  MLX5_VPORT_UPLINK);
+	if (!err)
+		ldev->pf[inactive_idx].has_drop = true;
+	else
+		mlx5_core_err(inactive,
+			      "Failed to create lag drop rule, error: %d", err);
 }
 
 static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
@@ -238,6 +301,10 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
 					   ldev->v2p_map[MLX5_LAG_P1],
 					   ldev->v2p_map[MLX5_LAG_P2]);
 	}
+
+	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+	    !(ldev->flags & MLX5_LAG_FLAG_ROCE))
+		mlx5_lag_drop_rule_setup(ldev, tracker);
 }
 
 static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
@@ -339,6 +406,10 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
 		return err;
 	}
 
+	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+	    !roce_lag)
+		mlx5_lag_drop_rule_setup(ldev, tracker);
+
 	ldev->flags |= flags;
 	ldev->shared_fdb = shared_fdb;
 	return 0;
@@ -347,6 +418,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
 static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
 	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
 	bool roce_lag = __mlx5_lag_is_roce(ldev);
 	u8 flags = ldev->flags;
@@ -356,8 +428,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 	mlx5_lag_mp_reset(ldev);
 
 	if (ldev->shared_fdb) {
-		mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
-							 ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
+		mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
+							 dev1->priv.eswitch);
 		ldev->shared_fdb = false;
 	}
 
@@ -372,11 +444,15 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 				      "Failed to deactivate VF LAG; driver restart required\n"
 				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
 		}
-	} else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
-		mlx5_lag_port_sel_destroy(ldev);
+		return err;
 	}
 
-	return err;
+	if (flags & MLX5_LAG_FLAG_HASH_BASED)
+		mlx5_lag_port_sel_destroy(ldev);
+	if (mlx5_lag_has_drop_rule(ldev))
+		mlx5_lag_drop_rule_cleanup(ldev);
+
+	return 0;
 }
 
 static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
@@ -613,6 +689,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	struct net_device *upper = info->upper_dev, *ndev_tmp;
 	struct netdev_lag_upper_info *lag_upper_info = NULL;
 	bool is_bonded, is_in_lag, mode_supported;
+	bool has_inactive = 0;
+	struct slave *slave;
 	int bond_status = 0;
 	int num_slaves = 0;
 	int changed = 0;
@@ -632,8 +710,12 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
 		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
-		if (idx >= 0)
+		if (idx >= 0) {
+			slave = bond_slave_get_rcu(ndev_tmp);
+			if (slave)
+				has_inactive |= bond_is_slave_inactive(slave);
 			bond_status |= (1 << idx);
+		}
 
 		num_slaves++;
 	}
@@ -648,6 +730,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
 		tracker->hash_type = lag_upper_info->hash_type;
 	}
 
+	tracker->has_inactive = has_inactive;
 	/* Determine bonding status:
 	 * A device is considered bonded if both its physical ports are slaves
 	 * of the same lag master, and only them.
@@ -704,6 +787,38 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
 	return 1;
 }
 
+static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
+					    struct lag_tracker *tracker,
+					    struct net_device *ndev)
+{
+	struct net_device *ndev_tmp;
+	struct slave *slave;
+	bool has_inactive = 0;
+	int idx;
+
+	if (!netif_is_lag_master(ndev))
+		return 0;
+
+	rcu_read_lock();
+	for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
+		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+		if (idx < 0)
+			continue;
+
+		slave = bond_slave_get_rcu(ndev_tmp);
+		if (slave)
+			has_inactive |= bond_is_slave_inactive(slave);
+	}
+	rcu_read_unlock();
+
+	if (tracker->has_inactive == has_inactive)
+		return 0;
+
+	tracker->has_inactive = has_inactive;
+
+	return 1;
+}
+
 static int mlx5_lag_netdev_event(struct notifier_block *this,
 				 unsigned long event, void *ptr)
 {
@@ -712,7 +827,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 	struct mlx5_lag *ldev;
 	int changed = 0;
 
-	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+	if (event != NETDEV_CHANGEUPPER &&
+	    event != NETDEV_CHANGELOWERSTATE &&
+	    event != NETDEV_CHANGEINFODATA)
 		return NOTIFY_DONE;
 
 	ldev = container_of(this, struct mlx5_lag, nb);
@@ -728,6 +845,9 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
 		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
 							     ndev, ptr);
 		break;
+	case NETDEV_CHANGEINFODATA:
+		changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
+		break;
 	}
 
 	ldev->tracker = tracker;
@@ -28,6 +28,7 @@ enum {
 struct lag_func {
 	struct mlx5_core_dev *dev;
 	struct net_device    *netdev;
+	bool has_drop;
 };
 
 /* Used for collection of netdev event info. */
@@ -35,6 +36,7 @@ struct lag_tracker {
 	enum   netdev_lag_tx_type           tx_type;
 	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
 	unsigned int is_bonded:1;
+	unsigned int has_inactive:1;
 	enum netdev_lag_hash hash_type;
 };
|
@ -50,7 +50,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
|
|||||||
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
|
static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev,
|
||||||
enum mlx5_lag_port_affinity port)
|
enum mlx5_lag_port_affinity port)
|
||||||
{
|
{
|
||||||
struct lag_tracker tracker;
|
struct lag_tracker tracker = {};
|
||||||
|
|
||||||
if (!__mlx5_lag_is_multipath(ldev))
|
if (!__mlx5_lag_is_multipath(ldev))
|
||||||
return;
|
return;
|
||||||
|
@@ -92,13 +92,6 @@ mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
 static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
 {
 }
-
-static inline int
-mlx5_hv_vhca_write_agent(struct mlx5_hv_vhca_agent *agent,
-			 void *buf, int len)
-{
-	return 0;
-}
 #endif
 
 #endif /* __LIB_HV_VHCA_H__ */
|
@ -736,10 +736,9 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
|
|||||||
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
|
MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
|
||||||
err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
|
err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
|
||||||
if (err) {
|
if (err) {
|
||||||
u32 syndrome;
|
u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
|
||||||
u8 status;
|
u8 status = MLX5_GET(query_issi_out, query_out, status);
|
||||||
|
|
||||||
mlx5_cmd_mbox_status(query_out, &status, &syndrome);
|
|
||||||
if (!status || syndrome == MLX5_DRIVER_SYND) {
|
if (!status || syndrome == MLX5_DRIVER_SYND) {
|
||||||
mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
|
mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
|
||||||
err, status, syndrome);
|
err, status, syndrome);
|
||||||
|
@@ -33,9 +33,10 @@
 #include <linux/mlx5/port.h>
 #include "mlx5_core.h"
 
-int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
-			 int size_in, void *data_out, int size_out,
-			 u16 reg_id, int arg, int write)
+/* calling with verbose false will not print error to log */
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+		    void *data_out, int size_out, u16 reg_id, int arg,
+		    int write, bool verbose)
 {
 	int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
 	int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
@@ -57,7 +58,9 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
 	MLX5_SET(access_register_in, in, argument, arg);
 	MLX5_SET(access_register_in, in, register_id, reg_id);
 
-	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+	err = mlx5_cmd_do(dev, in, inlen, out, outlen);
+	if (verbose)
+		err = mlx5_cmd_check(dev, err, in, out);
 	if (err)
 		goto out;
 
@@ -69,6 +72,15 @@ out:
 	kvfree(in);
 	return err;
 }
+EXPORT_SYMBOL_GPL(mlx5_access_reg);
+
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+			 int size_in, void *data_out, int size_out,
+			 u16 reg_id, int arg, int write)
+{
+	return mlx5_access_reg(dev, data_in, size_in, data_out, size_out,
+			       reg_id, arg, write, true);
+}
 EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
 
 int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
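A small sketch of the new quiet mode: a caller that expects the firmware to refuse the access (as the sync-reset path above does) can pass verbose=false so nothing lands in dmesg, while existing mlx5_core_access_reg() users keep the old logging behavior. The MFRL buffers below are just an illustrative choice of register.

	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	int err;

	/* write = 1, verbose = false: a FW refusal stays out of the kernel log */
	err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out),
			      MLX5_REG_MFRL, 0, 1, false);
	if (err)
		return err;	/* the caller decides whether this is worth logging */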
@@ -183,6 +183,8 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
 	complete(&cq->free);
 }
 
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+		   u32 *in, int inlen, u32 *out, int outlen);
 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 			u32 *in, int inlen, u32 *out, int outlen);
 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
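Per this series, mlx5_create_cq() is the mlx5_cmd_do()-based variant of mlx5_core_create_cq(): it returns the raw outcome without the automatic check-and-log step, so a DEVX-style caller can forward the firmware status to userspace. A hedged fragment, with cq, in and out assumed to be prepared by the caller:

	err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);
	if (err) {
		/* may be -EREMOTEIO: 'out' then holds the FW status and
		 * syndrome for the caller to report, with no dmesg noise
		 * for the "expected" user-error cases.
		 */
		return err;
	}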
|
@ -863,20 +863,10 @@ struct mlx5_hca_vport_context {
|
|||||||
bool grh_required;
|
bool grh_required;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
|
|
||||||
{
|
|
||||||
return buf->frags->buf + offset;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define STRUCT_FIELD(header, field) \
|
#define STRUCT_FIELD(header, field) \
|
||||||
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
|
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
|
||||||
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
|
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
|
||||||
|
|
||||||
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
|
|
||||||
{
|
|
||||||
return pci_get_drvdata(pdev);
|
|
||||||
}
|
|
||||||
|
|
||||||
extern struct dentry *mlx5_debugfs_root;
|
extern struct dentry *mlx5_debugfs_root;
|
||||||
|
|
||||||
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
|
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
|
||||||
@ -965,6 +955,7 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
|
|||||||
struct mlx5_async_work {
|
struct mlx5_async_work {
|
||||||
struct mlx5_async_ctx *ctx;
|
struct mlx5_async_ctx *ctx;
|
||||||
mlx5_async_cbk_t user_callback;
|
mlx5_async_cbk_t user_callback;
|
||||||
|
void *out; /* pointer to the cmd output buffer */
|
||||||
};
|
};
|
||||||
|
|
||||||
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
|
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
|
||||||
@ -973,7 +964,9 @@ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
|
|||||||
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
|
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
|
||||||
void *out, int out_size, mlx5_async_cbk_t callback,
|
void *out, int out_size, mlx5_async_cbk_t callback,
|
||||||
struct mlx5_async_work *work);
|
struct mlx5_async_work *work);
|
||||||
|
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
|
||||||
|
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
|
||||||
|
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
|
||||||
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
|
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
|
||||||
int out_size);
|
int out_size);
|
||||||
|
|
||||||
@ -991,7 +984,6 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
|
|||||||
|
|
||||||
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
|
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
|
||||||
void *out, int out_size);
|
void *out, int out_size);
|
||||||
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
|
|
||||||
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
|
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
|
||||||
|
|
||||||
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
|
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
|
||||||
@ -1039,6 +1031,9 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
|
|||||||
|
|
||||||
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
|
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
|
||||||
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
|
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
|
||||||
|
int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
|
||||||
|
void *data_out, int size_out, u16 reg_id, int arg,
|
||||||
|
int write, bool verbose);
|
||||||
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
|
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
|
||||||
int size_in, void *data_out, int size_out,
|
int size_in, void *data_out, int size_out,
|
||||||
u16 reg_num, int arg, int write);
|
u16 reg_num, int arg, int write);
|
||||||
|
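The newly exported mlx5_cmd_do()/mlx5_cmd_check() pair enables the split-responsibility pattern this series introduces: execute first, then decide whether to apply the legacy check-and-log step. A minimal sketch, where in/out buffers and the user_triggered flag are assumptions of the example:

	err = mlx5_cmd_do(dev, in, in_sz, out, out_sz);
	/* err == 0:          FW executed the command successfully
	 * err == -EREMOTEIO: FW returned bad status; details live in 'out'
	 * other err:         the command never executed in FW
	 */
	if (user_triggered)	/* skip dmesg for expected, user-provoked errors */
		return err;
	return mlx5_cmd_check(dev, err, in, out);	/* classic translate and log */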
@@ -224,6 +224,7 @@ struct mlx5_flow_act {
 	u32 flags;
 	struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
 	struct ib_counters *counters;
+	struct mlx5_flow_group *fg;
 };
 
 #define MLX5_DECLARE_FLOW_ACT(name) \
|
@ -9687,7 +9687,8 @@ struct mlx5_ifc_pcam_reg_bits {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct mlx5_ifc_mcam_enhanced_features_bits {
|
struct mlx5_ifc_mcam_enhanced_features_bits {
|
||||||
u8 reserved_at_0[0x6b];
|
u8 reserved_at_0[0x6a];
|
||||||
|
u8 reset_state[0x1];
|
||||||
u8 ptpcyc2realtime_modify[0x1];
|
u8 ptpcyc2realtime_modify[0x1];
|
||||||
u8 reserved_at_6c[0x2];
|
u8 reserved_at_6c[0x2];
|
||||||
u8 pci_status_and_power[0x1];
|
u8 pci_status_and_power[0x1];
|
||||||
@ -10368,6 +10369,14 @@ struct mlx5_ifc_mcda_reg_bits {
|
|||||||
u8 data[][0x20];
|
u8 data[][0x20];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum {
|
||||||
|
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
|
||||||
|
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
|
||||||
|
MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
|
||||||
|
MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3,
|
||||||
|
MLX5_MFRL_REG_RESET_STATE_NACK = 4,
|
||||||
|
};
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0),
|
MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0),
|
||||||
MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1),
|
MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1),
|
||||||
@ -10386,7 +10395,8 @@ struct mlx5_ifc_mfrl_reg_bits {
|
|||||||
u8 pci_sync_for_fw_update_start[0x1];
|
u8 pci_sync_for_fw_update_start[0x1];
|
||||||
u8 pci_sync_for_fw_update_resp[0x2];
|
u8 pci_sync_for_fw_update_resp[0x2];
|
||||||
u8 rst_type_sel[0x3];
|
u8 rst_type_sel[0x3];
|
||||||
u8 reserved_at_28[0x8];
|
u8 reserved_at_28[0x4];
|
||||||
|
u8 reset_state[0x4];
|
||||||
u8 reset_type[0x8];
|
u8 reset_type[0x8];
|
||||||
u8 reset_level[0x8];
|
u8 reset_level[0x8];
|
||||||
};
|
};
|
||||||
|
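Putting the two mlx5_ifc.h additions together, a hedged sketch of how a consumer is expected to use the new field: gate on the MCAM capability bit first, then decode reset_state from a queried MFRL output buffer ('out' is assumed to hold a fresh query result).

	if (MLX5_CAP_MCAM_FEATURE(dev, reset_state)) {	/* new MCAM bit */
		u8 state = MLX5_GET(mfrl_reg, out, reset_state);

		/* one of the hosts refused the sync reset */
		if (state == MLX5_MFRL_REG_RESET_STATE_NACK)
			return -EPERM;
	}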