IB/mlx5: Disable atomic operations
Currently, atomic operations don't work properly. Disable them for the time being.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
commit 81bea28ffd
parent 2f6daec14d
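The driver-visible effect is that mlx5 now reports no atomic support, so well-behaved consumers stop issuing atomic work requests. As an illustrative sketch (not part of the patch), a userspace verbs consumer would detect this through the standard device query; ibv_query_device() and the IBV_ATOMIC_* values are the libibverbs API:

#include <infiniband/verbs.h>

/* Illustrative sketch: check the device's atomic capability before
 * posting IBV_WR_ATOMIC_CMP_AND_SWP / IBV_WR_ATOMIC_FETCH_AND_ADD.
 * After this patch, mlx5 devices report IBV_ATOMIC_NONE here. */
static int device_supports_atomics(struct ibv_context *ctx)
{
	struct ibv_device_attr attr;

	if (ibv_query_device(ctx, &attr))
		return 0;	/* query failed; assume no atomics */

	return attr.atomic_cap != IBV_ATOMIC_NONE;
}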
drivers/infiniband/hw/mlx5/main.c

@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_srq_sge = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
 	props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
-	props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap = IB_ATOMIC_HCA;
+	props->atomic_cap = IB_ATOMIC_NONE;
+	props->masked_atomic_cap = IB_ATOMIC_NONE;
 	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
 	props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
 	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
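Note that before this change masked_atomic_cap was reported as IB_ATOMIC_HCA unconditionally, even when the MLX5_DEV_CAP_FLAG_ATOMIC capability bit was clear; both fields are now pinned to IB_ATOMIC_NONE. For reference, the possible values come from the kernel verbs header (this enum is from include/rdma/ib_verbs.h, not part of this patch):

enum ib_atomic_cap {
	IB_ATOMIC_NONE,	/* no atomic support */
	IB_ATOMIC_HCA,	/* atomicity guaranteed among QPs on this HCA */
	IB_ATOMIC_GLOB	/* atomicity also guaranteed vs. other agents, e.g. CPUs */
};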
drivers/infiniband/hw/mlx5/qp.c

@@ -1661,29 +1661,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
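The deleted helpers were the only users of the hardware atomic WQE segments. As a sketch of what they were filling in, the segment layouts implied by the fields referenced above look like this (the authoritative definitions live in the mlx5 headers; all values are big-endian on the wire):

struct mlx5_wqe_atomic_seg {
	__be64	swap_add;	/* swap value (cmp-and-swap) or addend (fetch-and-add) */
	__be64	compare;	/* compare value (cmp-and-swap); 0 for fetch-and-add */
};

struct mlx5_wqe_masked_atomic_seg {
	__be64	swap_add;
	__be64	swap_add_mask;	/* which bits of swap_add take effect */
	__be64	compare;
	__be64	compare_mask;	/* which bits of compare participate */
};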
@@ -2073,28 +2050,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
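With this hunk, all four atomic opcodes now fall through to the same rejection path: the post fails up front with -ENOSYS and *bad_wr points at the offending request, instead of the hardware returning corrupt atomic results. A hypothetical caller-side sketch (ib_post_send() is the standard kernel verbs API; fall_back_to_sw_atomic() is an invented placeholder):

/* Hypothetical handling of the new failure mode in a kernel ULP. */
static int do_remote_atomic(struct ib_qp *qp, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int err;

	err = ib_post_send(qp, wr, &bad_wr);
	if (err == -ENOSYS)	/* atomics rejected, e.g. by this patch */
		return fall_back_to_sw_atomic(qp, wr);

	return err;
}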