
Commit 81bea28

Eli Cohen authored and rolandd committed
IB/mlx5: Disable atomic operations
Currently atomic operations don't work properly. Disable them for the time being.

Signed-off-by: Eli Cohen <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>
1 parent: 2f6daec

2 files changed, 6 insertions(+), 47 deletions(-)

drivers/infiniband/hw/mlx5/main.c (2 additions, 3 deletions)

@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_srq_sge         = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
 	props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
-	props->atomic_cap          = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap   = IB_ATOMIC_HCA;
+	props->atomic_cap          = IB_ATOMIC_NONE;
+	props->masked_atomic_cap   = IB_ATOMIC_NONE;
 	props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
 	props->max_mcast_grp       = 1 << dev->mdev.caps.log_max_mcg;
 	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
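
With this hunk, mlx5_ib_query_device() unconditionally reports IB_ATOMIC_NONE, which userspace sees through the verbs device attributes. A minimal, illustrative libibverbs check (not part of this commit; device_supports_atomics is a hypothetical helper) might look like:

#include <infiniband/verbs.h>

/* Hypothetical helper: returns 1 if the device advertises any atomic support. */
static int device_supports_atomics(struct ibv_context *ctx)
{
	struct ibv_device_attr attr;

	if (ibv_query_device(ctx, &attr))
		return 0;               /* query failed; assume no atomics */

	/* mlx5 reports IBV_ATOMIC_NONE here after this commit */
	return attr.atomic_cap != IBV_ATOMIC_NONE;
}

Consumers that skip this check and post atomic work requests anyway are rejected by the driver, as the qp.c hunk below shows.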

drivers/infiniband/hw/mlx5/qp.c (4 additions, 44 deletions)

@@ -1661,29 +1661,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
@@ -2073,28 +2050,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
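
For in-kernel consumers, the qp.c change means ib_post_send() on an mlx5 QP now fails atomic work requests with -ENOSYS and sets bad_wr. A rough caller-side sketch under the 2013-era ib_send_wr layout (try_remote_cmp_swap is a hypothetical function; qp, sge, remote_addr and rkey are assumed to be set up elsewhere):

#include <rdma/ib_verbs.h>

/* Hypothetical caller: post a remote compare-and-swap and return the result. */
static int try_remote_cmp_swap(struct ib_qp *qp, struct ib_sge *sge,
			       u64 remote_addr, u32 rkey,
			       u64 expect, u64 newval)
{
	struct ib_send_wr wr = {}, *bad_wr;

	wr.opcode                = IB_WR_ATOMIC_CMP_AND_SWP;
	wr.send_flags            = IB_SEND_SIGNALED;
	wr.sg_list               = sge;   /* 8-byte local buffer receives the old value */
	wr.num_sge               = 1;
	wr.wr.atomic.remote_addr = remote_addr;
	wr.wr.atomic.rkey        = rkey;
	wr.wr.atomic.compare_add = expect;
	wr.wr.atomic.swap        = newval;

	/* mlx5_ib_post_send() now warns and returns -ENOSYS for this opcode */
	return ib_post_send(qp, &wr, &bad_wr);
}

Callers should treat -ENOSYS here as a cue to fall back to a non-atomic path, since the capability is also no longer advertised.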
