Skip to content

Commit 841b07f

Browse files
monis410 authored and dledford committed
IB/mlx5: Block MR WR if UMR is not possible
Check conditions that are mandatory to post_send UMR WQEs. 1. Modifying page size. 2. Modifying remote atomic permissions if atomic access is required. If either condition is not fulfilled then fail to post_send() flow. Fixes: c8d75a9 ("IB/mlx5: Respect new UMR capabilities") Signed-off-by: Moni Shoua <[email protected]> Reviewed-by: Guy Levi <[email protected]> Signed-off-by: Leon Romanovsky <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Doug Ledford <[email protected]>
1 parent 25a4517 commit 841b07f

File tree

1 file changed

+19
-5
lines changed
  • drivers/infiniband/hw/mlx5

1 file changed

+19
-5
lines changed

drivers/infiniband/hw/mlx5/qp.c

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
```diff
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
 		MLX5_IB_UMR_OCTOWORD;
 }
 
-static __be64 frwr_mkey_mask(void)
+static __be64 frwr_mkey_mask(bool atomic)
 {
 	u64 result;
 
@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
 		MLX5_MKEY_MASK_LW		|
 		MLX5_MKEY_MASK_RR		|
 		MLX5_MKEY_MASK_RW		|
-		MLX5_MKEY_MASK_A		|
 		MLX5_MKEY_MASK_SMALL_FENCE	|
 		MLX5_MKEY_MASK_FREE;
 
+	if (atomic)
+		result |= MLX5_MKEY_MASK_A;
+
 	return cpu_to_be64(result);
 }
 
@@ -4204,15 +4206,15 @@ static __be64 sig_mkey_mask(void)
 }
 
 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
-			    struct mlx5_ib_mr *mr, u8 flags)
+			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
 {
 	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
 
 	memset(umr, 0, sizeof(*umr));
 
 	umr->flags = flags;
 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
-	umr->mkey_mask = frwr_mkey_mask();
+	umr->mkey_mask = frwr_mkey_mask(atomic);
 }
 
 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 {
 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
 	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
 	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
 	u8 flags = 0;
 
+	if (!mlx5_ib_can_use_umr(dev, atomic)) {
+		mlx5_ib_warn(to_mdev(qp->ibqp.device),
+			     "Fast update of %s for MR is disabled\n",
+			     (MLX5_CAP_GEN(dev->mdev,
+					   umr_modify_entity_size_disabled)) ?
+				     "entity size" :
+				     "atomic access");
+		return -EINVAL;
+	}
+
 	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
 			     "Invalid IB_SEND_INLINE send flag\n");
@@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	if (umr_inline)
 		flags |= MLX5_UMR_INLINE;
 
-	set_reg_umr_seg(*seg, mr, flags);
+	set_reg_umr_seg(*seg, mr, flags, atomic);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
```

0 commit comments

Comments (0)