Skip to content

Commit a74d241

Browse files
raindel-mellanox authored and rolandd committed
IB/mlx5: Refactor UMR to have its own context struct
Instead of having the UMR context part of each memory region, allocate a struct on the stack. This allows queuing multiple UMRs that access the same memory region. Signed-off-by: Shachar Raindel <[email protected]> Signed-off-by: Haggai Eran <[email protected]> Signed-off-by: Roland Dreier <[email protected]>
1 parent 48fea83 commit a74d241

File tree

2 files changed

+31
-22
lines changed

2 files changed

+31
-22
lines changed

drivers/infiniband/hw/mlx5/mlx5_ib.h

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -264,8 +264,6 @@ struct mlx5_ib_mr {
264264
__be64 *pas;
265265
dma_addr_t dma;
266266
int npages;
267-
struct completion done;
268-
enum ib_wc_status status;
269267
struct mlx5_ib_dev *dev;
270268
struct mlx5_create_mkey_mbox_out out;
271269
struct mlx5_core_sig_ctx *sig;
@@ -277,6 +275,17 @@ struct mlx5_ib_fast_reg_page_list {
277275
dma_addr_t map;
278276
};
279277

278+
struct mlx5_ib_umr_context {
279+
enum ib_wc_status status;
280+
struct completion done;
281+
};
282+
283+
static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
284+
{
285+
context->status = -1;
286+
init_completion(&context->done);
287+
}
288+
280289
struct umr_common {
281290
struct ib_pd *pd;
282291
struct ib_cq *cq;

drivers/infiniband/hw/mlx5/mr.c

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -708,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
708708

709709
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
710710
{
711-
struct mlx5_ib_mr *mr;
711+
struct mlx5_ib_umr_context *context;
712712
struct ib_wc wc;
713713
int err;
714714

@@ -721,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
721721
if (err == 0)
722722
break;
723723

724-
mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
725-
mr->status = wc.status;
726-
complete(&mr->done);
724+
context = (struct mlx5_ib_umr_context *)wc.wr_id;
725+
context->status = wc.status;
726+
complete(&context->done);
727727
}
728728
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
729729
}
@@ -735,6 +735,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
735735
struct mlx5_ib_dev *dev = to_mdev(pd->device);
736736
struct device *ddev = dev->ib_dev.dma_device;
737737
struct umr_common *umrc = &dev->umrc;
738+
struct mlx5_ib_umr_context umr_context;
738739
struct ib_send_wr wr, *bad;
739740
struct mlx5_ib_mr *mr;
740741
struct ib_sge sg;
@@ -774,24 +775,21 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
774775
}
775776

776777
memset(&wr, 0, sizeof(wr));
777-
wr.wr_id = (u64)(unsigned long)mr;
778+
wr.wr_id = (u64)(unsigned long)&umr_context;
778779
prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
779780

780-
/* We serialize polls so one process does not kidnap another's
781-
* completion. This is not a problem since wr is completed in
782-
* around 1 usec
783-
*/
781+
mlx5_ib_init_umr_context(&umr_context);
784782
down(&umrc->sem);
785-
init_completion(&mr->done);
786783
err = ib_post_send(umrc->qp, &wr, &bad);
787784
if (err) {
788785
mlx5_ib_warn(dev, "post send failed, err %d\n", err);
789786
goto unmap_dma;
790-
}
791-
wait_for_completion(&mr->done);
792-
if (mr->status != IB_WC_SUCCESS) {
793-
mlx5_ib_warn(dev, "reg umr failed\n");
794-
err = -EFAULT;
787+
} else {
788+
wait_for_completion(&umr_context.done);
789+
if (umr_context.status != IB_WC_SUCCESS) {
790+
mlx5_ib_warn(dev, "reg umr failed\n");
791+
err = -EFAULT;
792+
}
795793
}
796794

797795
mr->mmr.iova = virt_addr;
@@ -940,24 +938,26 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
940938
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
941939
{
942940
struct umr_common *umrc = &dev->umrc;
941+
struct mlx5_ib_umr_context umr_context;
943942
struct ib_send_wr wr, *bad;
944943
int err;
945944

946945
memset(&wr, 0, sizeof(wr));
947-
wr.wr_id = (u64)(unsigned long)mr;
946+
wr.wr_id = (u64)(unsigned long)&umr_context;
948947
prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
949948

949+
mlx5_ib_init_umr_context(&umr_context);
950950
down(&umrc->sem);
951-
init_completion(&mr->done);
952951
err = ib_post_send(umrc->qp, &wr, &bad);
953952
if (err) {
954953
up(&umrc->sem);
955954
mlx5_ib_dbg(dev, "err %d\n", err);
956955
goto error;
956+
} else {
957+
wait_for_completion(&umr_context.done);
958+
up(&umrc->sem);
957959
}
958-
wait_for_completion(&mr->done);
959-
up(&umrc->sem);
960-
if (mr->status != IB_WC_SUCCESS) {
960+
if (umr_context.status != IB_WC_SUCCESS) {
961961
mlx5_ib_warn(dev, "unreg umr failed\n");
962962
err = -EFAULT;
963963
goto error;

0 commit comments

Comments (0)