@@ -708,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 
 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct mlx5_ib_mr *mr;
+	struct mlx5_ib_umr_context *context;
 	struct ib_wc wc;
 	int err;
 
@@ -721,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 		if (err == 0)
 			break;
 
-		mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
-		mr->status = wc.status;
-		complete(&mr->done);
+		context = (struct mlx5_ib_umr_context *)(unsigned long)wc.wr_id;
+		context->status = wc.status;
+		complete(&context->done);
 	}
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 }
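
The completion context that the handler now recovers from wc.wr_id is not defined in this diff; a plausible minimal definition, matching how it is used here and in reg_umr()/unreg_umr() below (the real one would belong in mlx5_ib.h), is:

/* Sketch of the per-operation UMR completion context assumed by this
 * patch: it lives on the caller's stack, is located by
 * mlx5_umr_cq_handler() via wr_id, and carries the completion status
 * back to the waiter.
 */
struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->status = -1;	/* poisoned until the CQ handler fills it in */
	init_completion(&context->done);
}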
@@ -735,6 +735,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
@@ -774,24 +775,21 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	}
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
 
-	/* We serialize polls so one process does not kidnap another's
-	 * completion. This is not a problem since wr is completed in
-	 * around 1 usec
-	 */
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
 		goto unmap_dma;
-	}
-	wait_for_completion(&mr->done);
-	if (mr->status != IB_WC_SUCCESS) {
-		mlx5_ib_warn(dev, "reg umr failed\n");
-		err = -EFAULT;
+	} else {
+		wait_for_completion(&umr_context.done);
+		if (umr_context.status != IB_WC_SUCCESS) {
+			mlx5_ib_warn(dev, "reg umr failed\n");
+			err = -EFAULT;
+		}
 	}
 
 	mr->mmr.iova = virt_addr;
@@ -940,24 +938,26 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	int err;
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
 
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		up(&umrc->sem);
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto error;
+	} else {
+		wait_for_completion(&umr_context.done);
+		up(&umrc->sem);
 	}
-	wait_for_completion(&mr->done);
-	up(&umrc->sem);
-	if (mr->status != IB_WC_SUCCESS) {
+	if (umr_context.status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "unreg umr failed\n");
 		err = -EFAULT;
 		goto error;
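
Both call sites now share the same post-and-wait shape. Condensed, with error paths and DMA bookkeeping elided (a sketch, not verbatim from the patch):

/* Each UMR operation waits on its own stack-allocated completion, so
 * one process can no longer consume another's completion through a
 * shared mr->done field.
 */
struct mlx5_ib_umr_context umr_context;

wr.wr_id = (u64)(unsigned long)&umr_context;	/* handler recovers the context from wr_id */
mlx5_ib_init_umr_context(&umr_context);		/* initialized before posting, so an early completion cannot race it */
down(&umrc->sem);
err = ib_post_send(umrc->qp, &wr, &bad);
if (!err) {
	wait_for_completion(&umr_context.done);
	if (umr_context.status != IB_WC_SUCCESS)
		err = -EFAULT;
}
up(&umrc->sem);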