@@ -836,26 +836,20 @@ static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	return umem;
 }
 
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct mlx5_ib_umr_context *context;
-	struct ib_wc wc;
-	int err;
+	struct mlx5_ib_umr_context *context =
+		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
 
-	while (1) {
-		err = ib_poll_cq(cq, 1, &wc);
-		if (err < 0) {
-			pr_warn("poll cq error %d\n", err);
-			return;
-		}
-		if (err == 0)
-			break;
+	context->status = wc->status;
+	complete(&context->done);
+}
 
-		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
-		context->status = wc.status;
-		complete(&context->done);
-	}
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+	context->cqe.done = mlx5_ib_umr_done;
+	context->status = -1;
+	init_completion(&context->done);
 }
 
 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
@@ -896,12 +890,13 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (err)
 		goto free_mr;
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&umrwr, 0, sizeof(umrwr));
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
 			 page_shift, virt_addr, len, access_flags);
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
@@ -1013,8 +1008,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 
 	dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&wr, 0, sizeof(wr));
-	wr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	wr.wr.wr_cqe = &umr_context.cqe;
 
 	sg.addr = dma;
 	sg.length = ALIGN(npages * sizeof(u64),
@@ -1031,7 +1028,6 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 	wr.mkey = mr->mmkey.key;
 	wr.target.offset = start_page_index;
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &wr.wr, &bad);
 	if (err) {
@@ -1204,11 +1200,12 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	struct ib_send_wr *bad;
 	int err;
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&umrwr.wr, 0, sizeof(umrwr));
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
@@ -1246,7 +1243,9 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 	int size;
 	int err;
 
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	mlx5_ib_init_umr_context(&umr_context);
+
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
 
 	if (flags & IB_MR_REREG_TRANS) {
@@ -1273,8 +1272,6 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
 	}
 
-	mlx5_ib_init_umr_context(&umr_context);
-
 	/* post send request to UMR QP */
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
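For reference, the caller-side flow this change relies on looks roughly like the sketch below: the UMR context lives on the stack, is initialized with mlx5_ib_init_umr_context(), is attached to the work request through wr_cqe instead of a wr_id cast, and the poster then sleeps on the embedded completion until mlx5_ib_umr_done() fires from the CQ handler. This is a hedged illustration assuming the surrounding mlx5_ib driver definitions (struct umr_common, struct mlx5_umr_wr from mlx5_ib.h); the wait_for_completion()/status check and the helper name are not part of the hunks shown.

```c
/*
 * Sketch only: how a UMR poster is expected to use the CQE-based context
 * introduced above. umr_post_and_wait_example() is a hypothetical name;
 * the wait/status step is assumed from the design of
 * struct mlx5_ib_umr_context rather than taken from these hunks.
 */
static int umr_post_and_wait_example(struct umr_common *umrc,
				     struct mlx5_umr_wr *umrwr)
{
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	int err;

	mlx5_ib_init_umr_context(&umr_context);	/* sets cqe.done, status = -1 */
	umrwr->wr.wr_cqe = &umr_context.cqe;	/* replaces the old wr_id cast */

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (!err) {
		/* mlx5_ib_umr_done() signals this from the completion handler */
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS)
			err = -EFAULT;
	}
	up(&umrc->sem);
	return err;
}
```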