@@ -128,6 +128,10 @@ frwr_op_release_mr(struct rpcrdma_mw *r)
 {
 	int rc;
 
+	/* Ensure MW is not on any rl_registered list */
+	if (!list_empty(&r->mw_list))
+		list_del(&r->mw_list);
+
 	rc = ib_dereg_mr(r->frmr.fr_mr);
 	if (rc)
 		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
@@ -333,10 +337,9 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
  */
 static int
 frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
-	    int nsegs, bool writing)
+	    int nsegs, bool writing, struct rpcrdma_mw **out)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	struct rpcrdma_mr_seg *seg1 = seg;
 	struct rpcrdma_mw *mw;
 	struct rpcrdma_frmr *frmr;
 	struct ib_mr *mr;
@@ -345,8 +348,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	int rc, i, n, dma_nents;
 	u8 key;
 
-	mw = seg1->rl_mw;
-	seg1->rl_mw = NULL;
+	mw = NULL;
 	do {
 		if (mw)
 			rpcrdma_defer_mr_recovery(mw);
@@ -416,12 +418,11 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	if (rc)
 		goto out_senderr;
 
-	seg1->rl_mw = mw;
-	seg1->mr_rkey = mr->rkey;
-	seg1->mr_base = mr->iova;
-	seg1->mr_nsegs = mw->mw_nents;
-	seg1->mr_len = mr->length;
+	mw->mw_handle = mr->rkey;
+	mw->mw_length = mr->length;
+	mw->mw_offset = mr->iova;
 
+	*out = mw;
 	return mw->mw_nents;
 
 out_dmamap_err:
@@ -443,9 +444,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 }
 
 static struct ib_send_wr *
-__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
+__frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
 {
-	struct rpcrdma_mw *mw = seg->rl_mw;
 	struct rpcrdma_frmr *f = &mw->frmr;
 	struct ib_send_wr *invalidate_wr;
 
@@ -465,16 +465,16 @@ __frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
  *
  * Sleeps until it is safe for the host CPU to access the
  * previously mapped memory regions.
+ *
+ * Caller ensures that req->rl_registered is not empty.
  */
 static void
 frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
 	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-	struct rpcrdma_mr_seg *seg;
-	unsigned int i, nchunks;
+	struct rpcrdma_mw *mw, *tmp;
 	struct rpcrdma_frmr *f;
-	struct rpcrdma_mw *mw;
 	int rc;
 
 	dprintk("RPC: %s: req %p\n", __func__, req);
@@ -484,22 +484,18 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * Chain the LOCAL_INV Work Requests and post them with
 	 * a single ib_post_send() call.
 	 */
+	f = NULL;
 	invalidate_wrs = pos = prev = NULL;
-	seg = NULL;
-	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
-		seg = &req->rl_segments[i];
-
-		pos = __frwr_prepare_linv_wr(seg);
+	list_for_each_entry(mw, &req->rl_registered, mw_list) {
+		pos = __frwr_prepare_linv_wr(mw);
 
 		if (!invalidate_wrs)
 			invalidate_wrs = pos;
 		else
 			prev->next = pos;
 		prev = pos;
-
-		i += seg->mr_nsegs;
+		f = &mw->frmr;
 	}
-	f = &seg->rl_mw->frmr;
 
 	/* Strong send queue ordering guarantees that when the
 	 * last WR in the chain completes, all WRs in the chain
@@ -524,20 +520,12 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * them to the free MW list.
 	 */
 unmap:
-	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
-		seg = &req->rl_segments[i];
-		mw = seg->rl_mw;
-		seg->rl_mw = NULL;
-
+	list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
+		list_del_init(&mw->mw_list);
 		ib_dma_unmap_sg(ia->ri_device,
 				mw->mw_sg, mw->mw_nents, mw->mw_dir);
 		rpcrdma_put_mw(r_xprt, mw);
-
-		i += seg->mr_nsegs;
-		seg->mr_nsegs = 0;
 	}
-
-	req->rl_nchunks = 0;
 	return;
 
 reset_mrs:
@@ -547,17 +535,12 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	/* Find and reset the MRs in the LOCAL_INV WRs that did not
 	 * get posted. This is synchronous, and slow.
 	 */
-	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
-		seg = &req->rl_segments[i];
-		mw = seg->rl_mw;
+	list_for_each_entry(mw, &req->rl_registered, mw_list) {
 		f = &mw->frmr;
-
 		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
 			__frwr_reset_mr(ia, mw);
 			bad_wr = bad_wr->next;
 		}
-
-		i += seg->mr_nsegs;
 	}
 	goto unmap;
 }
@@ -569,22 +552,17 @@ static void
 frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 		   bool sync)
 {
-	struct rpcrdma_mr_seg *seg;
 	struct rpcrdma_mw *mw;
-	unsigned int i;
 
-	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
-		seg = &req->rl_segments[i];
-		mw = seg->rl_mw;
+	while (!list_empty(&req->rl_registered)) {
+		mw = list_first_entry(&req->rl_registered,
+				      struct rpcrdma_mw, mw_list);
+		list_del_init(&mw->mw_list);
 
 		if (sync)
 			frwr_op_recover_mr(mw);
 		else
 			rpcrdma_defer_mr_recovery(mw);
-
-		i += seg->mr_nsegs;
-		seg->mr_nsegs = 0;
-		seg->rl_mw = NULL;
 	}
 }
 
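For context, the core idea of this change is to stop tracking registered MWs through array indices in req->rl_segments (the rl_nchunks and i += seg->mr_nsegs bookkeeping) and instead keep each registered MW on a per-request list, req->rl_registered, which the unmap paths simply drain. Below is a minimal user-space sketch of that pattern. It is illustrative only: the struct names, the hand-rolled list helpers, and the map/unmap functions are hypothetical stand-ins, not the kernel's rpcrdma types or the <linux/list.h> API.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Minimal intrusive doubly-linked list, standing in for <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical stand-in for a registered memory window (rpcrdma_mw). */
struct mw {
	unsigned int handle;	/* analogous to mw->mw_handle */
	struct list_head node;	/* analogous to mw->mw_list */
};

/* Hypothetical stand-in for a request (rpcrdma_req). */
struct req {
	struct list_head registered;	/* analogous to req->rl_registered */
};

/* Like the patched frwr_op_map(): hand the new MW back through an out
 * parameter and let the caller decide where to track it.
 */
static int map_one(unsigned int handle, struct mw **out)
{
	struct mw *mw = malloc(sizeof(*mw));

	if (!mw)
		return -1;
	mw->handle = handle;
	list_init(&mw->node);
	*out = mw;
	return 0;
}

/* Like the patched frwr_op_unmap_safe(): drain the per-request list
 * instead of walking a segment array with index arithmetic.
 */
static void unmap_all(struct req *req)
{
	while (!list_empty(&req->registered)) {
		struct mw *mw = container_of(req->registered.next,
					     struct mw, node);

		list_del_init(&mw->node);
		printf("invalidating handle %u\n", mw->handle);
		free(mw);
	}
}

int main(void)
{
	struct req req;
	struct mw *mw;
	unsigned int i;

	list_init(&req.registered);
	for (i = 1; i <= 3; i++)
		if (map_one(i, &mw) == 0)
			list_add_tail(&mw->node, &req.registered);
	unmap_all(&req);
	return 0;
}

The payoff is visible in the hunks above: teardown no longer needs rl_nchunks or the per-segment stepping, and frwr_op_release_mr() can defensively list_del() an MW that is still on a list.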