	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
+	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
@@ -325,6 +326,146 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
}
EXPORT_SYMBOL(rdma_rw_ctx_init);

+/**
+ * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
+ * @ctx:	context to initialize
+ * @qp:		queue pair to operate on
+ * @port_num:	port num to which the connection is bound
+ * @sg:		scatterlist to READ/WRITE from/to
+ * @sg_cnt:	number of entries in @sg
+ * @prot_sg:	scatterlist to READ/WRITE protection information from/to
+ * @prot_sg_cnt: number of entries in @prot_sg
+ * @sig_attrs:	signature offloading algorithms
+ * @remote_addr: remote address to read/write (relative to @rkey)
+ * @rkey:	remote key to operate on
+ * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ *
+ * Returns the number of WQEs that will be needed on the workqueue if
+ * successful, or a negative error code.
+ */
+int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+		struct scatterlist *prot_sg, u32 prot_sg_cnt,
+		struct ib_sig_attrs *sig_attrs,
+		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+	struct ib_device *dev = qp->pd->device;
+	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+	struct ib_rdma_wr *rdma_wr;
+	struct ib_send_wr *prev_wr = NULL;
+	int count = 0, ret;
+
+	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
+		pr_err("SG count too large\n");
+		return -EINVAL;
+	}
+
+	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+	if (!ret)
+		return -ENOMEM;
+	sg_cnt = ret;
+
+	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
+	if (!ret) {
+		ret = -ENOMEM;
+		goto out_unmap_sg;
+	}
+	prot_sg_cnt = ret;
+
+	ctx->type = RDMA_RW_SIG_MR;
+	ctx->nr_ops = 1;
+	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
+	if (!ctx->sig) {
+		ret = -ENOMEM;
+		goto out_unmap_prot_sg;
+	}
+
+	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
+	if (ret < 0)
+		goto out_free_ctx;
+	count += ret;
+	prev_wr = &ctx->sig->data.reg_wr.wr;
+
+	if (prot_sg_cnt) {
+		ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
+				prot_sg, prot_sg_cnt, 0);
+		if (ret < 0)
+			goto out_destroy_data_mr;
+		count += ret;
+
+		if (ctx->sig->prot.inv_wr.next)
+			prev_wr->next = &ctx->sig->prot.inv_wr;
+		else
+			prev_wr->next = &ctx->sig->prot.reg_wr.wr;
+		prev_wr = &ctx->sig->prot.reg_wr.wr;
+	} else {
+		ctx->sig->prot.mr = NULL;
+	}
+
+	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
+	if (!ctx->sig->sig_mr) {
+		ret = -EAGAIN;
+		goto out_destroy_prot_mr;
+	}
+
+	if (ctx->sig->sig_mr->need_inval) {
+		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));
+
+		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
+		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;
+
+		prev_wr->next = &ctx->sig->sig_inv_wr;
+		prev_wr = &ctx->sig->sig_inv_wr;
+	}
+
+	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
+	ctx->sig->sig_wr.wr.wr_cqe = NULL;
+	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
+	ctx->sig->sig_wr.wr.num_sge = 1;
+	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
+	ctx->sig->sig_wr.sig_attrs = sig_attrs;
+	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
+	if (prot_sg_cnt)
+		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
+	prev_wr->next = &ctx->sig->sig_wr.wr;
+	prev_wr = &ctx->sig->sig_wr.wr;
+	count++;
+
+	ctx->sig->sig_sge.addr = 0;
+	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
+	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
+		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;
+
+	rdma_wr = &ctx->sig->data.wr;
+	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
+	rdma_wr->wr.num_sge = 1;
+	rdma_wr->remote_addr = remote_addr;
+	rdma_wr->rkey = rkey;
+	if (dir == DMA_TO_DEVICE)
+		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+	else
+		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+	prev_wr->next = &rdma_wr->wr;
+	prev_wr = &rdma_wr->wr;
+	count++;
+
+	return count;
+
+out_destroy_prot_mr:
+	if (prot_sg_cnt)
+		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
+out_destroy_data_mr:
+	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
+out_free_ctx:
+	kfree(ctx->sig);
+out_unmap_prot_sg:
+	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+out_unmap_sg:
+	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
+
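As a usage illustration only (not part of this patch), the sketch below shows how a ULP might pair the new signature entry point with the existing rdma_rw_ctx_wrs() chaining helper from this file. The wrapper name ulp_post_sig_rdma() and all of its parameters are hypothetical; only the rdma_rw_* helpers and ib_post_send() come from the in-tree API.

/* Hypothetical ULP helper: build and post a signature-protected RDMA op. */
static int ulp_post_sig_rdma(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_ctx *ctx, struct ib_cqe *cqe,
		struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_send_wr *first_wr, *bad_wr;
	int ret;

	/* DMA-map the scatterlists and build the (inv +) reg WRs for the
	 * data, protection and signature MRs, chained in front of the
	 * RDMA READ/WRITE itself. */
	ret = rdma_rw_ctx_signature_init(ctx, qp, port_num, sg, sg_cnt,
			prot_sg, prot_sg_cnt, sig_attrs,
			remote_addr, rkey, dir);
	if (ret < 0)
		return ret;

	/* Update the keys and obtain the head of the WR chain; @cqe
	 * completes on the last WR in the chain. */
	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, NULL);

	ret = ib_post_send(qp, first_wr, &bad_wr);
	if (ret)
		rdma_rw_ctx_destroy_signature(ctx, qp, port_num, sg, sg_cnt,
				prot_sg, prot_sg_cnt, dir);
	return ret;
}

On the success path the ULP would call rdma_rw_ctx_destroy_signature() from its send completion handler, mirroring how rdma_rw_ctx_destroy() is used for non-signature contexts.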
/*
* Now that we are going to post the WRs we can update the lkey and need_inval
* state on the MRs. If we were doing this at init time, we would get double
@@ -360,6 +501,22 @@ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
	int i;

	switch (ctx->type) {
+	case RDMA_RW_SIG_MR:
+		rdma_rw_update_lkey(&ctx->sig->data, true);
+		if (ctx->sig->prot.mr)
+			rdma_rw_update_lkey(&ctx->sig->prot, true);
+
+		ctx->sig->sig_mr->need_inval = true;
+		ib_update_fast_reg_key(ctx->sig->sig_mr,
+			ib_inc_rkey(ctx->sig->sig_mr->lkey));
+		ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;
+
+		if (ctx->sig->data.inv_wr.next)
+			first_wr = &ctx->sig->data.inv_wr;
+		else
+			first_wr = &ctx->sig->data.reg_wr.wr;
+		last_wr = &ctx->sig->data.wr.wr;
+		break;
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
@@ -455,6 +612,39 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

+/**
+ * rdma_rw_ctx_destroy_signature - release all resources allocated by
+ *	rdma_rw_ctx_signature_init
+ * @ctx:	context to release
+ * @qp:		queue pair to operate on
+ * @port_num:	port num to which the connection is bound
+ * @sg:		scatterlist that was used for the READ/WRITE
+ * @sg_cnt:	number of entries in @sg
+ * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
+ * @prot_sg_cnt: number of entries in @prot_sg
+ * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ */
+void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+		struct scatterlist *prot_sg, u32 prot_sg_cnt,
+		enum dma_data_direction dir)
+{
+	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
+		return;
+
+	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
+	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+
+	if (ctx->sig->prot.mr) {
+		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
+		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+	}
+
+	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
+	kfree(ctx->sig);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
+
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;
@@ -474,7 +664,9 @@ void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
	 * we'll need two additional MRs for the registrations and the
	 * invalidation.
	 */
-	if (rdma_rw_can_use_mr(dev, attr->port_num))
+	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
+		factor += 6;	/* (inv + reg) * (data + prot + sig) */
+	else if (rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
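To put numbers on this: the base factor (one WR per context, set earlier in this function) covers the RDMA READ or WRITE WR itself, so a signature-enabled context may consume up to 1 + 6 = 7 send WQEs, that is, a registration and an invalidation for each of the data, protection and signature MRs, plus the RDMA operation. With, say, max_rdma_ctxs = 128, this grows max_send_wr by 7 * 128 = 896 entries.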
@@ -490,20 +682,46 @@ void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
+	u32 nr_mrs = 0, nr_sig_mrs = 0;
	int ret = 0;

-	if (rdma_rw_can_use_mr(dev, attr->port_num)) {
-		ret = ib_mr_pool_init(qp, &qp->rdma_mrs,
-				attr->cap.max_rdma_ctxs, IB_MR_TYPE_MEM_REG,
+	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
+		nr_sig_mrs = attr->cap.max_rdma_ctxs;
+		nr_mrs = attr->cap.max_rdma_ctxs * 2;
+	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
+		nr_mrs = attr->cap.max_rdma_ctxs;
+	}
+
+	if (nr_mrs) {
+		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
+				IB_MR_TYPE_MEM_REG,
				rdma_rw_fr_page_list_len(dev));
-		if (ret)
+		if (ret) {
+			pr_err("%s: failed to allocate %d MRs\n",
+				__func__, nr_mrs);
			return ret;
+		}
+	}
+
+	if (nr_sig_mrs) {
+		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
+				IB_MR_TYPE_SIGNATURE, 2);
+		if (ret) {
+			pr_err("%s: failed to allocate %d SIG MRs\n",
+				__func__, nr_sig_mrs);
+			goto out_free_rdma_mrs;
+		}
	}

+	return 0;
+
+out_free_rdma_mrs:
+	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
+	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}
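A note on the pool sizing above: each signature context can hold one signature MR plus up to two regular registration MRs (one for the data scatterlist, one for the protection scatterlist), which is why the signature case allocates max_rdma_ctxs entries in qp->sig_mrs and max_rdma_ctxs * 2 entries in qp->rdma_mrs. The signature MRs are created with max_num_sg = 2, presumably because they only ever cover the data and protection regions, and rdma_rw_cleanup_mrs() destroys the pools in the reverse order of allocation.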