@@ -431,6 +431,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 	struct srp_fr_desc *d;
 	struct ib_mr *mr;
 	int i, ret = -EINVAL;
+	enum ib_mr_type mr_type;
 
 	if (pool_size <= 0)
 		goto err;
@@ -444,9 +445,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 	spin_lock_init(&pool->lock);
 	INIT_LIST_HEAD(&pool->free_list);
 
+	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+		mr_type = IB_MR_TYPE_SG_GAPS;
+	else
+		mr_type = IB_MR_TYPE_MEM_REG;
+
 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-				 max_page_list_len);
+		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
 		if (IS_ERR(mr)) {
 			ret = PTR_ERR(mr);
 			if (ret == -ENOMEM)
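For readers unfamiliar with the two MR types, the selection added above reduces to the sketch below. srp_select_mr_type() is a hypothetical helper used here for illustration only; it exists neither in the patch nor in the driver.

	/* Hypothetical helper restating the mr_type selection above. */
	static enum ib_mr_type srp_select_mr_type(struct ib_device *device)
	{
		if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
			/* HCA can register arbitrary SG lists, gaps included. */
			return IB_MR_TYPE_SG_GAPS;
		/* Fallback: fast registration of page-aligned SG lists only. */
		return IB_MR_TYPE_MEM_REG;
	}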
@@ -2996,8 +3001,9 @@ static int srp_slave_alloc(struct scsi_device *sdev)
 	struct Scsi_Host *shost = sdev->host;
 	struct srp_target_port *target = host_to_target(shost);
 	struct srp_device *srp_dev = target->srp_host->srp_dev;
+	struct ib_device *ibdev = srp_dev->dev;
 
-	if (true)
+	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
 		blk_queue_virt_boundary(sdev->request_queue,
 					~srp_dev->mr_page_mask);
 
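The "if (true)" placeholder becomes a real capability test. An annotated restatement of the new check follows; the comment is editorial, not from the patch:

	/* Without SG-gaps registration the HCA maps one page per
	 * page-list entry, so ask the block layer not to build
	 * requests whose SG elements leave gaps at MR-page
	 * granularity. With IB_DEVICE_SG_GAPS_REG the HCA can map
	 * gapped SG lists, so no virt boundary needs to be imposed. */
	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
		blk_queue_virt_boundary(sdev->request_queue,
					~srp_dev->mr_page_mask);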
@@ -3775,26 +3781,36 @@ static ssize_t srp_create_target(struct device *dev,
 	}
 
 	if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
-		/*
-		 * FR and FMR can only map one HCA page per entry. If the
-		 * start address is not aligned on a HCA page boundary two
-		 * entries will be used for the head and the tail although
-		 * these two entries combined contain at most one HCA page of
-		 * data. Hence the "+ 1" in the calculation below.
-		 *
-		 * The indirect data buffer descriptor is contiguous so the
-		 * memory for that buffer will only be registered if
-		 * register_always is true. Hence add one to mr_per_cmd if
-		 * register_always has been set.
-		 */
+		bool gaps_reg = (ibdev->attrs.device_cap_flags &
+				 IB_DEVICE_SG_GAPS_REG);
+
 		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
 			(ilog2(srp_dev->mr_page_size) - 9);
-		mr_per_cmd = register_always +
-			(target->scsi_host->max_sectors + 1 +
-			 max_sectors_per_mr - 1) / max_sectors_per_mr;
+		if (!gaps_reg) {
+			/*
+			 * FR and FMR can only map one HCA page per entry. If
+			 * the start address is not aligned on a HCA page
+			 * boundary two entries will be used for the head and
+			 * the tail although these two entries combined
+			 * contain at most one HCA page of data. Hence the "+
+			 * 1" in the calculation below.
+			 *
+			 * The indirect data buffer descriptor is contiguous
+			 * so the memory for that buffer will only be
+			 * registered if register_always is true. Hence add
+			 * one to mr_per_cmd if register_always has been set.
+			 */
+			mr_per_cmd = register_always +
+				(target->scsi_host->max_sectors + 1 +
+				 max_sectors_per_mr - 1) / max_sectors_per_mr;
+		} else {
+			mr_per_cmd = register_always +
+				(target->sg_tablesize +
+				 srp_dev->max_pages_per_mr - 1) /
+				srp_dev->max_pages_per_mr;
+		}
 		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
-			 target->scsi_host->max_sectors,
-			 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
+			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
 			 max_sectors_per_mr, mr_per_cmd);
 	}
 
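To make the two mr_per_cmd formulas concrete, here is a standalone userspace sketch with made-up values: 4 KiB MR pages, 512 pages per MR, max_sectors = 1024, sg_tablesize = 256 and register_always enabled. None of these numbers come from the patch.

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_pages_per_mr = 512;	/* pages per MR */
		unsigned int max_sectors = 1024;	/* 512-byte sectors */
		unsigned int sg_tablesize = 256;	/* SG entries per cmd */
		unsigned int register_always = 1;

		/* ilog2(4096) == 12: convert pages per MR to sectors per MR,
		 * here 512 << 3 == 4096 sectors. */
		unsigned int max_sectors_per_mr = max_pages_per_mr << (12 - 9);

		/* No SG-gaps support: "+ 1" covers an unaligned head/tail. */
		unsigned int no_gaps = register_always +
			(max_sectors + 1 + max_sectors_per_mr - 1) /
			max_sectors_per_mr;

		/* SG-gaps support: sized by SG entries, not by sectors. */
		unsigned int gaps = register_always +
			(sg_tablesize + max_pages_per_mr - 1) /
			max_pages_per_mr;

		printf("no_gaps = %u, gaps = %u\n", no_gaps, gaps);
		return 0;
	}

With these values both variants come out to two MRs per command. Note that the gaps formula scales with the SG-table size rather than the transfer size, presumably because a gapped MR needs at least one page-list entry per scatterlist element.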