@@ -823,7 +823,8 @@ static u8 srp_get_subnet_timeout(struct srp_host *host)
 	return subnet_timeout;
 }
 
-static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
+static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			bool multich)
 {
 	struct srp_target_port *target = ch->target;
 	struct {
@@ -852,7 +853,7 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
 
 	req->ib_req.opcode = SRP_LOGIN_REQ;
 	req->ib_req.tag = 0;
-	req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
 	req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
 					      SRP_BUF_FORMAT_INDIRECT);
 	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
@@ -1145,7 +1146,8 @@ static int srp_connected_ch(struct srp_target_port *target)
 	return c;
 }
 
-static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
+static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
+			  bool multich)
 {
 	struct srp_target_port *target = ch->target;
 	int ret;
@@ -1158,7 +1160,7 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
 
 	while (1) {
 		init_completion(&ch->done);
-		ret = srp_send_req(ch, multich);
+		ret = srp_send_req(ch, max_iu_len, multich);
 		if (ret)
 			goto out;
 		ret = wait_for_completion_interruptible(&ch->done);
@@ -1344,6 +1346,16 @@ static void srp_terminate_io(struct srp_rport *rport)
 	}
 }
 
+/* Calculate maximum initiator to target information unit length. */
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt)
+{
+	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
+		sizeof(struct srp_indirect_buf) +
+		cmd_sg_cnt * sizeof(struct srp_direct_buf);
+
+	return max_iu_len;
+}
+
 /*
  * It is up to the caller to ensure that srp_rport_reconnect() calls are
  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
@@ -1357,6 +1369,7 @@ static int srp_rport_reconnect(struct srp_rport *rport)
 {
 	struct srp_target_port *target = rport->lld_data;
 	struct srp_rdma_ch *ch;
+	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt);
 	int i, j, ret = 0;
 	bool multich = false;
 
@@ -1402,7 +1415,7 @@ static int srp_rport_reconnect(struct srp_rport *rport)
 		ch = &target->ch[i];
 		if (ret)
 			break;
-		ret = srp_connect_ch(ch, multich);
+		ret = srp_connect_ch(ch, max_iu_len, multich);
 		multich = true;
 	}
 
@@ -2316,7 +2329,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 
 	req = &ch->req_ring[idx];
 	dev = target->srp_host->srp_dev->dev;
-	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
				   DMA_TO_DEVICE);
 
 	scmnd->host_scribble = (void *) req;
@@ -2353,7 +2366,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 		goto err_iu;
 	}
 
-	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
+	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
				      DMA_TO_DEVICE);
 
 	if (srp_post_send(ch, iu, len)) {
@@ -2421,7 +2434,7 @@ static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
 
	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
-					      target->max_iu_len,
+					      ch->max_it_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;
@@ -2487,6 +2500,9 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
+		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt);
+		WARN_ON_ONCE(ch->max_it_iu_len >
+			     be32_to_cpu(lrsp->max_it_iu_len));
 
		/*
		 * Reserve credits for task management so we don't
@@ -3734,6 +3750,7 @@ static ssize_t srp_create_target(struct device *dev,
	int ret, node_idx, node, cpu, i;
	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
	bool multich = false;
+	uint32_t max_iu_len;
 
	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
@@ -3839,10 +3856,7 @@ static ssize_t srp_create_target(struct device *dev,
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
				sizeof(struct srp_direct_buf);
-	target->max_iu_len = sizeof(struct srp_cmd) +
-			     SRP_MAX_ADD_CDB_LEN +
-			     sizeof(struct srp_indirect_buf) +
-			     target->cmd_sg_cnt * sizeof(struct srp_direct_buf);
+	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt);
 
	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
@@ -3897,7 +3911,7 @@ static ssize_t srp_create_target(struct device *dev,
		if (ret)
			goto err_disconnect;
 
-		ret = srp_connect_ch(ch, multich);
+		ret = srp_connect_ch(ch, max_iu_len, multich);
		if (ret) {
			char dst[64];
 
0 commit comments