@@ -1392,19 +1392,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
 	}
 }
 
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+		   enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	data->len = length - offset;
+	data->offset = offset;
+	data->sg_off = data->offset / PAGE_SIZE;
+
+	data->sg = &sg[data->sg_off];
+	data->nents = min_t(unsigned int, nents - data->sg_off,
+			    ISCSI_ISER_SG_TABLESIZE);
+	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+			  PAGE_SIZE);
+
+	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+					data->dma_dir);
+	if (unlikely(!data->dma_nents)) {
+		pr_err("Cmd: unable to dma map SGs %p\n", sg);
+		return -EINVAL;
+	}
+
+	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+	return 0;
+}
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+	memset(data, 0, sizeof(*data));
+}
+
+
+
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
 	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
-	if (wr->sge) {
+
+	if (wr->data.sg) {
 		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	if (wr->send_wr) {
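The two helpers above consolidate logic that was previously duplicated in both RDMA setup paths: deriving the DMA direction from the I/O opcode, skipping bytes already transferred, clamping the mapping to the registration table size, and (un)mapping the scatterlist. The fields they touch imply a descriptor along the lines of the sketch below; the actual definition lives in ib_isert.h and is not part of this hunk, so names and layout here are inferred from usage rather than authoritative.

/* Inferred shape of the new descriptor; every field below is written
 * by isert_map_data_buf() or read by isert_unmap_data_buf(). */
struct isert_data_buf {
	struct scatterlist	*sg;	   /* first SG entry actually mapped */
	int			nents;	   /* entries handed to ib_dma_map_sg() */
	u32			sg_off;	   /* SG entries skipped to honor offset */
	u32			len;	   /* bytes covered by this mapping */
	u32			offset;	   /* byte offset into the full buffer */
	unsigned int		dma_nents; /* entries after DMA mapping */
	enum dma_data_direction	dma_dir;   /* derived from iser_ib_op_code */
};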
@@ -1424,7 +1465,6 @@ static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
 	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1438,12 +1478,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 		wr->fr_desc = NULL;
 	}
 
-	if (wr->sge) {
+	if (wr->data.sg) {
 		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
-		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-				DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		wr->sge = NULL;
+		isert_unmap_data_buf(isert_conn, &wr->data);
 	}
 
 	wr->ib_sge = NULL;
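One detail both teardown paths now rely on: isert_unmap_data_buf() memsets the descriptor, so wr->data.sg returns to NULL and the guards above turn a repeated teardown into a no-op. The pattern, paraphrased:

/* wr->data.sg is non-NULL only between a successful
 * isert_map_data_buf() and its matching unmap; the memset in
 * isert_unmap_data_buf() re-arms this guard, making teardown
 * safe to reach more than once. */
if (wr->data.sg)
	isert_unmap_data_buf(isert_conn, &wr->data);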
@@ -1548,7 +1585,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
 
 	iscsit_stop_dataout_timer(cmd);
 	device->unreg_rdma_mem(isert_cmd, isert_conn);
-	cmd->write_data_done = wr->cur_rdma_length;
+	cmd->write_data_done = wr->data.len;
 	wr->send_wr_num = 0;
 
 	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
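To see what wr->data.len records here, consider a worked example; PAGE_SIZE == 4096 and ISCSI_ISER_SG_TABLESIZE == 256 are assumed values for illustration only.

/* A 1 MiB WRITE with 8 KiB already received resumes as an RDMA_READ
 * with offset = cmd->write_data_done = 8192, so isert_map_data_buf()
 * computes:
 *   data->sg_off = 8192 / 4096                  = 2 SG entries skipped
 *   data->len    = min(1048576 - 8192, 256 * 4096) = 1040384 bytes
 * and the completion above stores that mapped length in
 * write_data_done, exactly as the old cur_rdma_length did. */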
@@ -2099,54 +2136,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_data_buf *data = &wr->data;
 	struct ib_send_wr *send_wr;
 	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
-	int ret = 0, count, i, ib_sge_cnt;
+	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+	int ret = 0, i, ib_sge_cnt;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		offset = cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
-	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	wr->cur_rdma_length = data_left;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
+	data_left = data->len;
+	offset = data->offset;
 
-	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
 	if (!ib_sge) {
 		pr_warn("Unable to allocate ib_sge\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 	wr->ib_sge = ib_sge;
 
-	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
 	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
 				GFP_KERNEL);
 	if (!wr->send_wr) {
 		pr_debug("Unable to allocate wr->send_wr\n");
 		ret = -ENOMEM;
-		goto unmap_sg;
+		goto unmap_cmd;
 	}
 
 	wr->isert_cmd = isert_cmd;
@@ -2185,10 +2207,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	}
 
 	return 0;
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+	isert_unmap_data_buf(isert_conn, data);
+
 	return ret;
 }
 
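With both error labels now funneling into isert_unmap_data_buf(), the per-command state no longer needs the old sge/num_sge/cur_rdma_length triple. Judging only from the fields this file touches after the patch, struct isert_rdma_wr ends up roughly as sketched below; the authoritative definition is in ib_isert.h and members not used here are omitted.

/* Sketch inferred from usage in this file; not the definition. */
struct isert_rdma_wr {
	struct isert_cmd	*isert_cmd;
	enum iser_ib_op_code	iser_ib_op;
	struct ib_sge		*ib_sge;
	struct ib_sge		s_ib_sge;  /* single-SGE fast-reg case */
	int			send_wr_num;
	struct ib_send_wr	*send_wr;
	struct ib_send_wr	s_send_wr;
	struct isert_data_buf	data;	   /* replaces sge/num_sge/cur_rdma_length */
	struct fast_reg_descriptor *fr_desc;
	/* ... other members omitted ... */
};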
@@ -2232,10 +2253,10 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 }
 
 static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
-		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
-		  unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+		  struct fast_reg_descriptor *fr_desc,
+		  struct isert_data_buf *mem,
+		  struct ib_sge *sge)
 {
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	struct ib_send_wr fr_wr, inv_wr;
@@ -2244,13 +2265,19 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 	u32 page_off;
 	u8 key;
 
-	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
-	page_off = offset % PAGE_SIZE;
+	if (mem->dma_nents == 1) {
+		sge->lkey = isert_conn->conn_mr->lkey;
+		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+		return 0;
+	}
+
+	page_off = mem->offset % PAGE_SIZE;
 
 	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-		 fr_desc, sg_nents, offset);
+		 fr_desc, mem->nents, mem->offset);
 
-	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
+	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
 					     &fr_desc->data_frpl->page_list[0]);
 
 	if (!fr_desc->valid) {
@@ -2273,7 +2300,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
 	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
 	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-	fr_wr.wr.fast_reg.length = data_len;
+	fr_wr.wr.fast_reg.length = mem->len;
 	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
 	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
@@ -2289,12 +2316,12 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 	}
 	fr_desc->valid = false;
 
-	ib_sge->lkey = fr_desc->data_mr->lkey;
-	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
-	ib_sge->length = data_len;
+	sge->lkey = fr_desc->data_mr->lkey;
+	sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
+	sge->length = mem->len;
 
 	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
-		 ib_sge->addr, ib_sge->length, ib_sge->lkey);
+		 sge->addr, sge->length, sge->lkey);
 
 	return ret;
 }
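The rewritten isert_fast_reg_mr() folds in the single-DMA-entry shortcut that used to live in isert_reg_rdma(): when ib_dma_map_sg() coalesces the buffer into one entry, the connection's pre-registered DMA MR describes it directly and no fast-registration work request is posted. That is what lets the caller skip reserving a descriptor on that path, as the next hunk shows.

/* Condensed contract of the rewritten helper (paraphrase, not new API):
 *
 *   mem->dma_nents == 1  ->  describe the buffer with the connection's
 *                            pre-registered DMA MR (conn_mr->lkey) and
 *                            return before touching fr_desc, so the
 *                            caller may legitimately pass fr_desc == NULL;
 *   mem->dma_nents  > 1  ->  build the page list from mem->sg and post
 *                            an IB_WR_FAST_REG_MR using fr_desc.
 */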
@@ -2305,54 +2332,43 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_conn *isert_conn = conn->context;
 	struct ib_send_wr *send_wr;
-	struct ib_sge *ib_sge;
-	struct scatterlist *sg_start;
-	struct fast_reg_descriptor *fr_desc;
-	u32 sg_off = 0, sg_nents;
-	u32 offset = 0, data_len, data_left, rdma_write_max;
-	int ret = 0, count;
+	struct fast_reg_descriptor *fr_desc = NULL;
+	u32 offset;
+	int ret = 0;
 	unsigned long flags;
 
-	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-		data_left = se_cmd->data_length;
-	} else {
-		offset = cmd->write_data_done;
-		sg_off = offset / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
-		isert_cmd->tx_desc.isert_cmd = isert_cmd;
-	}
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = se_cmd->t_data_nents - sg_off;
+	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+				 se_cmd->t_data_nents, se_cmd->data_length,
+				 offset, wr->iser_ib_op, &wr->data);
+	if (ret)
+		return ret;
 
-	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
-	if (unlikely(!count)) {
-		pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-		return -EINVAL;
+	if (wr->data.dma_nents != 1) {
+		spin_lock_irqsave(&isert_conn->conn_lock, flags);
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+					   struct fast_reg_descriptor, list);
+		list_del(&fr_desc->list);
+		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+		wr->fr_desc = fr_desc;
 	}
-	wr->sge = sg_start;
-	wr->num_sge = sg_nents;
-	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-		 isert_cmd, count, sg_start, sg_nents, data_left);
 
-	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
-	ib_sge = &wr->s_ib_sge;
-	wr->ib_sge = ib_sge;
+	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, &wr->s_ib_sge);
+	if (ret)
+		goto unmap_cmd;
 
+	wr->ib_sge = &wr->s_ib_sge;
 	wr->send_wr_num = 1;
 	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
 	wr->send_wr = &wr->s_send_wr;
-
 	wr->isert_cmd = isert_cmd;
-	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
 
 	send_wr = &isert_cmd->rdma_wr.s_send_wr;
-	send_wr->sg_list = ib_sge;
+	send_wr->sg_list = &wr->s_ib_sge;
 	send_wr->num_sge = 1;
 	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
@@ -2368,37 +2384,15 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	data_len = min(data_left, rdma_write_max);
-	wr->cur_rdma_length = data_len;
-
-	/* if there is a single dma entry, dma mr is sufficient */
-	if (count == 1) {
-		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
-		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
-		ib_sge->lkey = isert_conn->conn_mr->lkey;
-		wr->fr_desc = NULL;
-	} else {
+	return 0;
+unmap_cmd:
+	if (fr_desc) {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
-					   struct fast_reg_descriptor, list);
-		list_del(&fr_desc->list);
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-		wr->fr_desc = fr_desc;
-
-		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
-					ib_sge, sg_nents, offset, data_len);
-		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
-			goto unmap_sg;
-		}
 	}
+	isert_unmap_data_buf(isert_conn, &wr->data);
 
-	return 0;
-
-unmap_sg:
-	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	return ret;
 }
 
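After this refactor the two registration strategies share one skeleton, which is the point of the patch: map once, do strategy-specific work, and unwind through a single helper on every error path. A minimal sketch of that shared structure follows; isert_do_rdma() is a hypothetical illustrative wrapper and does not exist in the driver.

/* Hypothetical skeleton showing the structure common to
 * isert_map_rdma() and isert_reg_rdma() after the patch. */
static int isert_do_rdma(struct isert_conn *conn, struct iscsi_cmd *cmd,
			 struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	u32 off = wr->iser_ib_op == ISER_IB_RDMA_READ ?
		  cmd->write_data_done : 0;
	int ret;

	/* 1. one helper maps the buffer for either direction */
	ret = isert_map_data_buf(conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 off, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	/* 2. strategy-specific work: build the ib_sge list, or reserve a
	 *    fast_reg_descriptor and call isert_fast_reg_mr() */
	ret = 0; /* ... elided ... */
	if (ret)
		goto unmap;

	return 0;
unmap:
	/* 3. one helper undoes the mapping on every error path */
	isert_unmap_data_buf(conn, &wr->data);
	return ret;
}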