@@ -177,14 +177,12 @@ struct ntb_transport_qp {
 	u64 rx_err_ver;
 	u64 rx_memcpy;
 	u64 rx_async;
-	u64 dma_rx_prep_err;
 	u64 tx_bytes;
 	u64 tx_pkts;
 	u64 tx_ring_full;
 	u64 tx_err_no_buf;
 	u64 tx_memcpy;
 	u64 tx_async;
-	u64 dma_tx_prep_err;
 };
 
 struct ntb_transport_mw {
@@ -254,8 +252,6 @@ enum {
 #define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
 #define NTB_QP_DEF_NUM_ENTRIES	100
 #define NTB_LINK_DOWN_TIMEOUT	10
-#define DMA_RETRIES		20
-#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
 
 static void ntb_transport_rxc_db(unsigned long data);
 static const struct ntb_ctx_ops ntb_transport_ops;
@@ -516,12 +512,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "free tx - \t%u\n",
 			       ntb_transport_tx_free_entry(qp));
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA tx prep err - \t%llu\n",
-			       qp->dma_tx_prep_err);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA rx prep err - \t%llu\n",
-			       qp->dma_rx_prep_err);
 
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "\n");
@@ -623,7 +613,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 	if (!mw->virt_addr)
 		return -ENOMEM;
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
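
The condition rewritten above decides how many queues each memory window must back. Since QP_TO_MW() maps queue qp to window qp % nt->mw_count, window mw_num holds one extra queue exactly when mw_num < qp_count % mw_count; the old test compared mw_num against qp_count / mw_count and could undersize the early windows. A minimal userspace sketch of the fixed arithmetic (num_qps_for_mw() is a hypothetical helper written for illustration, not driver code):

#include <stdio.h>

/* Mirror of the fixed split: the first (qp_count % mw_count) windows
 * each take one extra queue so that every queue gets ring memory. */
static unsigned int num_qps_for_mw(unsigned int qp_count,
				   unsigned int mw_count,
				   unsigned int mw_num)
{
	if (mw_num < qp_count % mw_count)
		return qp_count / mw_count + 1;
	return qp_count / mw_count;
}

int main(void)
{
	unsigned int qp_count = 3, mw_count = 2, mw_num, total = 0;

	for (mw_num = 0; mw_num < mw_count; mw_num++)
		total += num_qps_for_mw(qp_count, mw_count, mw_num);

	/* Prints "total = 3": window 0 takes queues 0 and 2, window 1
	 * takes queue 1. The old test (qp_count % mw_count &&
	 * mw_num + 1 < qp_count / mw_count) is false for both windows
	 * here, sizing each for one queue and leaving queue 2 without
	 * ring memory. */
	printf("total = %u (expect %u)\n", total, qp_count);
	return 0;
}
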
@@ -768,8 +758,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
 	qp->tx_err_no_buf = 0;
 	qp->tx_memcpy = 0;
 	qp->tx_async = 0;
-	qp->dma_tx_prep_err = 0;
-	qp->dma_rx_prep_err = 0;
 }
 
 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -1000,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp->event_handler = NULL;
 	ntb_qp_link_down_reset(qp);
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
@@ -1128,8 +1116,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	qp_count = ilog2(qp_bitmap);
 	if (max_num_clients && max_num_clients < qp_count)
 		qp_count = max_num_clients;
-	else if (mw_count < qp_count)
-		qp_count = mw_count;
+	else if (nt->mw_count < qp_count)
+		qp_count = nt->mw_count;
 
 	qp_bitmap &= BIT_ULL(qp_count) - 1;
 
@@ -1317,7 +1305,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
-	int retries = 0;
 
 	len = entry->len;
 	device = chan->device;
@@ -1346,22 +1333,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 
 	unmap->from_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan,
-						     unmap->addr[1],
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_rx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_rx_copy_callback;
 	txd->callback_param = entry;
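
With the retry loop removed, a NULL return from device_prep_dma_memcpy() (typically descriptor exhaustion) now jumps straight to err_get_unmap, and the enqueue paths fall back to a plain CPU copy, already counted by the rx_memcpy/tx_memcpy statistics above. Failing fast matters here: ntb_transport_rxc_db() runs as a tasklet, so the removed set_current_state()/schedule_timeout() pair could sleep in atomic context. A small userspace model of the fail-fast-then-memcpy decision (prep_copy() is a stand-in for device_prep_dma_memcpy() and is forced to fail here):

#include <stdio.h>
#include <string.h>

/* Stand-in for device_prep_dma_memcpy(): returns nonzero on failure,
 * modeling a channel that has run out of DMA descriptors. */
static int prep_copy(void *dst, const void *src, size_t len)
{
	(void)dst; (void)src; (void)len;
	return -1;
}

int main(void)
{
	char src[16] = "frame payload";
	char dst[16] = { 0 };
	unsigned long long rx_async = 0, rx_memcpy = 0;

	if (prep_copy(dst, src, sizeof(src)) == 0) {
		rx_async++;			/* hardware copy queued */
	} else {
		memcpy(dst, src, sizeof(src));	/* immediate CPU fallback */
		rx_memcpy++;
	}

	printf("dst=\"%s\" async=%llu memcpy=%llu\n", dst, rx_async, rx_memcpy);
	return 0;
}

The tx-side hunk below applies the same transformation to ntb_async_tx_submit().
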
@@ -1606,7 +1582,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 	struct dmaengine_unmap_data *unmap;
 	dma_addr_t dest;
 	dma_cookie_t cookie;
-	int retries = 0;
 
 	device = chan->device;
 	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1628,21 +1603,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 
 	unmap->to_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan, dest,
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_tx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_tx_copy_callback;
 	txd->callback_param = entry;