@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->skb->data, rbi->len,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					dev_kfree_skb_any(rbi->skb);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffer skipped by the device */
 			}
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->page, 0, PAGE_SIZE,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					put_page(rbi->page);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffers skipped by the device */
 			}
 			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
 		}
 
-		BUG_ON(rbi->dma_addr == 0);
 		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
 		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 					   | val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 }
 
 
-static void
+static int
 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
 		struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 				skb->data + buf_offset, buf_size,
 				PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+			return -EFAULT;
 
 		tbi->len = buf_size;
 
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
 							 buf_offset, buf_size,
 							 DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+				return -EFAULT;
 
 			tbi->len = buf_size;
 
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 	/* set the last buf_info for the pkt */
 	tbi->skb = skb;
 	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+	return 0;
 }
 
 
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	}
 
 	/* fill tx descs related to addr & len */
-	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+		goto unlock_drop_pkt;
 
 	/* setup the EOP desc */
 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 	struct vmxnet3_rx_buf_info *rbi;
 	struct sk_buff *skb, *new_skb = NULL;
 	struct page *new_page = NULL;
+	dma_addr_t new_dma_addr;
 	int num_to_alloc;
 	struct Vmxnet3_RxDesc *rxd;
 	u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				skip_page_frags = true;
 				goto rcd_done;
 			}
+			new_dma_addr = dma_map_single(&adapter->pdev->dev,
+						      new_skb->data, rbi->len,
+						      PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev,
+					      new_dma_addr)) {
+				dev_kfree_skb(new_skb);
+				/* Skb allocation failed, do not handover this
+				 * skb to stack. Reuse it. Drop the existing pkt
+				 */
+				rq->stats.rx_buf_alloc_failure++;
+				ctx->skb = NULL;
+				rq->stats.drop_total++;
+				skip_page_frags = true;
+				goto rcd_done;
+			}
 
 			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
 					 rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->skb = new_skb;
-			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
-						       rbi->skb->data, rbi->len,
-						       PCI_DMA_FROMDEVICE);
+			rbi->dma_addr = new_dma_addr;
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
 			if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 					skip_page_frags = true;
 					goto rcd_done;
 				}
+				new_dma_addr = dma_map_page(&adapter->pdev->dev
+							, rbi->page,
+							0, PAGE_SIZE,
+							PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      new_dma_addr)) {
+					put_page(new_page);
+					rq->stats.rx_buf_alloc_failure++;
+					dev_kfree_skb(ctx->skb);
+					ctx->skb = NULL;
+					skip_page_frags = true;
+					goto rcd_done;
+				}
 
 				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 				/* Immediate refill */
 				rbi->page = new_page;
-				rbi->dma_addr = dma_map_page(&adapter->pdev->dev
-						, rbi->page,
-						0, PAGE_SIZE,
-						PCI_DMA_FROMDEVICE);
+				rbi->dma_addr = new_dma_addr;
 				rxd->addr = cpu_to_le64(rbi->dma_addr);
 				rxd->len = rbi->len;
 			}
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
 					      PCI_DMA_TODEVICE);
 	}
 
-	if (new_table_pa) {
+	if (!dma_mapping_error(&adapter->pdev->dev,
+			       new_table_pa)) {
 		new_mode |= VMXNET3_RXM_MCAST;
 		rxConf->mfTablePA = cpu_to_le64(new_table_pa);
 	} else {
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
 					     sizeof(struct vmxnet3_adapter),
 					     PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+		dev_err(&pdev->dev, "Failed to map dma\n");
+		err = -EFAULT;
+		goto err_dma_map;
+	}
 	adapter->shared = dma_alloc_coherent(
 				&adapter->pdev->dev,
 				sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+err_dma_map:
 	free_netdev(netdev);
 	return err;
 }
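
Every map site in this change follows the same pattern: the result of dma_map_single()/dma_map_page() is validated with dma_mapping_error() before the address is written into a descriptor or stored for a later unmap, and the freshly allocated buffer is released on failure (on the rx refill path the old, still-mapped buffer is kept and reused instead). A minimal sketch of that pattern, using placeholder names rather than vmxnet3 identifiers:

#include <linux/dma-mapping.h>

/* Sketch only: illustrates the map-then-check pattern applied by this
 * commit; example_map_buf() and its parameters are placeholders.
 */
static int example_map_buf(struct device *dev, void *buf, size_t len,
			   dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* Comparing the handle against 0 (as the old vmxnet3_set_mc code
	 * did) is not a valid error test; dma_mapping_error() is.
	 */
	if (dma_mapping_error(dev, addr))
		return -EFAULT;

	*out = addr;
	return 0;
}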