Commit 5738a09

khoroshilov authored and davem330 committed
vmxnet3: fix checks for dma mapping errors

vmxnet3_drv does not check dma_addr with dma_mapping_error()
after mapping dma memory. The patch adds the checks and
tries to handle failures.

Found by Linux Driver Verification project (linuxtesting.org).

Signed-off-by: Alexey Khoroshilov <[email protected]>
Acked-by: Shrikrishna Khare <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent ee9159d commit 5738a09

File tree

1 file changed, 60 insertions(+), 11 deletions(-)

drivers/net/vmxnet3/vmxnet3_drv.c

Lines changed: 60 additions & 11 deletions
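
The patch applies the standard kernel DMA API rule: a handle returned by dma_map_single()/dma_map_page()/skb_frag_dma_map() must be validated with dma_mapping_error() before it is handed to hardware, since a failed mapping is not reliably detectable by comparing the handle against zero on all architectures (which is presumably also why the old BUG_ON(rbi->dma_addr == 0) is dropped below). A minimal sketch of the map-then-check shape, using a hypothetical helper name and parameters that are not from the driver:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Hypothetical helper: map an rx buffer and validate the handle.
     * On failure the caller frees the buffer and bumps its failure
     * counter, as the hunks below do inline at each mapping site.
     */
    static int my_map_rx_buf(struct device *dev, void *buf, size_t len,
                             dma_addr_t *dma_addr)
    {
            *dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *dma_addr))
                    return -EFAULT; /* never hand this handle to the device */
            return 0;
    }

Note also how the rx-completion hunks map the replacement buffer first and unmap the old one only after the new mapping succeeds, so a mapping failure can fall back to reusing the existing buffer.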
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->skb->data, rbi->len,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					dev_kfree_skb_any(rbi->skb);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffer skipped by the device */
 			}
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 						&adapter->pdev->dev,
 						rbi->page, 0, PAGE_SIZE,
 						PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      rbi->dma_addr)) {
+					put_page(rbi->page);
+					rq->stats.rx_buf_alloc_failure++;
+					break;
+				}
 			} else {
 				/* rx buffers skipped by the device */
 			}
 			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
 		}
 
-		BUG_ON(rbi->dma_addr == 0);
 		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
 		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
 					   | val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 }
 
 
-static void
+static int
 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
 		struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 				skb->data + buf_offset, buf_size,
 				PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+			return -EFAULT;
 
 		tbi->len = buf_size;
 
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
 							 buf_offset, buf_size,
 							 DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+				return -EFAULT;
 
 			tbi->len = buf_size;
 
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 	/* set the last buf_info for the pkt */
 	tbi->skb = skb;
 	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+
+	return 0;
 }
 
 
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	}
 
 	/* fill tx descs related to addr & len */
-	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
+		goto unlock_drop_pkt;
 
 	/* setup the EOP desc */
 	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		struct vmxnet3_rx_buf_info *rbi;
 		struct sk_buff *skb, *new_skb = NULL;
 		struct page *new_page = NULL;
+		dma_addr_t new_dma_addr;
 		int num_to_alloc;
 		struct Vmxnet3_RxDesc *rxd;
 		u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				skip_page_frags = true;
 				goto rcd_done;
 			}
+			new_dma_addr = dma_map_single(&adapter->pdev->dev,
+						      new_skb->data, rbi->len,
+						      PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(&adapter->pdev->dev,
+					      new_dma_addr)) {
+				dev_kfree_skb(new_skb);
+				/* Skb allocation failed, do not handover this
+				 * skb to stack. Reuse it. Drop the existing pkt
+				 */
+				rq->stats.rx_buf_alloc_failure++;
+				ctx->skb = NULL;
+				rq->stats.drop_total++;
+				skip_page_frags = true;
+				goto rcd_done;
+			}
 
 			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
 					 rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->skb = new_skb;
-			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
-						       rbi->skb->data, rbi->len,
-						       PCI_DMA_FROMDEVICE);
+			rbi->dma_addr = new_dma_addr;
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
 			rxd->len = rbi->len;
 			if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 					skip_page_frags = true;
 					goto rcd_done;
 				}
+				new_dma_addr = dma_map_page(&adapter->pdev->dev
+							, rbi->page,
+							0, PAGE_SIZE,
+							PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      new_dma_addr)) {
+					put_page(new_page);
+					rq->stats.rx_buf_alloc_failure++;
+					dev_kfree_skb(ctx->skb);
+					ctx->skb = NULL;
+					skip_page_frags = true;
+					goto rcd_done;
+				}
 
 				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 				/* Immediate refill */
 				rbi->page = new_page;
-				rbi->dma_addr = dma_map_page(&adapter->pdev->dev
-						, rbi->page,
-						0, PAGE_SIZE,
-						PCI_DMA_FROMDEVICE);
+				rbi->dma_addr = new_dma_addr;
 				rxd->addr = cpu_to_le64(rbi->dma_addr);
 				rxd->len = rbi->len;
 			}
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev)
 							PCI_DMA_TODEVICE);
 	}
 
-	if (new_table_pa) {
+	if (!dma_mapping_error(&adapter->pdev->dev,
+			       new_table_pa)) {
 		new_mode |= VMXNET3_RXM_MCAST;
 		rxConf->mfTablePA = cpu_to_le64(new_table_pa);
 	} else {
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
 					     sizeof(struct vmxnet3_adapter),
 					     PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+		dev_err(&pdev->dev, "Failed to map dma\n");
+		err = -EFAULT;
+		goto err_dma_map;
+	}
 	adapter->shared = dma_alloc_coherent(
 				&adapter->pdev->dev,
 				sizeof(struct Vmxnet3_DriverShared),
@@ -3233,6 +3281,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 err_alloc_shared:
 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+err_dma_map:
 	free_netdev(netdev);
 	return err;
 }
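
The last two hunks extend the probe function's goto-based unwind chain with a new err_dma_map label, so a mapping failure frees only what has been set up so far. A minimal sketch of that idiom, with hypothetical names (my_probe, my_setup_dma, struct my_adapter), not the driver's actual probe flow:

    #include <linux/etherdevice.h>
    #include <linux/pci.h>

    struct my_adapter { void *priv; };              /* hypothetical */

    static int my_setup_dma(struct pci_dev *pdev);  /* hypothetical mapping step */

    static int my_probe(struct pci_dev *pdev)
    {
            struct net_device *netdev;
            int err;

            netdev = alloc_etherdev(sizeof(struct my_adapter));
            if (!netdev)
                    return -ENOMEM;

            err = my_setup_dma(pdev);
            if (err)
                    goto err_dma_map;       /* undo only the allocation above */

            return 0;

    err_dma_map:
            free_netdev(netdev);
            return err;
    }

Each label in such a chain undoes exactly the steps completed before the failing one, which is why the patch places err_dma_map after the err_alloc_shared unmap rather than before it.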
