Commit 39536ff

Ondrej Zary authored and davem330 committed

dl2k: Handle memory allocation errors in alloc_list

If memory allocation fails in alloc_list(), free the already allocated
memory and return -ENOMEM. In rio_open(), call alloc_list() first and
abort if it fails. Move HW access (set RFDListPtr) out of alloc_list().

Signed-off-by: Ondrej Zary <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 6b99c6d commit 39536ff
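
For context, a minimal, self-contained sketch (not the driver source) of the ordering this commit establishes in rio_open(): allocate the descriptor rings first, then request the IRQ, and unwind with free_list() if the IRQ request fails. The fake_* helpers below are hypothetical stand-ins for alloc_list(), request_irq() and free_list().

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-ins for the driver's alloc_list(), request_irq()
 * and free_list(); each returns 0 on success or a negative errno. */
static int fake_alloc_list(void)  { return 0; }       /* pretend allocation succeeds */
static int fake_request_irq(void) { return -EBUSY; }  /* simulate an IRQ request failure */
static void fake_free_list(void)  { puts("free_list: rings released"); }

/* Mirrors the post-commit rio_open() ordering: allocate first, then IRQ. */
static int fake_open(void)
{
	int err;

	err = fake_alloc_list();
	if (err)
		return err;		/* nothing to unwind yet */

	err = fake_request_irq();
	if (err) {
		fake_free_list();	/* undo the allocation before bailing out */
		return err;
	}

	/* ... remaining HW setup, including writing RFDListPtr ... */
	return 0;
}

int main(void)
{
	int err = fake_open();

	if (err)
		fprintf(stderr, "open failed: %d\n", err);
	return err ? 1 : 0;
}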

drivers/net/ethernet/dlink/dl2k.c

1 file changed, 97 insertions(+), 85 deletions(-)

--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -70,7 +70,6 @@ static const int multicast_filter_limit = 0x40;
 static int rio_open (struct net_device *dev);
 static void rio_timer (unsigned long data);
 static void rio_tx_timeout (struct net_device *dev);
-static void alloc_list (struct net_device *dev);
 static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rio_interrupt (int irq, void *dev_instance);
 static void rio_free_tx (struct net_device *dev, int irq);
@@ -446,6 +445,92 @@ static void rio_set_led_mode(struct net_device *dev)
 	dw32(ASICCtrl, mode);
 }
 
+static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
+{
+	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
+}
+
+static void free_list(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	struct sk_buff *skb;
+	int i;
+
+	/* Free all the skbuffs in the queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = np->rx_skbuff[i];
+		if (skb) {
+			pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
+					 skb->len, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(skb);
+			np->rx_skbuff[i] = NULL;
+		}
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].fraginfo = 0;
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		skb = np->tx_skbuff[i];
+		if (skb) {
+			pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
+					 skb->len, PCI_DMA_TODEVICE);
+			dev_kfree_skb(skb);
+			np->tx_skbuff[i] = NULL;
+		}
+	}
+}
+
+/* allocate and initialize Tx and Rx descriptors */
+static int alloc_list(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	int i;
+
+	np->cur_rx = np->cur_tx = 0;
+	np->old_rx = np->old_tx = 0;
+	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
+
+	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = NULL;
+		np->tx_ring[i].status = cpu_to_le64(TFDDone);
+		np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
+					      ((i + 1) % TX_RING_SIZE) *
+					      sizeof(struct netdev_desc));
+	}
+
+	/* Initialize Rx descriptors */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
+						((i + 1) % RX_RING_SIZE) *
+						sizeof(struct netdev_desc));
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].fraginfo = 0;
+		np->rx_skbuff[i] = NULL;
+	}
+
+	/* Allocate the rx buffers */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		/* Allocated fixed size of skbuff */
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
+		np->rx_skbuff[i] = skb;
+		if (!skb) {
+			free_list(dev);
+			return -ENOMEM;
+		}
+
+		/* Rubicon now supports 40 bits of addressing space. */
+		np->rx_ring[i].fraginfo =
+			cpu_to_le64(pci_map_single(
+				np->pdev, skb->data, np->rx_buf_sz,
+				PCI_DMA_FROMDEVICE));
+		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
+	}
+
+	return 0;
+}
+
 static int
 rio_open (struct net_device *dev)
 {
@@ -455,10 +540,16 @@ rio_open (struct net_device *dev)
 	int i;
 	u16 macctrl;
 
-	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+	i = alloc_list(dev);
 	if (i)
 		return i;
 
+	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+	if (i) {
+		free_list(dev);
+		return i;
+	}
+
 	/* Reset all logic functions */
 	dw16(ASICCtrl + 2,
 	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
@@ -473,7 +564,9 @@ rio_open (struct net_device *dev)
 	if (np->jumbo != 0)
 		dw16(MaxFrameSize, MAX_JUMBO+14);
 
-	alloc_list (dev);
+	/* Set RFDListPtr */
+	dw32(RFDListPtr0, np->rx_ring_dma);
+	dw32(RFDListPtr1, 0);
 
 	/* Set station address */
 	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
@@ -586,60 +679,6 @@ rio_tx_timeout (struct net_device *dev)
 	dev->trans_start = jiffies; /* prevent tx timeout */
 }
 
-/* allocate and initialize Tx and Rx descriptors */
-static void
-alloc_list (struct net_device *dev)
-{
-	struct netdev_private *np = netdev_priv(dev);
-	void __iomem *ioaddr = np->ioaddr;
-	int i;
-
-	np->cur_rx = np->cur_tx = 0;
-	np->old_rx = np->old_tx = 0;
-	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
-
-	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
-	for (i = 0; i < TX_RING_SIZE; i++) {
-		np->tx_skbuff[i] = NULL;
-		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
-		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
-					      ((i+1)%TX_RING_SIZE) *
-					      sizeof (struct netdev_desc));
-	}
-
-	/* Initialize Rx descriptors */
-	for (i = 0; i < RX_RING_SIZE; i++) {
-		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
-						((i + 1) % RX_RING_SIZE) *
-						sizeof (struct netdev_desc));
-		np->rx_ring[i].status = 0;
-		np->rx_ring[i].fraginfo = 0;
-		np->rx_skbuff[i] = NULL;
-	}
-
-	/* Allocate the rx buffers */
-	for (i = 0; i < RX_RING_SIZE; i++) {
-		/* Allocated fixed size of skbuff */
-		struct sk_buff *skb;
-
-		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
-		np->rx_skbuff[i] = skb;
-		if (skb == NULL)
-			break;
-
-		/* Rubicon now supports 40 bits of addressing space. */
-		np->rx_ring[i].fraginfo =
-		    cpu_to_le64 ( pci_map_single (
-				  np->pdev, skb->data, np->rx_buf_sz,
-				  PCI_DMA_FROMDEVICE));
-		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
-	}
-
-	/* Set RFDListPtr */
-	dw32(RFDListPtr0, np->rx_ring_dma);
-	dw32(RFDListPtr1, 0);
-}
-
 static netdev_tx_t
 start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
@@ -748,11 +787,6 @@ rio_interrupt (int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
-static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
-{
-	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
-}
-
 static void
 rio_free_tx (struct net_device *dev, int irq)
 {
@@ -1733,8 +1767,6 @@ rio_close (struct net_device *dev)
 	void __iomem *ioaddr = np->ioaddr;
 
 	struct pci_dev *pdev = np->pdev;
-	struct sk_buff *skb;
-	int i;
 
 	netif_stop_queue (dev);
 
@@ -1747,27 +1779,7 @@ rio_close (struct net_device *dev)
 	free_irq(pdev->irq, dev);
 	del_timer_sync (&np->timer);
 
-	/* Free all the skbuffs in the queue. */
-	for (i = 0; i < RX_RING_SIZE; i++) {
-		skb = np->rx_skbuff[i];
-		if (skb) {
-			pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
-					 skb->len, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb (skb);
-			np->rx_skbuff[i] = NULL;
-		}
-		np->rx_ring[i].status = 0;
-		np->rx_ring[i].fraginfo = 0;
-	}
-	for (i = 0; i < TX_RING_SIZE; i++) {
-		skb = np->tx_skbuff[i];
-		if (skb) {
-			pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
-					 skb->len, PCI_DMA_TODEVICE);
-			dev_kfree_skb (skb);
-			np->tx_skbuff[i] = NULL;
-		}
-	}
+	free_list(dev);
 
 	return 0;
 }
