@@ -70,7 +70,6 @@ static const int multicast_filter_limit = 0x40;
 static int rio_open (struct net_device *dev);
 static void rio_timer (unsigned long data);
 static void rio_tx_timeout (struct net_device *dev);
-static void alloc_list (struct net_device *dev);
 static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rio_interrupt (int irq, void *dev_instance);
 static void rio_free_tx (struct net_device *dev, int irq);
@@ -446,6 +445,92 @@ static void rio_set_led_mode(struct net_device *dev)
         dw32(ASICCtrl, mode);
 }
 
+static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
+{
+        return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
+}
+
+static void free_list(struct net_device *dev)
+{
+        struct netdev_private *np = netdev_priv(dev);
+        struct sk_buff *skb;
+        int i;
+
+        /* Free all the skbuffs in the queue. */
+        for (i = 0; i < RX_RING_SIZE; i++) {
+                skb = np->rx_skbuff[i];
+                if (skb) {
+                        pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
+                                         skb->len, PCI_DMA_FROMDEVICE);
+                        dev_kfree_skb(skb);
+                        np->rx_skbuff[i] = NULL;
+                }
+                np->rx_ring[i].status = 0;
+                np->rx_ring[i].fraginfo = 0;
+        }
+        for (i = 0; i < TX_RING_SIZE; i++) {
+                skb = np->tx_skbuff[i];
+                if (skb) {
+                        pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
+                                         skb->len, PCI_DMA_TODEVICE);
+                        dev_kfree_skb(skb);
+                        np->tx_skbuff[i] = NULL;
+                }
+        }
+}
+
+/* allocate and initialize Tx and Rx descriptors */
+static int alloc_list(struct net_device *dev)
+{
+        struct netdev_private *np = netdev_priv(dev);
+        int i;
+
+        np->cur_rx = np->cur_tx = 0;
+        np->old_rx = np->old_tx = 0;
+        np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
+
+        /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
+        for (i = 0; i < TX_RING_SIZE; i++) {
+                np->tx_skbuff[i] = NULL;
+                np->tx_ring[i].status = cpu_to_le64(TFDDone);
+                np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
+                                              ((i + 1) % TX_RING_SIZE) *
+                                              sizeof(struct netdev_desc));
+        }
+
+        /* Initialize Rx descriptors */
+        for (i = 0; i < RX_RING_SIZE; i++) {
+                np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
+                                                ((i + 1) % RX_RING_SIZE) *
+                                                sizeof(struct netdev_desc));
+                np->rx_ring[i].status = 0;
+                np->rx_ring[i].fraginfo = 0;
+                np->rx_skbuff[i] = NULL;
+        }
+
+        /* Allocate the rx buffers */
+        for (i = 0; i < RX_RING_SIZE; i++) {
+                /* Allocated fixed size of skbuff */
+                struct sk_buff *skb;
+
+                skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
+                np->rx_skbuff[i] = skb;
+                if (!skb) {
+                        free_list(dev);
+                        return -ENOMEM;
+                }
+
+                /* Rubicon now supports 40 bits of addressing space. */
+                np->rx_ring[i].fraginfo =
+                    cpu_to_le64(pci_map_single(
+                                  np->pdev, skb->data, np->rx_buf_sz,
+                                  PCI_DMA_FROMDEVICE));
+                np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
+        }
+
+        return 0;
+}
+
 static int
 rio_open (struct net_device *dev)
 {
@@ -455,10 +540,16 @@ rio_open (struct net_device *dev)
         int i;
         u16 macctrl;
 
-        i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+        i = alloc_list(dev);
         if (i)
                 return i;
 
+        i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
+        if (i) {
+                free_list(dev);
+                return i;
+        }
+
         /* Reset all logic functions */
         dw16(ASICCtrl + 2,
              GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
@@ -473,7 +564,9 @@ rio_open (struct net_device *dev)
         if (np->jumbo != 0)
                 dw16(MaxFrameSize, MAX_JUMBO + 14);
 
-        alloc_list (dev);
+        /* Set RFDListPtr */
+        dw32(RFDListPtr0, np->rx_ring_dma);
+        dw32(RFDListPtr1, 0);
 
         /* Set station address */
         /* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
@@ -586,60 +679,6 @@ rio_tx_timeout (struct net_device *dev)
         dev->trans_start = jiffies; /* prevent tx timeout */
 }
 
-/* allocate and initialize Tx and Rx descriptors */
-static void
-alloc_list (struct net_device *dev)
-{
-        struct netdev_private *np = netdev_priv(dev);
-        void __iomem *ioaddr = np->ioaddr;
-        int i;
-
-        np->cur_rx = np->cur_tx = 0;
-        np->old_rx = np->old_tx = 0;
-        np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
-
-        /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
-        for (i = 0; i < TX_RING_SIZE; i++) {
-                np->tx_skbuff[i] = NULL;
-                np->tx_ring[i].status = cpu_to_le64(TFDDone);
-                np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
-                                              ((i + 1) % TX_RING_SIZE) *
-                                              sizeof(struct netdev_desc));
-        }
-
-        /* Initialize Rx descriptors */
-        for (i = 0; i < RX_RING_SIZE; i++) {
-                np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
-                                                ((i + 1) % RX_RING_SIZE) *
-                                                sizeof(struct netdev_desc));
-                np->rx_ring[i].status = 0;
-                np->rx_ring[i].fraginfo = 0;
-                np->rx_skbuff[i] = NULL;
-        }
-
-        /* Allocate the rx buffers */
-        for (i = 0; i < RX_RING_SIZE; i++) {
-                /* Allocated fixed size of skbuff */
-                struct sk_buff *skb;
-
-                skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
-                np->rx_skbuff[i] = skb;
-                if (skb == NULL)
-                        break;
-
-                /* Rubicon now supports 40 bits of addressing space. */
-                np->rx_ring[i].fraginfo =
-                    cpu_to_le64(pci_map_single(
-                                  np->pdev, skb->data, np->rx_buf_sz,
-                                  PCI_DMA_FROMDEVICE));
-                np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
-        }
-
-        /* Set RFDListPtr */
-        dw32(RFDListPtr0, np->rx_ring_dma);
-        dw32(RFDListPtr1, 0);
-}
-
 static netdev_tx_t
 start_xmit (struct sk_buff *skb, struct net_device *dev)
 {
@@ -748,11 +787,6 @@ rio_interrupt (int irq, void *dev_instance)
         return IRQ_RETVAL(handled);
 }
 
-static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
-{
-        return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
-}
-
 static void
 rio_free_tx (struct net_device *dev, int irq)
 {
@@ -1733,8 +1767,6 @@ rio_close (struct net_device *dev)
         void __iomem *ioaddr = np->ioaddr;
 
         struct pci_dev *pdev = np->pdev;
-        struct sk_buff *skb;
-        int i;
 
         netif_stop_queue (dev);
 
@@ -1747,27 +1779,7 @@ rio_close (struct net_device *dev)
         free_irq(pdev->irq, dev);
         del_timer_sync (&np->timer);
 
-        /* Free all the skbuffs in the queue. */
-        for (i = 0; i < RX_RING_SIZE; i++) {
-                skb = np->rx_skbuff[i];
-                if (skb) {
-                        pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
-                                         skb->len, PCI_DMA_FROMDEVICE);
-                        dev_kfree_skb(skb);
-                        np->rx_skbuff[i] = NULL;
-                }
-                np->rx_ring[i].status = 0;
-                np->rx_ring[i].fraginfo = 0;
-        }
-        for (i = 0; i < TX_RING_SIZE; i++) {
-                skb = np->tx_skbuff[i];
-                if (skb) {
-                        pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
-                                         skb->len, PCI_DMA_TODEVICE);
-                        dev_kfree_skb(skb);
-                        np->tx_skbuff[i] = NULL;
-                }
-        }
+        free_list(dev);
 
         return 0;
 }
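
Side note on the descriptor layout the new helpers rely on (drawn from the code above, not an addition in the patch itself): fraginfo is a single little-endian 64-bit word that carries the DMA address in its low 48 bits and the buffer length in the top 16 bits, which is why desc_to_dma() masks with DMA_BIT_MASK(48) and alloc_list() ORs in (u64)np->rx_buf_sz << 48. A minimal userspace sketch of that packing, with the kernel helpers replaced by hypothetical stand-ins and byte swapping omitted:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's DMA_BIT_MASK(48): low 48 bits set. */
#define ADDR_MASK_48 ((1ULL << 48) - 1)

/* Pack address (low 48 bits) and length (high 16 bits) the way
 * alloc_list() builds rx_ring[i].fraginfo. */
static uint64_t pack_fraginfo(uint64_t dma_addr, uint16_t len)
{
        return (dma_addr & ADDR_MASK_48) | ((uint64_t)len << 48);
}

/* Recover the address, mirroring desc_to_dma(). */
static uint64_t fraginfo_to_dma(uint64_t fraginfo)
{
        return fraginfo & ADDR_MASK_48;
}

int main(void)
{
        uint64_t f = pack_fraginfo(0x123456789abcULL, 1536);

        printf("addr=0x%llx len=%u\n",
               (unsigned long long)fraginfo_to_dma(f),
               (unsigned)(f >> 48));
        return 0;
}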