@@ -464,36 +464,40 @@ static void free_list(struct net_device *dev)
464
464
}
465
465
}
466
466
467
+ static void rio_reset_ring (struct netdev_private * np )
468
+ {
469
+ int i ;
470
+
471
+ np -> cur_rx = 0 ;
472
+ np -> cur_tx = 0 ;
473
+ np -> old_rx = 0 ;
474
+ np -> old_tx = 0 ;
475
+
476
+ for (i = 0 ; i < TX_RING_SIZE ; i ++ )
477
+ np -> tx_ring [i ].status = cpu_to_le64 (TFDDone );
478
+
479
+ for (i = 0 ; i < RX_RING_SIZE ; i ++ )
480
+ np -> rx_ring [i ].status = 0 ;
481
+ }
482
+
467
483
/* allocate and initialize Tx and Rx descriptors */
468
484
static int alloc_list (struct net_device * dev )
469
485
{
470
486
struct netdev_private * np = netdev_priv (dev );
471
487
int i ;
472
488
473
- np -> cur_rx = np -> cur_tx = 0 ;
474
- np -> old_rx = np -> old_tx = 0 ;
489
+ rio_reset_ring (np );
475
490
np -> rx_buf_sz = (dev -> mtu <= 1500 ? PACKET_SIZE : dev -> mtu + 32 );
476
491
477
492
/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
478
493
for (i = 0 ; i < TX_RING_SIZE ; i ++ ) {
479
494
np -> tx_skbuff [i ] = NULL ;
480
- np -> tx_ring [i ].status = cpu_to_le64 (TFDDone );
481
495
np -> tx_ring [i ].next_desc = cpu_to_le64 (np -> tx_ring_dma +
482
496
((i + 1 ) % TX_RING_SIZE ) *
483
497
sizeof (struct netdev_desc ));
484
498
}
485
499
486
- /* Initialize Rx descriptors */
487
- for (i = 0 ; i < RX_RING_SIZE ; i ++ ) {
488
- np -> rx_ring [i ].next_desc = cpu_to_le64 (np -> rx_ring_dma +
489
- ((i + 1 ) % RX_RING_SIZE ) *
490
- sizeof (struct netdev_desc ));
491
- np -> rx_ring [i ].status = 0 ;
492
- np -> rx_ring [i ].fraginfo = 0 ;
493
- np -> rx_skbuff [i ] = NULL ;
494
- }
495
-
496
- /* Allocate the rx buffers */
500
+ /* Initialize Rx descriptors & allocate buffers */
497
501
for (i = 0 ; i < RX_RING_SIZE ; i ++ ) {
498
502
/* Allocated fixed size of skbuff */
499
503
struct sk_buff * skb ;
@@ -505,6 +509,9 @@ static int alloc_list(struct net_device *dev)
505
509
return - ENOMEM ;
506
510
}
507
511
512
+ np -> rx_ring [i ].next_desc = cpu_to_le64 (np -> rx_ring_dma +
513
+ ((i + 1 ) % RX_RING_SIZE ) *
514
+ sizeof (struct netdev_desc ));
508
515
/* Rubicon now supports 40 bits of addressing space. */
509
516
np -> rx_ring [i ].fraginfo =
510
517
cpu_to_le64 (pci_map_single (
@@ -1824,11 +1831,55 @@ rio_remove1 (struct pci_dev *pdev)
1824
1831
}
1825
1832
}
1826
1833
1834
#ifdef CONFIG_PM_SLEEP
/*
 * rio_suspend - system-sleep callback: quiesce the NIC.
 * @device: generic device embedded in the PCI device
 *
 * If the interface is up, detach it from the stack so no new transmits
 * arrive, stop the periodic maintenance timer, and halt the hardware so
 * it performs no DMA or interrupts while the system sleeps.
 */
static int rio_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	del_timer_sync(&np->timer);
	rio_hw_stop(dev);

	return 0;
}

/*
 * rio_resume - system-wake callback: bring the NIC back up.
 * @device: generic device embedded in the PCI device
 *
 * Chip state is lost across suspend, so rebuild the software ring
 * state, reprogram the hardware, restart the one-second maintenance
 * timer, reattach the interface, and finally re-enable interrupts.
 */
static int rio_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rio_reset_ring(np);
	rio_hw_init(dev);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);
	netif_device_attach(dev);
	dl2k_enable_int(np);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS	(&rio_pm_ops)

#else

/* No sleep support configured: leave the driver's pm ops unset. */
#define RIO_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */
1827
1877
static struct pci_driver rio_driver = {
1828
1878
.name = "dl2k" ,
1829
1879
.id_table = rio_pci_tbl ,
1830
1880
.probe = rio_probe1 ,
1831
1881
.remove = rio_remove1 ,
1882
+ .driver .pm = RIO_PM_OPS ,
1832
1883
};
1833
1884
1834
1885
module_pci_driver (rio_driver );
0 commit comments