@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 	}
 }
 
-/**
- * stmmac_stop_all_queues - Stop all queues
- * @priv: driver private structure
- */
-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
-{
-	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
-	u32 queue;
-
-	for (queue = 0; queue < tx_queues_cnt; queue++)
-		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
-/**
- * stmmac_start_all_queues - Start all queues
- * @priv: driver private structure
- */
-static void stmmac_start_all_queues(struct stmmac_priv *priv)
-{
-	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
-	u32 queue;
-
-	for (queue = 0; queue < tx_queues_cnt; queue++)
-		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
 {
 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
@@ -2732,6 +2706,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
 	}
 
+	/* Configure real RX and TX queues */
+	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
 	/* Start the ball rolling... */
 	stmmac_start_all_dma(priv);
 
@@ -2860,7 +2838,7 @@ static int stmmac_open(struct net_device *dev)
 	}
 
 	stmmac_enable_all_queues(priv);
-	stmmac_start_all_queues(priv);
+	netif_tx_start_all_queues(priv->dev);
 
 	return 0;
 
@@ -2903,8 +2881,6 @@ static int stmmac_release(struct net_device *dev)
 	phylink_stop(priv->phylink);
 	phylink_disconnect_phy(priv->phylink);
 
-	stmmac_stop_all_queues(priv);
-
 	stmmac_disable_all_queues(priv);
 
 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
@@ -4739,6 +4715,69 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 	return 0;
 }
 
+static void stmmac_napi_add(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 queue, maxq;
+
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+
+		ch->priv_data = priv;
+		ch->index = queue;
+
+		if (queue < priv->plat->rx_queues_to_use) {
+			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+				       NAPI_POLL_WEIGHT);
+		}
+		if (queue < priv->plat->tx_queues_to_use) {
+			netif_tx_napi_add(dev, &ch->tx_napi,
+					  stmmac_napi_poll_tx,
+					  NAPI_POLL_WEIGHT);
+		}
+	}
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 queue, maxq;
+
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+
+		if (queue < priv->plat->rx_queues_to_use)
+			netif_napi_del(&ch->rx_napi);
+		if (queue < priv->plat->tx_queues_to_use)
+			netif_napi_del(&ch->tx_napi);
+	}
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (netif_running(dev))
+		stmmac_release(dev);
+
+	stmmac_napi_del(dev);
+
+	priv->plat->rx_queues_to_use = rx_cnt;
+	priv->plat->tx_queues_to_use = tx_cnt;
+
+	stmmac_napi_add(dev);
+
+	if (netif_running(dev))
+		ret = stmmac_open(dev);
+
+	return ret;
+}
+
 /**
  * stmmac_dvr_probe
  * @device: device pointer
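
stmmac_reinit_queues() allows the active queue counts to be changed at runtime: it closes the interface if it is running, deletes the per-channel NAPI instances, records the new RX/TX counts, re-adds NAPI for them, and reopens the device. A minimal sketch of a possible caller follows, assuming it is wired to an ethtool .set_channels handler and that the MTL_MAX_RX_QUEUES/MTL_MAX_TX_QUEUES limits bound the valid range; this is an illustration only and not part of this patch.

/* Hypothetical ethtool .set_channels callback (not part of this patch):
 * validate the requested counts, then let stmmac_reinit_queues() rebuild
 * the NAPI instances and reopen the interface with the new queue counts.
 */
static int stmmac_example_set_channels(struct net_device *dev,
				       struct ethtool_channels *ch)
{
	if (!ch->rx_count || ch->rx_count > MTL_MAX_RX_QUEUES ||
	    !ch->tx_count || ch->tx_count > MTL_MAX_TX_QUEUES)
		return -EINVAL;

	return stmmac_reinit_queues(dev, ch->rx_count, ch->tx_count);
}
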
@@ -4755,7 +4794,7 @@ int stmmac_dvr_probe(struct device *device,
 {
 	struct net_device *ndev = NULL;
 	struct stmmac_priv *priv;
-	u32 queue, rxq, maxq;
+	u32 rxq;
 	int i, ret = 0;
 
 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
@@ -4819,10 +4858,6 @@ int stmmac_dvr_probe(struct device *device,
 
 	stmmac_check_ether_addr(priv);
 
-	/* Configure real RX and TX queues */
-	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
-	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
-
 	ndev->netdev_ops = &stmmac_netdev_ops;
 
 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4920,25 +4955,7 @@ int stmmac_dvr_probe(struct device *device,
 	priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
 	/* Setup channels NAPI */
-	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
-
-	for (queue = 0; queue < maxq; queue++) {
-		struct stmmac_channel *ch = &priv->channel[queue];
-
-		spin_lock_init(&ch->lock);
-		ch->priv_data = priv;
-		ch->index = queue;
-
-		if (queue < priv->plat->rx_queues_to_use) {
-			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
-				       NAPI_POLL_WEIGHT);
-		}
-		if (queue < priv->plat->tx_queues_to_use) {
-			netif_tx_napi_add(ndev, &ch->tx_napi,
-					  stmmac_napi_poll_tx,
-					  NAPI_POLL_WEIGHT);
-		}
-	}
+	stmmac_napi_add(ndev);
 
 	mutex_init(&priv->lock);
 
@@ -5003,14 +5020,7 @@ int stmmac_dvr_probe(struct device *device,
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 error_mdio_register:
-	for (queue = 0; queue < maxq; queue++) {
-		struct stmmac_channel *ch = &priv->channel[queue];
-
-		if (queue < priv->plat->rx_queues_to_use)
-			netif_napi_del(&ch->rx_napi);
-		if (queue < priv->plat->tx_queues_to_use)
-			netif_napi_del(&ch->tx_napi);
-	}
+	stmmac_napi_del(ndev);
 error_hw_init:
 	destroy_workqueue(priv->wq);
 
@@ -5078,7 +5088,6 @@ int stmmac_suspend(struct device *dev)
 	mutex_lock(&priv->lock);
 
 	netif_device_detach(ndev);
-	stmmac_stop_all_queues(priv);
 
 	stmmac_disable_all_queues(priv);
 
@@ -5204,8 +5213,6 @@ int stmmac_resume(struct device *dev)
 
 	stmmac_enable_all_queues(priv);
 
-	stmmac_start_all_queues(priv);
-
 	mutex_unlock(&priv->lock);
 
 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {