@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_buf *tx_buf;
-	unsigned long flags;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
 		return -ENOMEM;
 
-	/* normally we can rely on the stack not calling this more than once,
-	 * however we have 2 queues running ont he same ring so we need to lock
-	 * the ring access
-	 */
-	spin_lock_irqsave(&eth->page_lock, flags);
 	WRITE_ONCE(itxd->txd1, mapped_addr);
 	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -609,8 +603,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		WRITE_ONCE(txd->txd1, mapped_addr);
 		WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
 				       TX_DMA_PLEN0(frag_map_size) |
-				       last_frag * TX_DMA_LS0) |
-				       mac->id);
+				       last_frag * TX_DMA_LS0));
 		WRITE_ONCE(txd->txd4, 0);
 
 		tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -632,8 +625,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
 				(!nr_frags * TX_DMA_LS0)));
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
 
@@ -661,8 +652,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
 	} while (itxd != txd);
 
-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	return -ENOMEM;
 }
 
@@ -681,7 +670,29 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
 		nfrags += skb_shinfo(skb)->nr_frags;
 	}
 
-	return DIV_ROUND_UP(nfrags, 2);
+	return nfrags;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		netif_wake_queue(eth->netdev[i]);
+	}
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		netif_stop_queue(eth->netdev[i]);
+	}
 }
 
 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -690,14 +701,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct net_device_stats *stats = &dev->stats;
+	unsigned long flags;
 	bool gso = false;
 	int tx_num;
 
+	/* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running on the same ring so we need to lock
+	 * the ring access
+	 */
+	spin_lock_irqsave(&eth->page_lock, flags);
+
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
-		netif_stop_queue(dev);
+		mtk_stop_queue(eth);
 		netif_err(eth, tx_queued, dev,
 			  "Tx Ring full when queue awake!\n");
+		spin_unlock_irqrestore(&eth->page_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -720,15 +739,17 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto drop;
 
 	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
-		netif_stop_queue(dev);
+		mtk_stop_queue(eth);
 		if (unlikely(atomic_read(&ring->free_count) >
 			     ring->thresh))
-			netif_wake_queue(dev);
+			mtk_wake_queue(eth);
 	}
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 
 	return NETDEV_TX_OK;
 
 drop:
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 	stats->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -897,13 +918,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
 	if (!total)
 		return 0;
 
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i] ||
-		    unlikely(!netif_queue_stopped(eth->netdev[i])))
-			continue;
-		if (atomic_read(&ring->free_count) > ring->thresh)
-			netif_wake_queue(eth->netdev[i]);
-	}
+	if (atomic_read(&ring->free_count) > ring->thresh)
+		mtk_wake_queue(eth);
 
 	return total;
 }
@@ -1176,7 +1192,7 @@ static void mtk_tx_timeout(struct net_device *dev)
 	eth->netdev[mac->id]->stats.tx_errors++;
 	netif_err(eth, tx_err, dev,
 		  "transmit timed out\n");
-	schedule_work(&mac->pending_work);
+	schedule_work(&eth->pending_work);
 }
 
 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
@@ -1413,19 +1429,30 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static void mtk_pending_work(struct work_struct *work)
 {
-	struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-	struct mtk_eth *eth = mac->hw;
-	struct net_device *dev = eth->netdev[mac->id];
-	int err;
+	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+	int err, i;
+	unsigned long restart = 0;
 
 	rtnl_lock();
-	mtk_stop(dev);
 
-	err = mtk_open(dev);
-	if (err) {
-		netif_alert(eth, ifup, dev,
-			    "Driver up/down cycle failed, closing device.\n");
-		dev_close(dev);
+	/* stop all devices to make sure that dma is properly shut down */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!eth->netdev[i])
+			continue;
+		mtk_stop(eth->netdev[i]);
+		__set_bit(i, &restart);
+	}
+
+	/* restart DMA and enable IRQs */
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		if (!test_bit(i, &restart))
+			continue;
+		err = mtk_open(eth->netdev[i]);
+		if (err) {
+			netif_alert(eth, ifup, eth->netdev[i],
+				    "Driver up/down cycle failed, closing device.\n");
+			dev_close(eth->netdev[i]);
+		}
 	}
 	rtnl_unlock();
 }
@@ -1435,15 +1462,13 @@ static int mtk_cleanup(struct mtk_eth *eth)
 	int i;
 
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
 		if (!eth->netdev[i])
 			continue;
 
 		unregister_netdev(eth->netdev[i]);
 		free_netdev(eth->netdev[i]);
-		cancel_work_sync(&mac->pending_work);
 	}
+	cancel_work_sync(&eth->pending_work);
 
 	return 0;
 }
@@ -1631,7 +1656,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	mac->id = id;
 	mac->hw = eth;
 	mac->of_node = np;
-	INIT_WORK(&mac->pending_work, mtk_pending_work);
 
 	mac->hw_stats = devm_kzalloc(eth->dev,
 				     sizeof(*mac->hw_stats),
@@ -1645,6 +1669,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+	eth->netdev[id]->watchdog_timeo = HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1678,10 +1703,6 @@ static int mtk_probe(struct platform_device *pdev)
 	struct mtk_eth *eth;
 	int err;
 
-	err = device_reset(&pdev->dev);
-	if (err)
-		return err;
-
 	match = of_match_device(of_mtk_match, &pdev->dev);
 	soc = (struct mtk_soc_data *)match->data;
 
@@ -1736,6 +1757,7 @@ static int mtk_probe(struct platform_device *pdev)
 
 	eth->dev = &pdev->dev;
 	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+	INIT_WORK(&eth->pending_work, mtk_pending_work);
 
 	err = mtk_hw_init(eth);
 	if (err)
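
The common thread in these hunks is that both GMACs transmit into one shared TX ring, so the ring lock (eth->page_lock) is taken in mtk_start_xmit() around the whole map-and-queue step instead of inside mtk_tx_map(), and queue stop/wake is applied to every netdev at once via the new mtk_stop_queue()/mtk_wake_queue() helpers. The following is a minimal userspace sketch of that pattern only, not driver code: the names (fake_ring, fake_xmit, fake_tx_done) are hypothetical, and a pthread mutex stands in for the spinlock and the netif queue calls.

/* Illustrative analogue: two transmit paths share one descriptor ring,
 * so a single lock covers the whole map-and-queue step and stop/wake
 * is applied to every queue at once. Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_MACS  2
#define RING_SIZE 8
#define THRESH    2

struct fake_ring {
	pthread_mutex_t lock;            /* plays the role of eth->page_lock */
	int free_count;                  /* free TX descriptors left */
	bool queue_stopped[NUM_MACS];    /* per-MAC queue state */
};

/* stop or wake *all* queues, mirroring mtk_stop_queue()/mtk_wake_queue() */
static void stop_all(struct fake_ring *r)
{
	for (int i = 0; i < NUM_MACS; i++)
		r->queue_stopped[i] = true;
}

static void wake_all(struct fake_ring *r)
{
	for (int i = 0; i < NUM_MACS; i++)
		r->queue_stopped[i] = false;
}

/* one "start_xmit": the lock spans the full operation, as the patch
 * does in mtk_start_xmit() rather than inside mtk_tx_map() */
static int fake_xmit(struct fake_ring *r, int descs_needed)
{
	int ret = 0;

	pthread_mutex_lock(&r->lock);
	if (r->free_count < descs_needed) {
		stop_all(r);             /* ring full: stop both queues */
		ret = -1;                /* analogue of NETDEV_TX_BUSY */
		goto out;
	}
	r->free_count -= descs_needed;   /* "map" the packet */
	if (r->free_count <= THRESH)
		stop_all(r);             /* running low, stop early */
out:
	pthread_mutex_unlock(&r->lock);
	return ret;
}

/* TX completion: reclaim descriptors, wake every queue above threshold */
static void fake_tx_done(struct fake_ring *r, int descs_freed)
{
	pthread_mutex_lock(&r->lock);
	r->free_count += descs_freed;
	if (r->free_count > THRESH)
		wake_all(r);
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct fake_ring r = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free_count = RING_SIZE,
	};

	/* both MACs transmit into the same ring */
	fake_xmit(&r, 3);
	fake_xmit(&r, 3);
	printf("free after two sends: %d, queue0 stopped: %d\n",
	       r.free_count, r.queue_stopped[0]);

	fake_tx_done(&r, 6);
	printf("free after completion: %d, queue0 stopped: %d\n",
	       r.free_count, r.queue_stopped[0]);
	return 0;
}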