@@ -41,8 +41,9 @@
 #include "xilinx_axienet.h"
 
 /* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT		64
+#define TX_BD_NUM_DEFAULT		128
 #define RX_BD_NUM_DEFAULT		1024
+#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
 #define TX_BD_NUM_MAX			4096
 #define RX_BD_NUM_MAX			4096
 
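TX_BD_NUM_MIN captures the worst case a single frame can require: one BD per page fragment (MAX_SKB_FRAGS, typically 17 on 4 KiB-page systems) plus one BD for the linear header area, i.e. 18 descriptors. The default TX ring also doubles from 64 to 128 so that the queue-stop threshold introduced further down (keep MAX_SKB_FRAGS + 1 slots free) still leaves room for several full-sized frames in flight.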
@@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
 
 static int __axienet_device_reset(struct axienet_local *lp)
 {
-	u32 timeout;
+	u32 value;
+	int ret;
 
 	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
 	 * process of Axi DMA takes a while to complete as all pending
@@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp)
 	 * they both reset the entire DMA core, so only one needs to be used.
 	 */
 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
-	timeout = DELAY_OF_ONE_MILLISEC;
-	while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
-	       XAXIDMA_CR_RESET_MASK) {
-		udelay(1);
-		if (--timeout == 0) {
-			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
-				   __func__);
-			return -ETIMEDOUT;
-		}
+	ret = read_poll_timeout(axienet_dma_in32, value,
+				!(value & XAXIDMA_CR_RESET_MASK),
+				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+				XAXIDMA_TX_CR_OFFSET);
+	if (ret) {
+		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+		return ret;
+	}
+
+	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has
+	 * finished.
+	 */
+	ret = read_poll_timeout(axienet_ior, value,
+				value & XAE_INT_PHYRSTCMPLT_MASK,
+				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+				XAE_IS_OFFSET);
+	if (ret) {
+		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+		return ret;
 	}
 
 	return 0;
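Both waits use read_poll_timeout() from <linux/iopoll.h>: poll axienet_dma_in32()/axienet_ior() roughly every DELAY_OF_ONE_MILLISEC microseconds for up to 50 ms (the false argument means no sleep before the first read), returning 0 once the condition holds and -ETIMEDOUT otherwise, which is what gets propagated as ret. A simplified model of the macro, eliding the ktime deadline bookkeeping of the real implementation:

/* Sketch of read_poll_timeout(op, val, cond, sleep_us, timeout_us,
 * sleep_before_read, args...); the real macro in <linux/iopoll.h>
 * computes the deadline with ktime and sleeps via usleep_range().
 */
#define read_poll_timeout_sketch(op, val, cond, sleep_us, timeout_us, ...) \
({									\
	unsigned long __waited_us = 0;					\
	(val) = op(__VA_ARGS__);		/* first read, no sleep */ \
	while (!(cond) && __waited_us < (timeout_us)) {			\
		usleep_range((sleep_us) / 2 + 1, (sleep_us));		\
		__waited_us += (sleep_us);				\
		(val) = op(__VA_ARGS__);	/* re-read and re-test */ \
	}								\
	(cond) ? 0 : -ETIMEDOUT;					\
})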
@@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			break;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys,
 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
@@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			dev_consume_skb_irq(cur_p->skb);
 
-		cur_p->cntrl = 0;
 		cur_p->app0 = 0;
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
 		cur_p->app4 = 0;
-		cur_p->status = 0;
 		cur_p->skb = NULL;
+		/* Ensure our transmit path and device don't prematurely see
+		 * status cleared.
+		 */
+		wmb();
+		cur_p->cntrl = 0;
+		cur_p->status = 0;
 
 		if (sizep)
 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
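The reordering is the point of this hunk: the submit path treats cntrl == 0 as "slot free" (see axienet_check_tx_bd_space() below), so cntrl and status may be cleared only after every other field has been scrubbed, with wmb() ordering the stores. A userspace analogue of that contract, using C11 release/acquire atomics where the driver needs full wmb()/rmb() because the descriptor ring is shared with the DMA engine and not just other CPUs (the struct and helpers here are illustrative, not driver code):

#include <stdatomic.h>
#include <stdbool.h>

struct fake_bd {
	unsigned int app0, app1;	/* stand-ins for the scrubbed fields */
	_Atomic unsigned int cntrl;	/* nonzero while the slot is in use */
};

/* Recycler (the free_tx_chain role): scrub first, publish "free" last. */
static void recycle_bd(struct fake_bd *bd)
{
	bd->app0 = 0;
	bd->app1 = 0;
	/* Release ordering plays the role of wmb(): no earlier store can
	 * be reordered past the cntrl store, so anyone who reads
	 * cntrl == 0 is guaranteed to see a fully scrubbed descriptor.
	 */
	atomic_store_explicit(&bd->cntrl, 0, memory_order_release);
}

/* Submitter (the check_tx_bd_space role): test the ownership word first. */
static bool bd_is_free(struct fake_bd *bd)
{
	/* Acquire ordering plays the role of rmb(): subsequent reads of
	 * the payload cannot be satisfied with stale pre-publish values.
	 */
	return atomic_load_explicit(&bd->cntrl, memory_order_acquire) == 0;
}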
@@ -646,6 +660,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 	return i;
 }
 
+/**
+ * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
+ * @lp:		Pointer to the axienet_local structure
+ * @num_frag:	The number of BDs to check for
+ *
+ * Return: 0, on success
+ *	    NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked before BDs are allocated and transmission starts.
+ * This function returns 0 if a BD or group of BDs can be allocated for
+ * transmission. If the BD or any of the BDs are not free the function
+ * returns a busy status. This is invoked from axienet_start_xmit.
+ */
+static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+					    int num_frag)
+{
+	struct axidma_bd *cur_p;
+
+	/* Ensure we see all descriptor updates from device or TX IRQ path */
+	rmb();
+	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+	if (cur_p->cntrl)
+		return NETDEV_TX_BUSY;
+	return 0;
+}
+
 /**
  * axienet_start_xmit_done - Invoked once a transmit is completed by the
  * Axi DMA Tx channel.
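Two deliberate differences from the version deleted below: the test now probes cntrl rather than status, and it issues rmb() first. axienet_free_tx_chain() clears cntrl last, behind its wmb(), so cntrl == 0 becomes visible only once the whole descriptor is scrubbed, whereas status is also written asynchronously by the DMA engine. And since descriptors are reclaimed in ring order, probing the furthest slot the frame would occupy, (tx_bd_tail + num_frag) % tx_bd_num, suffices: if that slot is free, every slot before it is too.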
@@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
 	/* Matches barrier in axienet_start_xmit */
 	smp_mb();
 
-	netif_wake_queue(ndev);
-}
-
-/**
- * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp:		Pointer to the axienet_local structure
- * @num_frag:	The number of BDs to check for
- *
- * Return: 0, on success
- *	    NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked before BDs are allocated and transmission starts.
- * This function returns 0 if a BD or group of BDs can be allocated for
- * transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
- */
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
-					    int num_frag)
-{
-	struct axidma_bd *cur_p;
-	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
-	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
-		return NETDEV_TX_BUSY;
-	return 0;
+	if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+		netif_wake_queue(ndev);
 }
 
 /**
@@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	num_frag = skb_shinfo(skb)->nr_frags;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
-	if (axienet_check_tx_bd_space(lp, num_frag)) {
-		if (netif_queue_stopped(ndev))
-			return NETDEV_TX_BUSY;
-
+	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+		/* Should not happen as last start_xmit call should have
+		 * checked for sufficient space and queue should only be
+		 * woken when sufficient space is available.
+		 */
 		netif_stop_queue(ndev);
-
-		/* Matches barrier in axienet_start_xmit_done */
-		smp_mb();
-
-		/* Space might have just been freed - check again */
-		if (axienet_check_tx_bd_space(lp, num_frag))
-			return NETDEV_TX_BUSY;
-
-		netif_wake_queue(ndev);
+		if (net_ratelimit())
+			netdev_warn(ndev, "TX ring unexpectedly full\n");
+		return NETDEV_TX_BUSY;
 	}
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (++lp->tx_bd_tail >= lp->tx_bd_num)
 		lp->tx_bd_tail = 0;
 
+	/* Stop queue if next transmit may not have space */
+	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+		netif_stop_queue(ndev);
+
+		/* Matches barrier in axienet_start_xmit_done */
+		smp_mb();
+
+		/* Space might have just been freed - check again */
+		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+			netif_wake_queue(ndev);
+	}
+
 	return NETDEV_TX_OK;
 }
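Combined with the conditional wake now done in axienet_start_xmit_done(), this moves flow control ahead of the failure: stop the queue as soon as a worst-case frame (MAX_SKB_FRAGS + 1 BDs) might no longer fit, instead of bouncing packets with NETDEV_TX_BUSY on the next xmit. The shape of the handshake, sketched with a hypothetical ring_full() predicate standing in for axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1):

#include <linux/netdevice.h>

static bool ring_full(void);	/* hypothetical: worst-case frame won't fit */

/* Xmit path, after queueing a frame. */
static void xmit_side_flow_control(struct net_device *ndev)
{
	if (!ring_full())
		return;
	netif_stop_queue(ndev);
	/* Pairs with the barrier on the completion side: either that side
	 * sees our stop, or we see the slots it just freed.
	 */
	smp_mb();
	if (!ring_full())
		netif_wake_queue(ndev);	/* completion raced with us */
}

/* Completion path, after reclaiming descriptors. */
static void completion_side_flow_control(struct net_device *ndev)
{
	smp_mb();	/* pairs with the barrier on the xmit side */
	if (!ring_full())
		netif_wake_queue(ndev);
}

The recheck after netif_stop_queue() closes the classic lost-wake race: if the completion side frees the last busy slots and issues its wake just before the xmit side stops the queue, that wake is a no-op, and without the recheck nothing would ever restart TX.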
@@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev)
 
 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
 				 DMA_FROM_DEVICE);
@@ -1352,7 +1379,8 @@ axienet_ethtools_set_ringparam(struct net_device *ndev,
 	if (ering->rx_pending > RX_BD_NUM_MAX ||
 	    ering->rx_mini_pending ||
 	    ering->rx_jumbo_pending ||
-	    ering->rx_pending > TX_BD_NUM_MAX)
+	    ering->tx_pending < TX_BD_NUM_MIN ||
+	    ering->tx_pending > TX_BD_NUM_MAX)
 		return -EINVAL;
 
 	if (netif_running(ndev))
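The dropped line tested rx_pending against TX_BD_NUM_MAX, a typo that left tx_pending entirely unvalidated. Beyond fixing the field name, the new TX_BD_NUM_MIN floor guarantees that an empty ring can always hold a maximally fragmented frame; with a smaller ring, axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1) would report busy forever and permanently stall the queue. Concretely, on a 4 KiB-page system (MAX_SKB_FRAGS typically 17) a request such as `ethtool -G ethX tx 8` now fails with -EINVAL.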
@@ -2027,6 +2055,11 @@ static int axienet_probe(struct platform_device *pdev)
 	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
 	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
 
+	/* Reset core now that clocks are enabled, prior to accessing MDIO */
+	ret = __axienet_device_reset(lp);
+	if (ret)
+		goto cleanup_clk;
+
 	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 	if (lp->phy_node) {
 		ret = axienet_mdio_setup(lp);
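Ordering rationale: __axienet_device_reset() now also waits for PhyRstCmplt, so by the time axienet_mdio_setup() first touches the MDIO bus the core, and the management interface it hosts, is known to be out of reset; previously probe could issue MDIO accesses against a core still resetting. The reset sits after clock enablement because register access needs the clocks, and a failure unwinds through cleanup_clk.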