@@ -280,6 +280,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
 
+#ifdef CONFIG_FEC_PTP
+	bdp->cbd_bdu = 0;
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+			fep->hwts_tx_en)) {
+		bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+	} else {
+
+		bdp->cbd_esc = BD_ENET_TX_INT;
+	}
+#endif
 	/* Trigger transmission start */
 	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
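The hunk above arms a transmit descriptor for timestamping only when the skb carries SKBTX_HW_TSTAMP and the driver has TX timestamping enabled (fep->hwts_tx_en), and it flags the skb with SKBTX_IN_PROGRESS so the completion path knows to report the timestamp. What sets SKBTX_HW_TSTAMP in the first place is an application requesting hardware timestamps through the standard SO_TIMESTAMPING socket option; a minimal userspace sketch follows (not part of this patch, and the flag combination is illustrative):

/*
 * Userspace sketch, not driver code: requesting hardware TX timestamps
 * with SO_TIMESTAMPING is what ultimately sets SKBTX_HW_TSTAMP on
 * outgoing skbs.  The exact flag combination is an illustrative choice.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

int enable_tx_timestamps(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	/* Ask the stack for hardware TX timestamps on this socket */
	if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0) {
		perror("SO_TIMESTAMPING");
		return -1;
	}
	return 0;
}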
@@ -437,10 +448,17 @@ fec_restart(struct net_device *ndev, int duplex)
 		writel(1 << 8, fep->hwp + FEC_X_WMRK);
 	}
 
+#ifdef CONFIG_FEC_PTP
+	ecntl |= (1 << 4);
+#endif
+
 	/* And last, enable the transmit and receive processing */
 	writel(ecntl, fep->hwp + FEC_ECNTRL);
 	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
 
+#ifdef CONFIG_FEC_PTP
+	fec_ptp_start_cyclecounter(ndev);
+#endif
 	/* Enable interrupts we wish to service */
 	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
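Bit 4 of ECNTRL is the ENET block's 1588 enable, so the first addition switches on the IEEE 1588 timer whenever FEC_PTP is built in. fec_ptp_start_cyclecounter() lives in fec_ptp.c, which is not part of this excerpt; presumably it binds the free-running hardware counter to the kernel's cyclecounter/timecounter framework so that raw descriptor timestamps can later be converted to nanoseconds. A hypothetical sketch of that kind of setup follows, with struct fields, register offset, and mult/shift values that are assumptions rather than the patch's actual code:

/*
 * Hypothetical sketch of what fec_ptp_start_cyclecounter() typically
 * does.  The priv layout, FEC_ATIME offset, and mult/shift choices are
 * assumptions, not taken from the patch's fec_ptp.c.
 */
#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

#define FEC_ATIME	0x404	/* illustrative offset of the 1588 timer value */

struct fec_ptp_priv {
	void __iomem *hwp;
	struct cyclecounter cc;
	struct timecounter tc;
	spinlock_t tmreg_lock;
};

static cycle_t fec_ptp_read(const struct cyclecounter *cc)
{
	struct fec_ptp_priv *priv =
		container_of(cc, struct fec_ptp_priv, cc);

	/* Read the free-running 1588 timer value */
	return readl(priv->hwp + FEC_ATIME);
}

static void fec_ptp_start(struct fec_ptp_priv *priv)
{
	unsigned long flags;

	priv->cc.read = fec_ptp_read;
	priv->cc.mask = CLOCKSOURCE_MASK(32);	/* 32-bit hardware counter */
	priv->cc.shift = 31;
	priv->cc.mult = 1U << priv->cc.shift;	/* assumes 1 ns per counter tick */

	spin_lock_irqsave(&priv->tmreg_lock, flags);
	/* Bind the counter to a timecounter starting at current wall time */
	timecounter_init(&priv->tc, &priv->cc, ktime_to_ns(ktime_get_real()));
	spin_unlock_irqrestore(&priv->tmreg_lock, flags);
}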
@@ -526,6 +544,19 @@ fec_enet_tx(struct net_device *ndev)
 			ndev->stats.tx_packets++;
 		}
 
+#ifdef CONFIG_FEC_PTP
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+			struct skb_shared_hwtstamps shhwtstamps;
+			unsigned long flags;
+
+			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+			spin_lock_irqsave(&fep->tmreg_lock, flags);
+			shhwtstamps.hwtstamp = ns_to_ktime(
+				timecounter_cyc2time(&fep->tc, bdp->ts));
+			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+			skb_tstamp_tx(skb, &shhwtstamps);
+		}
+#endif
 		if (status & BD_ENET_TX_READY)
 			printk("HEY! Enet xmit interrupt and TX_READY.\n");
 
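On TX completion, the raw counter value latched into the descriptor (bdp->ts) is converted to nanoseconds with timecounter_cyc2time() under tmreg_lock and handed back to the stack via skb_tstamp_tx(). Applications then read that timestamp from the socket error queue; a userspace sketch of the retrieval side follows (generic SO_TIMESTAMPING usage, not driver code — buffer sizes and the absence of a poll() loop are simplifications):

/*
 * Userspace sketch: the hardware TX timestamp delivered by
 * skb_tstamp_tx() comes back on the error queue as an SCM_TIMESTAMPING
 * control message; ts[2] holds the raw hardware timestamp.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

int read_tx_timestamp(int sock)
{
	char data[256], ctrl[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(sock, &msg, MSG_ERRQUEUE) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *ts = (void *)CMSG_DATA(cmsg);

			/* ts[2] is the raw hardware timestamp */
			printf("hw tx ts: %ld.%09ld\n",
			       (long)ts->ts[2].tv_sec, ts->ts[2].tv_nsec);
		}
	}
	return 0;
}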
@@ -652,6 +683,21 @@ fec_enet_rx(struct net_device *ndev)
 			skb_put(skb, pkt_len - 4);	/* Make room */
 			skb_copy_to_linear_data(skb, data, pkt_len - 4);
 			skb->protocol = eth_type_trans(skb, ndev);
+#ifdef CONFIG_FEC_PTP
+			/* Get receive timestamp from the skb */
+			if (fep->hwts_rx_en) {
+				struct skb_shared_hwtstamps *shhwtstamps =
+							    skb_hwtstamps(skb);
+				unsigned long flags;
+
+				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+				spin_lock_irqsave(&fep->tmreg_lock, flags);
+				shhwtstamps->hwtstamp = ns_to_ktime(
+				    timecounter_cyc2time(&fep->tc, bdp->ts));
+				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+			}
+#endif
 			if (!skb_defer_rx_timestamp(skb))
 				netif_rx(skb);
 		}
@@ -666,6 +712,12 @@ fec_enet_rx(struct net_device *ndev)
 		status |= BD_ENET_RX_EMPTY;
 		bdp->cbd_sc = status;
 
+#ifdef CONFIG_FEC_PTP
+		bdp->cbd_esc = BD_ENET_RX_INT;
+		bdp->cbd_prot = 0;
+		bdp->cbd_bdu = 0;
+#endif
+
 		/* Update BD pointer to next entry */
 		if (status & BD_ENET_RX_WRAP)
 			bdp = fep->rx_bd_base;
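These RX hunks (and the transmit hunk earlier) touch cbd_esc, cbd_prot, cbd_bdu and bdp->ts, fields that exist only in the ENET's enhanced buffer descriptor format, so the patch presumably extends struct bufdesc in fec.h under CONFIG_FEC_PTP. A sketch of the implied layout follows — the field names match what the code above uses, but the ordering and the reserved padding are assumptions based on the i.MX enhanced-descriptor format, not the patch's actual header:

/*
 * Sketch of the enhanced buffer descriptor implied by the hunks above
 * (fec.h is not shown in this excerpt).
 */
struct bufdesc {
	unsigned short	cbd_datlen;	/* Data length */
	unsigned short	cbd_sc;		/* Control and status info */
	unsigned long	cbd_bufaddr;	/* Buffer address */
#ifdef CONFIG_FEC_PTP
	unsigned long	cbd_esc;	/* Enhanced status/control (TS/INT bits) */
	unsigned long	cbd_prot;	/* Protocol checksum info */
	unsigned long	cbd_bdu;	/* BD update done marker */
	unsigned long	ts;		/* Hardware timestamp of this frame */
	unsigned short	res0[4];	/* Reserved */
#endif
};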
@@ -1105,6 +1157,10 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 	if (!phydev)
 		return -ENODEV;
 
+#ifdef CONFIG_FEC_PTP
+	if (cmd == SIOCSHWTSTAMP)
+		return fec_ptp_ioctl(ndev, rq, cmd);
+#endif
 	return phy_mii_ioctl(phydev, rq, cmd);
 }
 
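Routing SIOCSHWTSTAMP to fec_ptp_ioctl() is what lets userspace switch hwts_tx_en and hwts_rx_en on and off. Configuration goes through the standard struct hwtstamp_config ioctl; a minimal userspace sketch (the interface name "eth0" is illustrative, not mandated by the driver):

/*
 * Userspace sketch of standard SIOCSHWTSTAMP usage, not driver code:
 * enable hardware TX timestamping and all-packet RX timestamping on
 * one interface.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_hw_timestamping(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* timestamp outgoing packets */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* timestamp all received packets */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* Lands in fec_enet_ioctl() -> fec_ptp_ioctl() for this driver */
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return -1;
	}
	return 0;
}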
@@ -1151,6 +1207,9 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+#ifdef CONFIG_FEC_PTP
+		bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
 		bdp++;
 	}
 
@@ -1164,6 +1223,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 
 		bdp->cbd_sc = 0;
 		bdp->cbd_bufaddr = 0;
+
+#ifdef CONFIG_FEC_PTP
+		bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
 		bdp++;
 	}
 
@@ -1565,9 +1628,19 @@ fec_probe(struct platform_device *pdev)
 		goto failed_clk;
 	}
 
+#ifdef CONFIG_FEC_PTP
+	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+	if (IS_ERR(fep->clk_ptp)) {
+		ret = PTR_ERR(fep->clk_ptp);
+		goto failed_clk;
+	}
+#endif
+
 	clk_prepare_enable(fep->clk_ahb);
 	clk_prepare_enable(fep->clk_ipg);
-
+#ifdef CONFIG_FEC_PTP
+	clk_prepare_enable(fep->clk_ptp);
+#endif
 	reg_phy = devm_regulator_get(&pdev->dev, "phy");
 	if (!IS_ERR(reg_phy)) {
 		ret = regulator_enable(reg_phy);
@@ -1595,6 +1668,10 @@ fec_probe(struct platform_device *pdev)
 	if (ret)
 		goto failed_register;
 
+#ifdef CONFIG_FEC_PTP
+	fec_ptp_init(ndev, pdev);
+#endif
+
 	return 0;
 
 failed_register:
@@ -1604,6 +1681,9 @@ fec_probe(struct platform_device *pdev)
 failed_regulator:
 	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);
+#ifdef CONFIG_FEC_PTP
+	clk_disable_unprepare(fep->clk_ptp);
+#endif
 failed_pin:
 failed_clk:
 	for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1636,6 +1716,12 @@ fec_drv_remove(struct platform_device *pdev)
 		if (irq > 0)
 			free_irq(irq, ndev);
 	}
+#ifdef CONFIG_FEC_PTP
+	del_timer_sync(&fep->time_keep);
+	clk_disable_unprepare(fep->clk_ptp);
+	if (fep->ptp_clock)
+		ptp_clock_unregister(fep->ptp_clock);
+#endif
 	clk_disable_unprepare(fep->clk_ahb);
 	clk_disable_unprepare(fep->clk_ipg);
 	iounmap(fep->hwp);