@@ -1000,25 +1000,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
 	return netdev_mc_count(netdev);
 }
 
-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+				bool *first_flag, bool *insert_empty)
 {
+	struct igc_adapter *adapter = netdev_priv(ring->netdev);
 	ktime_t cycle_time = adapter->cycle_time;
 	ktime_t base_time = adapter->base_time;
+	ktime_t now = ktime_get_clocktai();
+	ktime_t baset_est, end_of_cycle;
 	u32 launchtime;
+	s64 n;
 
-	/* FIXME: when using ETF together with taprio, we may have a
-	 * case where 'delta' is larger than the cycle_time, this may
-	 * cause problems if we don't read the current value of
-	 * IGC_BASET, as the value writen into the launchtime
-	 * descriptor field may be misinterpreted.
+	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+
+	baset_est = ktime_add_ns(base_time, cycle_time * (n));
+	end_of_cycle = ktime_add_ns(baset_est, cycle_time);
+
+	if (ktime_compare(txtime, end_of_cycle) >= 0) {
+		if (baset_est != ring->last_ff_cycle) {
+			*first_flag = true;
+			ring->last_ff_cycle = baset_est;
+
+			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+				*insert_empty = true;
+		}
+	}
+
+	/* Introducing a window at end of cycle on which packets
+	 * potentially not honor launchtime. Window of 5us chosen
+	 * considering software update the tail pointer and packets
+	 * are dma'ed to packet buffer.
 	 */
-	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
+		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
+			    txtime);
+
+	ring->last_tx_cycle = end_of_cycle;
+
+	launchtime = ktime_sub_ns(txtime, baset_est);
+	if (launchtime > 0)
+		div_s64_rem(launchtime, cycle_time, &launchtime);
+	else
+		launchtime = 0;
 
 	return cpu_to_le32(launchtime);
 }
 
+static int igc_init_empty_frame(struct igc_ring *ring,
+				struct igc_tx_buffer *buffer,
+				struct sk_buff *skb)
+{
+	unsigned int size;
+	dma_addr_t dma;
+
+	size = skb_headlen(skb);
+
+	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma)) {
+		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+		return -ENOMEM;
+	}
+
+	buffer->skb = skb;
+	buffer->protocol = 0;
+	buffer->bytecount = skb->len;
+	buffer->gso_segs = 1;
+	buffer->time_stamp = jiffies;
+	dma_unmap_len_set(buffer, len, skb->len);
+	dma_unmap_addr_set(buffer, dma, dma);
+
+	return 0;
+}
+
+static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
+					struct sk_buff *skb,
+					struct igc_tx_buffer *first)
+{
+	union igc_adv_tx_desc *desc;
+	u32 cmd_type, olinfo_status;
+	int err;
+
+	if (!igc_desc_unused(ring))
+		return -EBUSY;
+
+	err = igc_init_empty_frame(ring, first, skb);
+	if (err)
+		return err;
+
+	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+		   first->bytecount;
+	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+	desc = IGC_TX_DESC(ring, ring->next_to_use);
+	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
+
+	netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+	first->next_to_watch = desc;
+
+	ring->next_to_use++;
+	if (ring->next_to_use == ring->count)
+		ring->next_to_use = 0;
+
+	return 0;
+}
+
+#define IGC_EMPTY_FRAME_SIZE	60
+
 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
-			    struct igc_tx_buffer *first,
+			    __le32 launch_time, bool first_flag,
 			    u32 vlan_macip_lens, u32 type_tucmd,
 			    u32 mss_l4len_idx)
 {
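As a reading aid for the hunk above (editor's illustration, not part of the patch), here is a minimal userspace sketch of the cycle arithmetic in igc_tx_launchtime(): plain 64-bit nanosecond counters stand in for ktime_t/div64_s64(), and the 1 ms cycle, 5.3 ms "now" and 6.1 ms txtime are made-up values.

/* Illustrative userspace model of the launchtime math; not driver code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t base_time  = 0;         /* schedule start (ns)        */
	int64_t cycle_time = 1000000;   /* 1 ms Qbv cycle             */
	int64_t now        = 5300000;   /* 5.3 ms after base_time     */
	int64_t txtime     = 6100000;   /* requested launch time      */

	/* n = number of whole cycles elapsed since base_time */
	int64_t n = (now - base_time) / cycle_time;        /* 5        */
	int64_t baset_est = base_time + cycle_time * n;    /* 5.0 ms   */
	int64_t end_of_cycle = baset_est + cycle_time;     /* 6.0 ms   */

	/* txtime falls beyond the current cycle, so the driver would
	 * arm the "first flag" context descriptor for the next cycle.
	 */
	int first_flag = txtime >= end_of_cycle;

	/* launchtime written to the descriptor is the offset of txtime
	 * inside a cycle, measured from the estimated cycle start.
	 */
	int64_t launchtime = txtime - baset_est;
	if (launchtime > 0)
		launchtime %= cycle_time;
	else
		launchtime = 0;

	printf("n=%lld baset_est=%lld first_flag=%d launchtime=%lld\n",
	       (long long)n, (long long)baset_est, first_flag,
	       (long long)launchtime);
	/* prints: n=5 baset_est=5000000 first_flag=1 launchtime=100000 */
	return 0;
}

With these inputs the current cycle is estimated to start at 5 ms, txtime lands past the cycle end, so the first-flag path is taken and the descriptor launchtime becomes the 100 us offset into the cycle.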
@@ -1037,26 +1130,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
+	if (first_flag)
+		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-	/* We assume there is always a valid Tx time available. Invalid times
-	 * should have been handled by the upper layers.
-	 */
-	if (tx_ring->launchtime_enable) {
-		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
-		ktime_t txtime = first->skb->tstamp;
-
-		skb_txtime_consumed(first->skb);
-		context_desc->launch_time = igc_tx_launchtime(adapter,
-							      txtime);
-	} else {
-		context_desc->launch_time = 0;
-	}
+	context_desc->launch_time = launch_time;
 }
 
-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+			__le32 launch_time, bool first_flag)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
@@ -1096,7 +1180,8 @@ static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, 0);
 }
 
 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
@@ -1320,6 +1405,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
 
 static int igc_tso(struct igc_ring *tx_ring,
 		   struct igc_tx_buffer *first,
+		   __le32 launch_time, bool first_flag,
 		   u8 *hdr_len)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
@@ -1406,20 +1492,23 @@ static int igc_tso(struct igc_ring *tx_ring,
 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
-			type_tucmd, mss_l4len_idx);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
 	return 1;
 }
 
 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 				       struct igc_ring *tx_ring)
 {
+	bool first_flag = false, insert_empty = false;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol = vlan_get_protocol(skb);
 	struct igc_tx_buffer *first;
+	__le32 launch_time = 0;
 	u32 tx_flags = 0;
 	unsigned short f;
+	ktime_t txtime;
 	u8 hdr_len = 0;
 	int tso = 0;
 
@@ -1433,11 +1522,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 		count += TXD_USE_COUNT(skb_frag_size(
 			&skb_shinfo(skb)->frags[f]));
 
-	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
 
+	if (!tx_ring->launchtime_enable)
+		goto done;
+
+	txtime = skb->tstamp;
+	skb->tstamp = ktime_set(0, 0);
+	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
+
+	if (insert_empty) {
+		struct igc_tx_buffer *empty_info;
+		struct sk_buff *empty;
+		void *data;
+
+		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+		if (!empty)
+			goto done;
+
+		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
+		memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+		if (igc_init_tx_empty_descriptor(tx_ring,
+						 empty,
+						 empty_info) < 0)
+			dev_kfree_skb_any(empty);
+	}
+
+done:
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 	first->type = IGC_TX_BUFFER_TYPE_SKB;
@@ -1474,11 +1592,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	first->tx_flags = tx_flags;
 	first->protocol = protocol;
 
-	tso = igc_tso(tx_ring, first, &hdr_len);
+	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
-		igc_tx_csum(tx_ring, first);
+		igc_tx_csum(tx_ring, first, launch_time, first_flag);
 
 	igc_tx_map(tx_ring, first, hdr_len);
 
@@ -5925,10 +6043,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
 	u32 start_time = 0, end_time = 0;
 	size_t n;
+	int i;
+
+	adapter->qbv_enable = qopt->enable;
 
 	if (!qopt->enable)
 		return igc_tsn_clear_schedule(adapter);
 
+	if (qopt->base_time < 0)
+		return -ERANGE;
+
 	if (adapter->base_time)
 		return -EALREADY;
 
@@ -5940,10 +6064,24 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 
 	for (n = 0; n < qopt->num_entries; n++) {
 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
-		int i;
 
 		end_time += e->interval;
 
+		/* If any of the conditions below are true, we need to manually
+		 * control the end time of the cycle.
+		 * 1. Qbv users can specify a cycle time that is not equal
+		 *    to the total GCL intervals. Hence, recalculation is
+		 *    necessary here to exclude the time interval that
+		 *    exceeds the cycle time.
+		 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
+		 *    once the end of the list is reached, it will switch
+		 *    to the END_OF_CYCLE state and leave the gates in the
+		 *    same state until the next cycle is started.
+		 */
+		if (end_time > adapter->cycle_time ||
+		    n + 1 == qopt->num_entries)
+			end_time = adapter->cycle_time;
+
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			struct igc_ring *ring = adapter->tx_ring[i];
 
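To make the clamping rule in the hunk above concrete (editor's illustration, not part of the patch), the following userspace sketch walks a made-up gate control list of 400 us + 400 us + 300 us entries through a 1 ms cycle; the intervals sum to 1.1 ms, so the last window is cut at the cycle boundary, and the final entry's end time is pinned to cycle_time in any case.

/* Illustrative userspace model of the Qbv end_time clamping; not driver code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cycle_time = 1000000;                    /* 1 ms in ns */
	uint32_t interval[] = { 400000, 400000, 300000 }; /* GCL entries */
	int num_entries = 3;
	uint32_t start_time = 0, end_time = 0;

	for (int n = 0; n < num_entries; n++) {
		end_time += interval[n];

		/* Same rule as the driver: clamp when the accumulated end
		 * time overruns the cycle, or when this is the last entry.
		 */
		if (end_time > cycle_time || n + 1 == num_entries)
			end_time = cycle_time;

		printf("entry %d: start=%u end=%u\n", n, start_time, end_time);
		start_time += interval[n];
	}
	/* prints:
	 * entry 0: start=0      end=400000
	 * entry 1: start=400000 end=800000
	 * entry 2: start=800000 end=1000000
	 */
	return 0;
}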
@@ -5964,6 +6102,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 		start_time += e->interval;
 	}
 
+	/* Check whether a queue gets configured.
+	 * If not, set the start and end time to be end time.
+	 */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (!queue_configured[i]) {
+			struct igc_ring *ring = adapter->tx_ring[i];
+
+			ring->start_time = end_time;
+			ring->end_time = end_time;
+		}
+	}
+
 	return 0;
 }
 