@@ -1307,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 	return in_sack;
 }
 
-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
-			  struct tcp_sacktag_state *state,
+/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
+static u8 tcp_sacktag_one(struct sock *sk,
+			  struct tcp_sacktag_state *state, u8 sacked,
+			  u32 start_seq, u32 end_seq,
 			  int dup_sack, int pcount)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u8 sacked = TCP_SKB_CB(skb)->sacked;
 	int fack_count = state->fack_count;
 
 	/* Account D-SACK for retransmitted packet. */
 	if (dup_sack && (sacked & TCPCB_RETRANS)) {
 		if (tp->undo_marker && tp->undo_retrans &&
-		    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+		    after(end_seq, tp->undo_marker))
 			tp->undo_retrans--;
 		if (sacked & TCPCB_SACKED_ACKED)
 			state->reord = min(fack_count, state->reord);
 	}
 
 	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
-	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+	if (!after(end_seq, tp->snd_una))
 		return sacked;
 
 	if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1344,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 			/* New sack for not retransmitted frame,
 			 * which was in hole. It is reordering.
 			 */
-			if (before(TCP_SKB_CB(skb)->seq,
+			if (before(start_seq,
 				   tcp_highest_sack_seq(tp)))
 				state->reord = min(fack_count,
 						   state->reord);
 
 			/* SACK enhanced F-RTO (RFC4138; Appendix B) */
-			if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+			if (!after(end_seq, tp->frto_highmark))
 				state->flag |= FLAG_ONLY_ORIG_SACKED;
 		}
 
@@ -1368,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 
 	/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
 	if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
-	    before(TCP_SKB_CB(skb)->seq,
-		   TCP_SKB_CB(tp->lost_skb_hint)->seq))
+	    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 		tp->lost_cnt_hint += pcount;
 
 	if (fack_count > tp->fackets_out)
@@ -1425,7 +1425,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	}
 
 	/* We discard results */
-	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
+	tcp_sacktag_one(sk, state,
+			TCP_SKB_CB(skb)->sacked,
+			TCP_SKB_CB(skb)->seq,
+			TCP_SKB_CB(skb)->end_seq,
+			dup_sack, pcount);
 
 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1664,10 +1668,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 			break;
 
 		if (in_sack) {
-			TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
-								  state,
-								  dup_sack,
-								  tcp_skb_pcount(skb));
+			TCP_SKB_CB(skb)->sacked =
+				tcp_sacktag_one(sk,
+						state,
+						TCP_SKB_CB(skb)->sacked,
+						TCP_SKB_CB(skb)->seq,
+						TCP_SKB_CB(skb)->end_seq,
+						dup_sack,
+						tcp_skb_pcount(skb));
 
 			if (!before(TCP_SKB_CB(skb)->seq,
 				    tcp_highest_sack_seq(tp)))
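
The change above removes tcp_sacktag_one()'s dependence on a single skb: the caller now passes in the sacked flag byte and an explicit [start_seq, end_seq) range, so a SACK tag no longer has to coincide with one packet's boundaries. The following is a minimal, hypothetical user-space sketch of that idea (not kernel code); the names sacktag_range() and seq_after(), and the simplified types, are assumptions for illustration that only mirror the shape of the kernel interface.

/*
 * Hypothetical, simplified sketch (not kernel code): a tagging helper that
 * receives an explicit sequence range plus the current flag byte, the way
 * the reworked tcp_sacktag_one() does, instead of deriving them from an skb.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_SACKED_ACKED 0x01	/* stand-in for TCPCB_SACKED_ACKED */

/* Wrap-safe "a is after b" comparison, in the spirit of the kernel's after(). */
static int seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* Tag one SACKed range; the caller picks the range, which need not match
 * any one packet's boundaries (e.g. only the bytes shifted into another skb).
 */
static uint8_t sacktag_range(uint8_t sacked, uint32_t start_seq,
			     uint32_t end_seq, uint32_t snd_una)
{
	if (!seq_after(end_seq, start_seq))
		return sacked;		/* empty range: nothing to tag */
	if (!seq_after(end_seq, snd_una))
		return sacked;		/* already cumulatively ACKed */
	return sacked | SKETCH_SACKED_ACKED;
}

int main(void)
{
	uint32_t snd_una = 1000;

	/* A caller can now tag an arbitrary sub-range such as [1500, 2000). */
	uint8_t flags = sacktag_range(0, 1500, 2000, snd_una);
	printf("flags=0x%02x\n", flags);
	return 0;
}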