
Commit 19119f2

Eric Dumazet authored and davem330 committed
tcp: take care of compressed acks in tcp_add_reno_sack()
Neal pointed out that non-SACK flows might suffer from the ACK compression added in the following patch ("tcp: implement coalescing on backlog queue"). Instead of tweaking tcp_add_backlog() we can take into account how many ACKs were coalesced; this information is available in skb_shinfo(skb)->gso_segs.

Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
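To make the accounting change concrete, here is a minimal user-space sketch, not kernel code: the struct and names are toys, and the tcp_check_reno_reordering() clamping step is omitted. The point is that one skb standing for N coalesced duplicate ACKs must advance sacked_out and delivered by N, which the old bool argument could not express.

/* Minimal user-space model of the patched tcp_add_reno_sack();
 * struct, types and omitted reordering clamp are simplifications. */
#include <stdio.h>

struct toy_tp {
	unsigned int sacked_out;	/* dupacks emulating SACK blocks */
	unsigned int delivered;		/* packets presumed delivered */
};

static void add_reno_sack(struct toy_tp *tp, int num_dupack)
{
	if (num_dupack) {
		unsigned int prior_sacked = tp->sacked_out;
		int delivered;

		tp->sacked_out += num_dupack;
		/* the kernel calls tcp_check_reno_reordering() here,
		 * which may clamp sacked_out; omitted in this sketch */
		delivered = tp->sacked_out - prior_sacked;
		if (delivered > 0)
			tp->delivered += delivered;
	}
}

int main(void)
{
	struct toy_tp tp = { 0, 0 };

	/* one backlog-coalesced skb standing for 3 pure duplicate ACKs */
	add_reno_sack(&tp, 3);
	printf("sacked_out=%u delivered=%u\n", tp.sacked_out, tp.delivered);
	return 0;
}

Compiled and run, this prints sacked_out=3 delivered=3; the old bool-based path would have credited a single dupack for the same skb.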
1 parent ebeef4b commit 19119f2

File tree: 1 file changed (+33, −25 lines)


net/ipv4/tcp_input.c

Lines changed: 33 additions & 25 deletions
@@ -1865,16 +1865,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct sock *sk)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	u32 prior_sacked = tp->sacked_out;
+	if (num_dupack) {
+		struct tcp_sock *tp = tcp_sk(sk);
+		u32 prior_sacked = tp->sacked_out;
+		s32 delivered;
 
-	tp->sacked_out++;
-	tcp_check_reno_reordering(sk, 0);
-	if (tp->sacked_out > prior_sacked)
-		tp->delivered++; /* Some out-of-order packet is delivered */
-	tcp_verify_left_out(tp);
+		tp->sacked_out += num_dupack;
+		tcp_check_reno_reordering(sk, 0);
+		delivered = tp->sacked_out - prior_sacked;
+		if (delivered > 0)
+			tp->delivered += delivered;
+		tcp_verify_left_out(tp);
+	}
 }
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
@@ -2636,7 +2640,7 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
  * recovered or spurious. Otherwise retransmits more on partial ACKs.
  */
-static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
 			     int *rexmit)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2655,7 +2659,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
 		return;
 
 	if (after(tp->snd_nxt, tp->high_seq)) {
-		if (flag & FLAG_DATA_SACKED || is_dupack)
+		if (flag & FLAG_DATA_SACKED || num_dupack)
 			tp->frto = 0; /* Step 3.a. loss was real */
 	} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
 		tp->high_seq = tp->snd_nxt;
@@ -2681,8 +2685,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
 	/* A Reno DUPACK means new data in F-RTO step 2.b above are
 	 * delivered. Lower inflight to clock out (re)tranmissions.
 	 */
-	if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
-		tcp_add_reno_sack(sk);
+	if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
+		tcp_add_reno_sack(sk, num_dupack);
 	else if (flag & FLAG_SND_UNA_ADVANCED)
 		tcp_reset_reno_sack(tp);
 }
@@ -2759,13 +2763,13 @@ static bool tcp_force_fast_retransmit(struct sock *sk)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
-				  bool is_dupack, int *ack_flag, int *rexmit)
+				  int num_dupack, int *ack_flag, int *rexmit)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fast_rexmit = 0, flag = *ack_flag;
-	bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
-				     tcp_force_fast_retransmit(sk));
+	bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
+				      tcp_force_fast_retransmit(sk));
 
 	if (!tp->packets_out && tp->sacked_out)
 		tp->sacked_out = 0;
@@ -2812,8 +2816,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
-			if (tcp_is_reno(tp) && is_dupack)
-				tcp_add_reno_sack(sk);
+			if (tcp_is_reno(tp))
+				tcp_add_reno_sack(sk, num_dupack);
 		} else {
 			if (tcp_try_undo_partial(sk, prior_snd_una))
 				return;
@@ -2828,7 +2832,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		tcp_identify_packet_loss(sk, ack_flag);
 		break;
 	case TCP_CA_Loss:
-		tcp_process_loss(sk, flag, is_dupack, rexmit);
+		tcp_process_loss(sk, flag, num_dupack, rexmit);
 		tcp_identify_packet_loss(sk, ack_flag);
 		if (!(icsk->icsk_ca_state == TCP_CA_Open ||
 		      (*ack_flag & FLAG_LOST_RETRANS)))
@@ -2839,8 +2843,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
-			if (is_dupack)
-				tcp_add_reno_sack(sk);
+			tcp_add_reno_sack(sk, num_dupack);
 		}
 
 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -3562,7 +3565,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	bool is_sack_reneg = tp->is_sack_reneg;
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
-	bool is_dupack = false;
+	int num_dupack = 0;
 	int prior_packets = tp->packets_out;
 	u32 delivered = tp->delivered;
 	u32 lost = tp->lost;
@@ -3673,8 +3676,13 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		tcp_set_xmit_timer(sk);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
-		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+		if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
+			num_dupack = 1;
+			/* Consider if pure acks were aggregated in tcp_add_backlog() */
+			if (!(flag & FLAG_DATA))
+				num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+		}
+		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
 				      &rexmit);
 	}
 
@@ -3692,7 +3700,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 no_queue:
 	/* If data was DSACKed, see if we can undo a cwnd reduction. */
 	if (flag & FLAG_DSACKING_ACK) {
-		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
 				      &rexmit);
 		tcp_newly_delivered(sk, delivered, flag);
 	}
@@ -3717,7 +3725,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (TCP_SKB_CB(skb)->sacked) {
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
 						&sack_state);
-		tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
 				      &rexmit);
 		tcp_newly_delivered(sk, delivered, flag);
 		tcp_xmit_recovery(sk, rexmit);
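For readers skimming the tcp_ack() hunk above, the num_dupack derivation can be read stand-alone in the sketch below. The FLAG_* bit values here are illustrative placeholders for the kernel's (where FLAG_NOT_DUP is likewise a composite mask), and the floor of 1 exists because skb_shinfo(skb)->gso_segs is 0 on an ordinary, uncoalesced pure ACK.

/* Stand-alone sketch of the num_dupack computation in tcp_ack();
 * flag bit values are illustrative, not the kernel's definitions. */
#include <stdio.h>

#define FLAG_DATA		0x01	/* ACK carried payload */
#define FLAG_WIN_UPDATE		0x02	/* ACK updated the window */
#define FLAG_SND_UNA_ADVANCED	0x04	/* ACK moved snd_una forward */
#define FLAG_NOT_DUP		(FLAG_DATA | FLAG_WIN_UPDATE)

static int count_dupacks(int flag, unsigned short gso_segs)
{
	int num_dupack = 0;

	if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
		num_dupack = 1;
		/* pure ACK: gso_segs holds how many ACKs the backlog
		 * coalescing folded into this skb, but is 0 when no
		 * coalescing happened, hence the floor of 1 */
		if (!(flag & FLAG_DATA))
			num_dupack = gso_segs > 1 ? gso_segs : 1;
	}
	return num_dupack;
}

int main(void)
{
	printf("%d\n", count_dupacks(0, 0));	/* plain dupack -> 1 */
	printf("%d\n", count_dupacks(0, 4));	/* 4 coalesced dupacks -> 4 */
	printf("%d\n", count_dupacks(FLAG_SND_UNA_ADVANCED, 4)); /* not a dupack -> 0 */
	return 0;
}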
