@@ -203,21 +203,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 	}
 }
 
-static void tcp_incr_quickack(struct sock *sk)
+static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 
 	if (quickacks == 0)
 		quickacks = 2;
+	quickacks = min(quickacks, max_quickacks);
 	if (quickacks > icsk->icsk_ack.quick)
-		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+		icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk)
+static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	tcp_incr_quickack(sk);
+
+	tcp_incr_quickack(sk, max_quickacks);
 	icsk->icsk_ack.pingpong = 0;
 	icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
@@ -261,15 +263,15 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 		 * it is probably a retransmit.
 		 */
 		if (tp->ecn_flags & TCP_ECN_SEEN)
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, 1);
 		break;
 	case INET_ECN_CE:
 		if (tcp_ca_needs_ecn((struct sock *)tp))
 			tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
 
 		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
 			/* Better not delay acks, sender can have a very low cwnd */
-			tcp_enter_quickack_mode((struct sock *)tp);
+			tcp_enter_quickack_mode((struct sock *)tp, 1);
 			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 		}
 		tp->ecn_flags |= TCP_ECN_SEEN;
@@ -686,7 +688,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 		/* The _first_ data packet received, initialize
 		 * delayed ACK engine.
 		 */
-		tcp_incr_quickack(sk);
+		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 		icsk->icsk_ack.ato = TCP_ATO_MIN;
 	} else {
 		int m = now - icsk->icsk_ack.lrcvtime;
@@ -702,7 +704,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 			/* Too long gap. Apparently sender failed to
 			 * restart window, so that we send ACKs quickly.
 			 */
-			tcp_incr_quickack(sk);
+			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 			sk_mem_reclaim(sk);
 		}
 	}
@@ -4179,7 +4181,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
 		if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4706,7 +4708,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_schedule_ack(sk);
 drop:
 		tcp_drop(sk, skb);
@@ -5790,7 +5792,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 * to stand against the temptation 8)     --ANK
 		 */
 		inet_csk_schedule_ack(sk);
-		tcp_enter_quickack_mode(sk);
+		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 					  TCP_DELACK_MAX, TCP_RTO_MAX);
0 commit comments