@@ -1917,19 +1917,54 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
         tp->undo_retrans = tp->retrans_out ? : -1;
 }
 
-/* Enter Loss state. If we detect SACK reneging, forget all SACK information
+static bool tcp_is_rack(const struct sock *sk)
+{
+        return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
+}
+
+/* If we detect SACK reneging, forget all SACK information
  * and reset tags completely, otherwise preserve SACKs. If receiver
  * dropped its ofo queue, we will know this due to reneging detection.
  */
+static void tcp_timeout_mark_lost(struct sock *sk)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        struct sk_buff *skb, *head;
+        bool is_reneg;                  /* is receiver reneging on SACKs? */
+
+        head = tcp_rtx_queue_head(sk);
+        is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED);
+        if (is_reneg) {
+                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+                tp->sacked_out = 0;
+                /* Mark SACK reneging until we recover from this loss event. */
+                tp->is_sack_reneg = 1;
+        } else if (tcp_is_reno(tp)) {
+                tcp_reset_reno_sack(tp);
+        }
+
+        skb = head;
+        skb_rbtree_walk_from(skb) {
+                if (is_reneg)
+                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
+                else if (tcp_is_rack(sk) && skb != head &&
+                         tcp_rack_skb_timeout(tp, skb, 0) > 0)
+                        continue; /* Don't mark recently sent ones lost yet */
+                tcp_mark_skb_lost(sk, skb);
+        }
+        tcp_verify_left_out(tp);
+        tcp_clear_all_retrans_hints(tp);
+}
+
+/* Enter Loss state. */
 void tcp_enter_loss(struct sock *sk)
 {
         const struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         struct net *net = sock_net(sk);
-        struct sk_buff *skb;
         bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
-        bool is_reneg;                  /* is receiver reneging on SACKs? */
-        bool mark_lost;
+
+        tcp_timeout_mark_lost(sk);
 
         /* Reduce ssthresh if it has not yet been made inside this window. */
         if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
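
Note: tcp_timeout_mark_lost() replaces the old behavior of marking every outstanding segment lost on an RTO. When RACK is enabled, the rbtree walk above skips segments that were (re)sent so recently that tcp_rack_skb_timeout() still reports a positive remaining timeout. Below is a stand-alone sketch of that test, with illustrative names and the remaining-time formula assumed from RACK's design (RTT plus reordering window, minus the segment's age); it is a model, not the kernel helper itself.

#include <stdint.h>

/* Hypothetical model of the RACK per-skb timeout test; the struct and
 * field names here are illustrative, not the kernel's. */
struct rack_model {
        uint64_t now_us;     /* current timestamp */
        uint64_t rtt_us;     /* RACK's RTT estimate */
        uint64_t reo_wnd_us; /* allowance for packet reordering */
};

/* Positive result: segment sent too recently to be declared lost yet. */
static int64_t rack_skb_timeout_us(const struct rack_model *r, uint64_t tx_us)
{
        return (int64_t)(r->rtt_us + r->reo_wnd_us) -
               (int64_t)(r->now_us - tx_us);
}

The hunk passes a reordering window of 0, so on timeout only segments younger than roughly one RTT escape being marked lost.
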
@@ -1941,40 +1976,10 @@ void tcp_enter_loss(struct sock *sk)
                 tcp_ca_event(sk, CA_EVENT_LOSS);
                 tcp_init_undo(tp);
         }
-        tp->snd_cwnd       = 1;
+        tp->snd_cwnd       = tcp_packets_in_flight(tp) + 1;
         tp->snd_cwnd_cnt   = 0;
         tp->snd_cwnd_stamp = tcp_jiffies32;
 
-        tp->retrans_out = 0;
-        tp->lost_out = 0;
-
-        if (tcp_is_reno(tp))
-                tcp_reset_reno_sack(tp);
-
-        skb = tcp_rtx_queue_head(sk);
-        is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
-        if (is_reneg) {
-                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
-                tp->sacked_out = 0;
-                /* Mark SACK reneging until we recover from this loss event. */
-                tp->is_sack_reneg = 1;
-        }
-        tcp_clear_all_retrans_hints(tp);
-
-        skb_rbtree_walk_from(skb) {
-                mark_lost = (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
-                             is_reneg);
-                if (mark_lost)
-                        tcp_sum_lost(tp, skb);
-                TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
-                if (mark_lost) {
-                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
-                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-                        tp->lost_out += tcp_skb_pcount(skb);
-                }
-        }
-        tcp_verify_left_out(tp);
-
         /* Timeout in disordered state after receiving substantial DUPACKs
          * suggests that the degree of reordering is over-estimated.
          */
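
Note: tcp_enter_loss() no longer collapses snd_cwnd to 1. Since tcp_timeout_mark_lost() can now leave recently sent segments unmarked, those segments still count as in flight, and cwnd must cover them plus one extra slot so the sender can actually push a retransmission. For reference, tcp_packets_in_flight() is defined in include/net/tcp.h along these lines (paraphrased):

/* Segments believed to be in the network:
 * sent - (SACKed + lost) + retransmitted. */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
        return tp->packets_out - (tp->sacked_out + tp->lost_out) +
               tp->retrans_out;
}

Previously everything was marked lost on RTO, so the in-flight count dropped to essentially zero and the old snd_cwnd = 1 gave the same value; the new form generalizes it to the RACK case.
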
@@ -2141,7 +2146,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
                 return true;
 
         /* Not-A-Trick#2 : Classic rule... */
-        if (tcp_dupack_heuristics(tp) > tp->reordering)
+        if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
                 return true;
 
         return false;
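
Note: with RACK in charge of loss detection, the classic "dupacks above the reordering threshold" trigger is bypassed. A toy model (not kernel code) of the resulting decision, assuming the "Trick#1" check on lost_out that precedes this hunk:

/* Toy model of tcp_time_to_recover() after this change. */
static int time_to_recover(int rack_enabled, unsigned int lost_out,
                           unsigned int dupacks, unsigned int reordering)
{
        if (lost_out)           /* Trick#1: loss already proven (e.g. by RACK) */
                return 1;
        if (!rack_enabled && dupacks > reordering)
                return 1;       /* classic RFC 6675-style rule */
        return 0;
}
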
@@ -2218,9 +2223,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
-        if (tcp_is_reno(tp)) {
-                tcp_mark_head_lost(sk, 1, 1);
-        } else {
+        if (tcp_is_sack(tp)) {
                 int sacked_upto = tp->sacked_out - tp->reordering;
                 if (sacked_upto >= 0)
                         tcp_mark_head_lost(sk, sacked_upto, 0);
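
Note: tcp_update_scoreboard() becomes SACK-only. The removed NewReno branch, which marked one head segment lost per dupack, is superseded by tcp_newreno_mark_lost(), called from tcp_identify_packet_loss() in the next hunk.
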
@@ -2718,12 +2721,16 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
         return false;
 }
 
-static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag)
+static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
-        /* Use RACK to detect loss */
-        if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) {
+        if (tcp_rtx_queue_empty(sk))
+                return;
+
+        if (unlikely(tcp_is_reno(tp))) {
+                tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
+        } else if (tcp_is_rack(sk)) {
                 u32 prior_retrans = tp->retrans_out;
 
                 tcp_rack_mark_lost(sk);
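
Note: the renamed tcp_identify_packet_loss() now dispatches by connection flavor: NewReno connections go through the new tcp_newreno_mark_lost() helper (introduced elsewhere in this series; its body is not shown here), and SACK connections use RACK when enabled. A hypothetical sketch of what the NewReno helper is assumed to do:

/* Hypothetical sketch, NOT the kernel implementation: NewReno has no SACK
 * scoreboard, so at most the segment at the head of the rtx queue can be
 * inferred lost, once per round trip. */
static void newreno_mark_lost_sketch(struct sock *sk, bool snd_una_advanced)
{
        struct sk_buff *head = tcp_rtx_queue_head(sk);

        /* Head already marked and snd_una unchanged: same round trip,
         * no new information. */
        if ((TCP_SKB_CB(head)->sacked & TCPCB_LOST) && !snd_una_advanced)
                return;

        tcp_mark_skb_lost(sk, head);
}
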
@@ -2819,11 +2826,11 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                         tcp_try_keep_open(sk);
                         return;
                 }
-                tcp_rack_identify_loss(sk, ack_flag);
+                tcp_identify_packet_loss(sk, ack_flag);
                 break;
         case TCP_CA_Loss:
                 tcp_process_loss(sk, flag, is_dupack, rexmit);
-                tcp_rack_identify_loss(sk, ack_flag);
+                tcp_identify_packet_loss(sk, ack_flag);
                 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
                       (*ack_flag & FLAG_LOST_RETRANS)))
                         return;
@@ -2840,7 +2847,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                         tcp_try_undo_dsack(sk);
 
-                tcp_rack_identify_loss(sk, ack_flag);
+                tcp_identify_packet_loss(sk, ack_flag);
                 if (!tcp_time_to_recover(sk, flag)) {
                         tcp_try_to_open(sk, flag);
                         return;
@@ -2862,7 +2869,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                 fast_rexmit = 1;
         }
 
-        if (do_lost)
+        if (!tcp_is_rack(sk) && do_lost)
                 tcp_update_scoreboard(sk, fast_rexmit);
         *rexmit = REXMIT_LOST;
 }
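
Note: the final hunk skips the dupack-driven scoreboard pass entirely when RACK is in use, because tcp_identify_packet_loss() has already marked losses by send time; tcp_update_scoreboard() remains only as the non-RACK fallback.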