@@ -201,28 +201,25 @@ static inline bool tcp_in_quickack_mode(const struct sock *sk)
 	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 }
 
-static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
+static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
 {
 	if (tp->ecn_flags & TCP_ECN_OK)
 		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 }
 
-static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
+static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 {
 	if (tcp_hdr(skb)->cwr)
 		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
+static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 {
 	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 }
 
-static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
 {
-	if (!(tp->ecn_flags & TCP_ECN_OK))
-		return;
-
 	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
 	case INET_ECN_NOT_ECT:
 		/* Funny extension: if ECT is not set on a segment,
@@ -251,19 +248,25 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
 	}
 }
 
-static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+{
+	if (tp->ecn_flags & TCP_ECN_OK)
+		__tcp_ecn_check_ce(tp, skb);
+}
+
+static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
+static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
 		return true;
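
Note: besides the lower-case rename, these two hunks restructure the CE check. The (tp->ecn_flags & TCP_ECN_OK) early return is hoisted out of __tcp_ecn_check_ce() into the new tcp_ecn_check_ce() wrapper, so the common case where ECN was never negotiated costs a single flag test and no call into the switch. A minimal standalone sketch of the same guard-hoisting pattern (the toy_* names and flag values are illustrative, not kernel code):

#include <stdio.h>

#define TOY_ECN_OK         1	/* assumed flag values, for illustration only */
#define TOY_ECN_DEMAND_CWR 4

struct toy_sock {
	unsigned char ecn_flags;
};

/* Slow path: may assume the caller already verified TOY_ECN_OK. */
static void __toy_check_ce(struct toy_sock *tp, int ce_marked)
{
	if (ce_marked)
		tp->ecn_flags |= TOY_ECN_DEMAND_CWR;
}

/* Cheap wrapper: the hoisted guard keeps the no-ECN case to one test. */
static void toy_check_ce(struct toy_sock *tp, int ce_marked)
{
	if (tp->ecn_flags & TOY_ECN_OK)
		__toy_check_ce(tp, ce_marked);
}

int main(void)
{
	struct toy_sock tp = { .ecn_flags = TOY_ECN_OK };

	toy_check_ce(&tp, 1);	/* a CE-marked segment demands CWR from the peer */
	printf("DEMAND_CWR set: %d\n", !!(tp.ecn_flags & TOY_ECN_DEMAND_CWR));
	return 0;
}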
@@ -660,7 +663,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 	}
 	icsk->icsk_ack.lrcvtime = now;
 
-	TCP_ECN_check_ce(tp, skb);
+	tcp_ecn_check_ce(tp, skb);
 
 	if (skb->len >= 128)
 		tcp_grow_window(sk, skb);
@@ -1976,7 +1979,7 @@ void tcp_enter_loss(struct sock *sk)
 					   sysctl_tcp_reordering);
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
-	TCP_ECN_queue_cwr(tp);
+	tcp_ecn_queue_cwr(tp);
 
 	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
 	 * loss recovery is underway except recurring timeout(s) on
@@ -2368,7 +2371,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 
 		if (tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
-			TCP_ECN_withdraw_cwr(tp);
+			tcp_ecn_withdraw_cwr(tp);
 		}
 	} else {
 		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
@@ -2498,7 +2501,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 	tp->prr_delivered = 0;
 	tp->prr_out = 0;
 	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
-	TCP_ECN_queue_cwr(tp);
+	tcp_ecn_queue_cwr(tp);
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
@@ -3453,7 +3456,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
 						&sack_rtt_us);
 
-	if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) {
+	if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
 		flag |= FLAG_ECE;
 		ack_ev_flags |= CA_ACK_ECE;
 	}
@@ -4193,7 +4196,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff *skb1;
 	u32 seq, end_seq;
 
-	TCP_ECN_check_ce(tp, skb);
+	tcp_ecn_check_ce(tp, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
@@ -4376,7 +4379,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	skb_dst_drop(skb);
 	__skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
-	TCP_ECN_accept_cwr(tp, skb);
+	tcp_ecn_accept_cwr(tp, skb);
 
 	tp->rx_opt.dsack = 0;
 
@@ -5457,7 +5460,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		 *    state to ESTABLISHED..."
 		 */
 
-		TCP_ECN_rcv_synack(tp, th);
+		tcp_ecn_rcv_synack(tp, th);
 
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 		tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -5576,7 +5579,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
 		tp->max_window = tp->snd_wnd;
 
-		TCP_ECN_rcv_syn(tp, th);
+		tcp_ecn_rcv_syn(tp, th);
 
 		tcp_mtup_init(sk);
 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
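
For reference, the two handshake helpers renamed above encode the RFC 3168 negotiation rules: an ECN-setup SYN carries ECE=1/CWR=1, an ECN-setup SYN-ACK carries ECE=1/CWR=0, and anything else clears TCP_ECN_OK for the connection. A hedged standalone restatement of those predicates (toy header struct, not the kernel's struct tcphdr):

#include <assert.h>
#include <stdbool.h>

/* Toy stand-in for the two TCP flag bits the helpers inspect. */
struct toy_tcphdr {
	unsigned int ece:1;
	unsigned int cwr:1;
};

/* Mirrors tcp_ecn_rcv_synack(): ECN stays enabled only for a
 * conforming ECN-setup SYN-ACK, i.e. ECE=1 and CWR=0. */
static bool ecn_ok_after_synack(const struct toy_tcphdr *th)
{
	return th->ece && !th->cwr;
}

/* Mirrors tcp_ecn_rcv_syn(): a conforming ECN-setup SYN sets both
 * ECE and CWR. */
static bool ecn_ok_after_syn(const struct toy_tcphdr *th)
{
	return th->ece && th->cwr;
}

int main(void)
{
	struct toy_tcphdr syn    = { .ece = 1, .cwr = 1 };
	struct toy_tcphdr synack = { .ece = 1, .cwr = 0 };

	assert(ecn_ok_after_syn(&syn));
	assert(ecn_ok_after_synack(&synack));
	assert(!ecn_ok_after_synack(&syn));	/* a SYN-ACK must not set CWR */
	return 0;
}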