@@ -97,10 +97,9 @@ struct bbr {
 		packet_conservation:1,  /* use packet conservation? */
 		restore_cwnd:1,	     /* decided to revert cwnd to old value */
 		round_start:1,	     /* start of packet-timed tx->ack round? */
-		tso_segs_goal:7,     /* segments we want in each skb we send */
 		idle_restart:1,	     /* restarting after idle? */
 		probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
-		unused:5,
+		unused:12,
 		lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
 		lt_rtt_cnt:7,	     /* round trips in long-term interval */
 		lt_use_bw:1;	     /* use lt_bw as our bw estimate? */
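Note on the bit accounting above: dropping the 7-bit tso_segs_goal field and widening unused from 5 to 12 bits keeps the packed flag word the same size, so struct bbr should still fit the per-socket ICSK_CA_PRIV_SIZE budget that bbr_register() checks at build time. A standalone sketch covering only the bit widths visible in this hunk (fields outside the hunk are omitted, so this is an illustration, not the full layout):

/* Widths copied from the hunk above; everything outside it is ignored. */
#define OLD_BITS (1 + 1 + 1 + 7 + 1 + 1 + 5 + 1 + 7 + 1)   /* with tso_segs_goal:7 and unused:5 */
#define NEW_BITS (1 + 1 + 1 + 1 + 1 + 12 + 1 + 7 + 1)      /* tso_segs_goal removed, unused:12 */

_Static_assert(OLD_BITS == NEW_BITS, "flag bit budget unchanged by this patch");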
@@ -261,23 +260,25 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 		sk->sk_pacing_rate = rate;
 }
 
-/* Return count of segments we want in the skbs we send, or 0 for default. */
-static u32 bbr_tso_segs_goal(struct sock *sk)
+/* override sysctl_tcp_min_tso_segs */
+static u32 bbr_min_tso_segs(struct sock *sk)
 {
-	struct bbr *bbr = inet_csk_ca(sk);
-
-	return bbr->tso_segs_goal;
+	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
 }
 
-static void bbr_set_tso_segs_goal(struct sock *sk)
+static u32 bbr_tso_segs_goal(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct bbr *bbr = inet_csk_ca(sk);
-	u32 min_segs;
+	u32 segs, bytes;
+
+	/* Sort of tcp_tso_autosize() but ignoring
+	 * driver provided sk_gso_max_size.
+	 */
+	bytes = min_t(u32, sk->sk_pacing_rate >> sk->sk_pacing_shift,
+		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
+	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
 
-	min_segs = sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
-	bbr->tso_segs_goal = min(tcp_tso_autosize(sk, tp->mss_cache, min_segs),
-				 0x7FU);
+	return min(segs, 0x7FU);
 }
 
 /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
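How the two new helpers divide the work: bbr_min_tso_segs() only supplies the floor (a single MSS below bbr_min_tso_rate, shifted by 3 to convert bits/sec to the byte-based sk_pacing_rate, otherwise at least 2), while bbr_tso_segs_goal() recomputes the burst size on demand from the current pacing rate: roughly one sk_pacing_shift interval's worth of bytes, capped so a full skb plus headers stays below GSO_MAX_SIZE, converted to segments and clamped to 0x7F. The sketch below replays that arithmetic in plain userspace C; the constants (GSO_MAX_SIZE = 65536, MAX_TCP_HEADER roughly 320, default sk_pacing_shift of 10) are assumptions restated for illustration, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed constants, approximating the kernel defaults of this era. */
#define GSO_MAX_SIZE	65536u
#define MAX_TCP_HEADER	320u		/* config dependent in the kernel; rough value */
#define PACING_SHIFT	10u		/* default sk_pacing_shift: budget ~1ms of data */
#define MIN_TSO_RATE	1200000u	/* bbr_min_tso_rate, in bits/sec */

static uint32_t min_tso_segs(uint64_t pacing_rate_Bps)
{
	/* Below ~1.2 Mbit/s allow single-MSS skbs, otherwise insist on at least 2. */
	return pacing_rate_Bps < (MIN_TSO_RATE >> 3) ? 1 : 2;
}

static uint32_t tso_segs_goal(uint64_t pacing_rate_Bps, uint32_t mss)
{
	uint64_t bytes = pacing_rate_Bps >> PACING_SHIFT;	/* ~1ms worth of payload */
	uint32_t segs;

	if (bytes > GSO_MAX_SIZE - 1 - MAX_TCP_HEADER)		/* keep skb + headers under 64KB */
		bytes = GSO_MAX_SIZE - 1 - MAX_TCP_HEADER;

	segs = (uint32_t)(bytes / mss);
	if (segs < min_tso_segs(pacing_rate_Bps))
		segs = min_tso_segs(pacing_rate_Bps);
	return segs < 0x7Fu ? segs : 0x7Fu;			/* same clamp as the patch */
}

int main(void)
{
	/* 10 Gbit/s pacing, 1448-byte MSS: capped by the GSO limit, not the 1ms budget. */
	printf("%u\n", tso_segs_goal(10000000000ull / 8, 1448));	/* prints 45 */
	/* 1 Mbit/s pacing: the 1ms budget is below one MSS, so the floor of 1 applies. */
	printf("%u\n", tso_segs_goal(1000000ull / 8, 1448));		/* prints 1 */
	return 0;
}

Compared with the old code, the goal is no longer cached in a 7-bit field on every ACK; it is derived from sk_pacing_rate whenever the output path asks for it, which is what lets the struct field and the per-ACK update go away.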
@@ -348,7 +349,7 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
 	cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
 
 	/* Allow enough full-sized skbs in flight to utilize end systems. */
-	cwnd += 3 * bbr->tso_segs_goal;
+	cwnd += 3 * bbr_tso_segs_goal(sk);
 
 	/* Reduce delayed ACKs by rounding up cwnd to the next even number. */
 	cwnd = (cwnd + 1) & ~1U;
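With the cached field gone, bbr_target_cwnd() calls bbr_tso_segs_goal(sk) directly, so the three-burst headroom tracks the pacing rate at the moment the target is computed rather than a value stored on the previous ACK. A small worked example of these last two steps, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 100;	/* hypothetical BDP-derived target, in packets */
	unsigned int tso_goal = 45;	/* hypothetical bbr_tso_segs_goal() result */

	cwnd += 3 * tso_goal;		/* headroom for three full-sized skbs: 235 */
	cwnd = (cwnd + 1) & ~1u;	/* round up to an even packet count: 236 */
	printf("%u\n", cwnd);
	return 0;
}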
@@ -824,7 +825,6 @@ static void bbr_main(struct sock *sk, const struct rate_sample *rs)
 
 	bw = bbr_bw(sk);
 	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
-	bbr_set_tso_segs_goal(sk);
 	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
 }
 
@@ -834,7 +834,6 @@ static void bbr_init(struct sock *sk)
 	struct bbr *bbr = inet_csk_ca(sk);
 
 	bbr->prior_cwnd = 0;
-	bbr->tso_segs_goal = 0;	 /* default segs per skb until first ACK */
 	bbr->rtt_cnt = 0;
 	bbr->next_rtt_delivered = 0;
 	bbr->prev_ca_state = TCP_CA_Open;
@@ -936,7 +935,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
 	.undo_cwnd	= bbr_undo_cwnd,
 	.cwnd_event	= bbr_cwnd_event,
 	.ssthresh	= bbr_ssthresh,
-	.tso_segs_goal	= bbr_tso_segs_goal,
+	.min_tso_segs	= bbr_min_tso_segs,
 	.get_info	= bbr_get_info,
 	.set_state	= bbr_set_state,
 };
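The hook registered above also changes meaning: the old tso_segs_goal callback returned the full per-skb segment goal, while min_tso_segs only overrides the sysctl_tcp_min_tso_segs floor and leaves the actual sizing to tcp_tso_autosize() in the generic output path. The companion tcp_output.c change is not part of this diff; the sketch below is a reconstruction of how that consumer is expected to look, not a quote of it.

/* Sketch only: the shape of the tcp_output.c side consuming ->min_tso_segs. */
static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	u32 min_tso, tso_segs;

	/* Let the congestion control (e.g. BBR) adjust the floor... */
	min_tso = ca_ops->min_tso_segs ?
			ca_ops->min_tso_segs(sk) :
			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;

	/* ...then size the skb from the pacing rate as usual. */
	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
}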