
Commit 3a9b76f

edumazet authored and davem330 committed
tcp: allow drivers to tweak TSQ logic
I had many reports that TSQ logic breaks wifi aggregation.

Current logic is to allow up to 1 ms of bytes to be queued into qdisc and driver queues.

But wifi aggregation needs a bigger budget to allow bigger rates to be discovered by various TCP congestion control algorithms.

This patch adds an extra socket field, allowing wifi drivers to select another log scale to derive the TCP Small Queues credit from the current pacing rate.

The initial value is 10, meaning that this patch does not change current behavior.

We expect wifi drivers to set this field to smaller values (tests have been done with values from 6 to 9).

They would have to use the following template:

    if (skb->sk && skb->sk->sk_pacing_shift != MY_PACING_SHIFT)
        skb->sk->sk_pacing_shift = MY_PACING_SHIFT;

Ref: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1670041
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Johannes Berg <[email protected]>
Cc: Toke Høiland-Jørgensen <[email protected]>
Cc: Kir Kolyshkin <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 166c881 commit 3a9b76f

File tree

include/net/sock.h
net/core/sock.c
net/ipv4/tcp_output.c

3 files changed: +5 -2 lines changed
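For illustration, a minimal sketch of where a wifi driver's transmit path might apply the template from the commit message. The driver function, the surrounding context, and the value chosen for MY_PACING_SHIFT are hypothetical, not part of this commit:

    /* Hypothetical driver sketch: shows where the sk_pacing_shift
     * template from the commit message would go. The function name
     * and MY_PACING_SHIFT value are illustrative only.
     */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    #define MY_PACING_SHIFT 8	/* ~4 ms of queued bytes instead of ~1 ms */

    static netdev_tx_t my_wifi_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	/* Only write the field when it changes, to avoid dirtying
    	 * the socket cache line on every packet.
    	 */
    	if (skb->sk && skb->sk->sk_pacing_shift != MY_PACING_SHIFT)
    		skb->sk->sk_pacing_shift = MY_PACING_SHIFT;

    	/* ... queue skb for aggregation and hand it to the hardware ... */
    	return NETDEV_TX_OK;
    }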

include/net/sock.h

Lines changed: 2 additions & 0 deletions
@@ -267,6 +267,7 @@ struct sock_common {
  * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  * @sk_gso_max_size: Maximum GSO segment size to build
  * @sk_gso_max_segs: Maximum number of GSO segments
+ * @sk_pacing_shift: scaling factor for TCP Small Queues
  * @sk_lingertime: %SO_LINGER l_linger setting
  * @sk_backlog: always used with the per-socket spinlock held
  * @sk_callback_lock: used with the callbacks in the end of this struct
@@ -451,6 +452,7 @@ struct sock {
 	kmemcheck_bitfield_end(flags);

 	u16		sk_gso_max_segs;
+	u8		sk_pacing_shift;
 	unsigned long	sk_lingertime;
 	struct proto	*sk_prot_creator;
 	rwlock_t	sk_callback_lock;

net/core/sock.c

Lines changed: 1 addition & 0 deletions
@@ -2746,6 +2746,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)

 	sk->sk_max_pacing_rate = ~0U;
 	sk->sk_pacing_rate = ~0U;
+	sk->sk_pacing_shift = 10;
 	sk->sk_incoming_cpu = -1;
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory

net/ipv4/tcp_output.c

Lines changed: 2 additions & 2 deletions
@@ -1720,7 +1720,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 {
 	u32 bytes, segs;

-	bytes = min(sk->sk_pacing_rate >> 10,
+	bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift,
 		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);

 	/* Goal is to send at least one packet per ms,
@@ -2198,7 +2198,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 {
 	unsigned int limit;

-	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
+	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
 	limit = min_t(u32, limit,
 		      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
 	limit <<= factor;
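To make the effect of the shift concrete, here is a small standalone sketch (plain user-space C, not kernel code; the pacing rate used is just an example) that mirrors the sk_pacing_rate >> sk_pacing_shift budget computation in the hunks above:

    /* Standalone illustration of the TSQ budget math: a shift of 10
     * divides the pacing rate (bytes/sec) by 1024, i.e. roughly 1 ms
     * worth of bytes; each smaller shift doubles the budget.
     */
    #include <stdio.h>

    int main(void)
    {
    	unsigned int pacing_rate = 125000000;	/* ~1 Gbit/s in bytes/sec */
    	unsigned int shift;

    	for (shift = 10; shift >= 6; shift--)
    		printf("shift %2u -> budget %8u bytes (~%4.1f ms)\n",
    		       shift, pacing_rate >> shift,
    		       1000.0 * (pacing_rate >> shift) / pacing_rate);
    	return 0;
    }

With a shift of 10 the budget is roughly 1 ms worth of bytes, matching the previous hard-coded behavior; each decrement of the shift doubles the budget, so the 6 to 9 range mentioned in the commit message corresponds to roughly 2 ms to 16 ms of queued bytes.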
