Commit e42134b
Merge branch 'tcp-mem-pressure-fixes'
Eric Dumazet says:

====================
tcp: final (?) round of mem pressure fixes

While working on prior patch series (e10b02e "Merge branch
'net-reduce-tcp_memory_allocated-inflation'"), I found that we could
still have frozen TCP flows under memory pressure.

I thought we had solved this in 2015, but the fix was not complete.

v2: deal with zerocopy tx paths.
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents e8b0339 + f54755f commit e42134b

2 files changed: +33 -7 lines

net/ipv4/tcp.c

Lines changed: 29 additions & 4 deletions
@@ -951,6 +951,23 @@ static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
+static int tcp_wmem_schedule(struct sock *sk, int copy)
+{
+	int left;
+
+	if (likely(sk_wmem_schedule(sk, copy)))
+		return copy;
+
+	/* We could be in trouble if we have nothing queued.
+	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
+	 * to guarantee some progress.
+	 */
+	left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
+	if (left > 0)
+		sk_forced_mem_schedule(sk, min(left, copy));
+	return min(copy, sk->sk_forward_alloc);
+}
+
 static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
 				      struct page *page, int offset, size_t *size)
 {
@@ -986,7 +1003,11 @@ static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
 		tcp_mark_push(tp, skb);
 		goto new_segment;
 	}
-	if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
+	if (tcp_downgrade_zcopy_pure(sk, skb))
+		return NULL;
+
+	copy = tcp_wmem_schedule(sk, copy);
+	if (!copy)
 		return NULL;
 
 	if (can_coalesce) {
@@ -1334,8 +1355,11 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 
 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-			if (tcp_downgrade_zcopy_pure(sk, skb) ||
-			    !sk_wmem_schedule(sk, copy))
+			if (tcp_downgrade_zcopy_pure(sk, skb))
+				goto wait_for_space;
+
+			copy = tcp_wmem_schedule(sk, copy);
+			if (!copy)
 				goto wait_for_space;
 
 			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
@@ -1362,7 +1386,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 				skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
 
 			if (!skb_zcopy_pure(skb)) {
-				if (!sk_wmem_schedule(sk, copy))
+				copy = tcp_wmem_schedule(sk, copy);
+				if (!copy)
 					goto wait_for_space;
 			}
 
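To make the new helper's contract concrete, the following is a minimal userspace C model of the tcp_wmem_schedule() logic added above. It is a sketch under stated assumptions, not kernel code: TCP_WMEM0, wmem_schedule(), forced_mem_schedule() and all byte values are illustrative stand-ins for sysctl_tcp_wmem[0], sk_wmem_schedule() and sk_forced_mem_schedule(), and the page rounding the real forced helper performs is ignored. It demonstrates why the callers above were rewritten to use the return value: the helper may grant fewer bytes than requested, and only a zero return means no progress is possible.

#include <stdio.h>

#define TCP_WMEM0 4096		/* stand-in for sysctl_tcp_wmem[0] */

static int fwd_alloc;		/* models sk->sk_forward_alloc */
static int wmem_queued;		/* models sk->sk_wmem_queued */
static int under_pressure = 1;	/* pretend the protocol memory limit was hit */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Models sk_wmem_schedule(): refuses new reservations under pressure. */
static int wmem_schedule(int bytes)
{
	if (under_pressure)
		return 0;
	fwd_alloc += bytes;
	return 1;
}

/* Models sk_forced_mem_schedule(): charges unconditionally (the real
 * kernel helper rounds up to whole pages; this model does not).
 */
static void forced_mem_schedule(int bytes)
{
	fwd_alloc += bytes;
}

/* Mirrors tcp_wmem_schedule(): returns how many bytes the caller may
 * actually copy; possibly fewer than requested, zero if none.
 */
static int tcp_wmem_schedule_model(int copy)
{
	int left;

	if (wmem_schedule(copy))
		return copy;

	/* Nothing could be reserved the normal way. Force at most the
	 * tcp_wmem[0] floor minus what is already queued, so the flow
	 * keeps making progress instead of freezing.
	 */
	left = TCP_WMEM0 - wmem_queued;
	if (left > 0)
		forced_mem_schedule(min_int(left, copy));
	return min_int(copy, fwd_alloc);
}

int main(void)
{
	wmem_queued = 1024;	/* 1 KB queued, 3 KB of the floor left */
	printf("granted %d of 8192 requested bytes\n",
	       tcp_wmem_schedule_model(8192));	/* -> granted 3072 */
	return 0;
}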

net/ipv4/tcp_output.c

Lines changed: 4 additions & 3 deletions
@@ -3362,11 +3362,12 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
  */
 void sk_forced_mem_schedule(struct sock *sk, int size)
 {
-	int amt;
+	int delta, amt;
 
-	if (size <= sk->sk_forward_alloc)
+	delta = size - sk->sk_forward_alloc;
+	if (delta <= 0)
 		return;
-	amt = sk_mem_pages(size);
+	amt = sk_mem_pages(delta);
 	sk->sk_forward_alloc += amt << PAGE_SHIFT;
 	sk_memory_allocated_add(sk, amt);
 }
