Commit 7360132

Eric Dumazet authored and kuba-moo committed
tcp: let tcp_mtu_probe() build headless packets
tcp_mtu_probe() is still copying payload from skbs in the write queue,
using skb_copy_bits(), ignoring potential errors.

Modern TCP stack wants to only deal with payload found in page frags,
as this is a prereq for TCPDirect (host stack might not have access
to the payload).

Signed-off-by: Eric Dumazet <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent f84ad5c commit 7360132
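
For readers new to skb page fragments, the sketch below illustrates the coalescing rule the new tcp_clone_payload() helper applies: when a source fragment references the same page as the previously cloned fragment and starts exactly where that fragment ends, the two are merged into one destination slot instead of consuming another of the MAX_SKB_FRAGS entries. This is a minimal, hypothetical userspace model, not kernel code; the frag struct and clone_frag() helper are invented stand-ins for skb_frag_t and the skb_frag_page()/skb_frag_off()/skb_frag_size() accessors.

/* Minimal userspace sketch (NOT kernel code): models how fragments that
 * reference contiguous ranges of the same page can be coalesced into one
 * destination fragment. The struct and helper names are invented for
 * illustration only.
 */
#include <stdio.h>

struct frag {            /* hypothetical stand-in for skb_frag_t */
        int page_id;     /* which page the fragment points into */
        int off;         /* offset within that page */
        int size;        /* number of bytes */
};

/* Append 'todo' bytes described by 'from' to the destination array,
 * merging with the previous destination fragment when the source
 * continues it on the same page. Returns the new fragment count. */
static int clone_frag(struct frag *to, int nr, const struct frag *from, int todo)
{
        if (nr > 0 &&
            from->page_id == to[nr - 1].page_id &&
            from->off == to[nr - 1].off + to[nr - 1].size) {
                to[nr - 1].size += todo;     /* coalesce: no new slot used */
                return nr;
        }
        to[nr] = *from;
        to[nr].size = todo;                  /* may take only part of the source */
        return nr + 1;
}

int main(void)
{
        const struct frag src[] = {
                { .page_id = 1, .off = 0,    .size = 4096 },
                { .page_id = 1, .off = 4096, .size = 4096 },  /* contiguous: merges */
                { .page_id = 2, .off = 0,    .size = 2048 },  /* new page: new slot */
        };
        struct frag dst[8];
        int i, nr = 0;

        for (i = 0; i < 3; i++)
                nr = clone_frag(dst, nr, &src[i], src[i].size);

        for (i = 0; i < nr; i++)
                printf("frag %d: page %d off %d size %d\n",
                       i, dst[i].page_id, dst[i].off, dst[i].size);
        return 0;
}

Compiled with any C compiler, this prints two destination fragments (8192 bytes on page 1, 2048 bytes on page 2) built from three sources, mirroring how the kernel helper stays within MAX_SKB_FRAGS even when the write queue holds many small, contiguous fragments.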

File tree

1 file changed: +58 −2 lines changed

net/ipv4/tcp_output.c

Lines changed: 58 additions & 2 deletions
@@ -2319,6 +2319,57 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
         return true;
 }
 
+static int tcp_clone_payload(struct sock *sk, struct sk_buff *to,
+                             int probe_size)
+{
+        skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;
+        int i, todo, len = 0, nr_frags = 0;
+        const struct sk_buff *skb;
+
+        if (!sk_wmem_schedule(sk, to->truesize + probe_size))
+                return -ENOMEM;
+
+        skb_queue_walk(&sk->sk_write_queue, skb) {
+                const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;
+
+                if (skb_headlen(skb))
+                        return -EINVAL;
+
+                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) {
+                        if (len >= probe_size)
+                                goto commit;
+                        todo = min_t(int, skb_frag_size(fragfrom),
+                                     probe_size - len);
+                        len += todo;
+                        if (lastfrag &&
+                            skb_frag_page(fragfrom) == skb_frag_page(lastfrag) &&
+                            skb_frag_off(fragfrom) == skb_frag_off(lastfrag) +
+                                                      skb_frag_size(lastfrag)) {
+                                skb_frag_size_add(lastfrag, todo);
+                                continue;
+                        }
+                        if (unlikely(nr_frags == MAX_SKB_FRAGS))
+                                return -E2BIG;
+                        skb_frag_page_copy(fragto, fragfrom);
+                        skb_frag_off_copy(fragto, fragfrom);
+                        skb_frag_size_set(fragto, todo);
+                        nr_frags++;
+                        lastfrag = fragto++;
+                }
+        }
+commit:
+        WARN_ON_ONCE(len != probe_size);
+        for (i = 0; i < nr_frags; i++)
+                skb_frag_ref(to, i);
+
+        skb_shinfo(to)->nr_frags = nr_frags;
+        to->truesize += probe_size;
+        to->len += probe_size;
+        to->data_len += probe_size;
+        __skb_header_release(to);
+        return 0;
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets. This discovers routing
@@ -2395,9 +2446,15 @@ static int tcp_mtu_probe(struct sock *sk)
                 return -1;
 
         /* We're allowed to probe. Build it now. */
-        nskb = tcp_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
+        nskb = tcp_stream_alloc_skb(sk, 0, GFP_ATOMIC, false);
         if (!nskb)
                 return -1;
+
+        /* build the payload, and be prepared to abort if this fails. */
+        if (tcp_clone_payload(sk, nskb, probe_size)) {
+                consume_skb(nskb);
+                return -1;
+        }
         sk_wmem_queued_add(sk, nskb->truesize);
         sk_mem_charge(sk, nskb->truesize);
 
@@ -2415,7 +2472,6 @@ static int tcp_mtu_probe(struct sock *sk)
         len = 0;
         tcp_for_write_queue_from_safe(skb, next, sk) {
                 copy = min_t(int, skb->len, probe_size - len);
-                skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
 
                 if (skb->len <= copy) {
                         /* We've eaten all the data from this skb.
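
A note on the approach: instead of byte-copying payload into the probe skb (the old skb_copy_bits() path, whose errors were ignored), tcp_clone_payload() copies only fragment descriptors and takes page references with skb_frag_ref(). Those references are taken after the whole walk has succeeded, so the -ENOMEM, -EINVAL and -E2BIG error returns need no cleanup, and the caller simply frees the still-empty probe skb with consume_skb() and gives up on probing for now.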
