
Commit 7e692df

edumazet authored and davem330 committed
tcp: fix skb_copy_ubufs() vs BIG TCP
David Ahern reported crashes in skb_copy_ubufs() caused by TCP tx zerocopy
using hugepages, and skb length bigger than ~68 KB.

skb_copy_ubufs() assumed it could copy all payload using up to
MAX_SKB_FRAGS order-0 pages.

This assumption broke when BIG TCP was able to put up to 512 KB per skb.

We did not hit this bug at Google because we use CONFIG_MAX_SKB_FRAGS=45
and limit gso_max_size to 180000.

A solution is to use higher order pages if needed.

v2: add missing __GFP_COMP, or we leak memory.

Fixes: 7c4e983 ("net: allow gso_max_size to exceed 65536")
Reported-by: David Ahern <[email protected]>
Link: https://lore.kernel.org/netdev/[email protected]/T/
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Xin Long <[email protected]>
Cc: Willem de Bruijn <[email protected]>
Cc: Coco Li <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
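For illustration only (not part of the commit): a minimal userspace sketch of the order computation the patch introduces, assuming 4 KB pages (PAGE_SHIFT = 12) and MAX_SKB_FRAGS = 17. The pagelen value is a stand-in for what the kernel gets from __skb_pagelen(skb); the real code operates on the skb itself and allocates the pages with alloc_pages().

#include <stdio.h>

/* Assumed values for illustration; the real ones come from the kernel config. */
#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)   /* assumed 4 KB pages */
#define MAX_SKB_FRAGS 17UL                  /* assumed CONFIG_MAX_SKB_FRAGS=17 */

int main(void)
{
	unsigned long pagelen = 512UL * 1024;   /* a BIG TCP skb: 512 KB of paged data */
	unsigned int order = 0;

	/* Smallest order such that MAX_SKB_FRAGS pages of that order cover the payload. */
	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < pagelen)
		order++;

	unsigned long psize = PAGE_SIZE << order;
	unsigned long new_frags = (pagelen + psize - 1) >> (PAGE_SHIFT + order);

	/* With the assumptions above this prints: order=3 psize=32768 new_frags=16 */
	printf("order=%u psize=%lu new_frags=%lu\n", order, psize, new_frags);
	return 0;
}

Under these assumptions a 512 KB skb ends up on order-3 (32 KB) compound pages, so the copy fits in 16 fragments instead of overrunning the 17-entry frag array with order-0 pages; the diff below performs the same order/psize/new_frags computation in the kernel.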
1 parent 6f75cd1 commit 7e692df

1 file changed

net/core/skbuff.c

Lines changed: 14 additions & 6 deletions
@@ -1758,7 +1758,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	int num_frags = skb_shinfo(skb)->nr_frags;
 	struct page *page, *head = NULL;
-	int i, new_frags;
+	int i, order, psize, new_frags;
 	u32 d_off;
 
 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
@@ -1767,9 +1767,17 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	if (!num_frags)
 		goto release;
 
-	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	/* We might have to allocate high order pages, so compute what minimum
+	 * page order is needed.
+	 */
+	order = 0;
+	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
+		order++;
+	psize = (PAGE_SIZE << order);
+
+	new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
 	for (i = 0; i < new_frags; i++) {
-		page = alloc_page(gfp_mask);
+		page = alloc_pages(gfp_mask | __GFP_COMP, order);
 		if (!page) {
 			while (head) {
 				struct page *next = (struct page *)page_private(head);
@@ -1796,11 +1804,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 			vaddr = kmap_atomic(p);
 
 			while (done < p_len) {
-				if (d_off == PAGE_SIZE) {
+				if (d_off == psize) {
 					d_off = 0;
 					page = (struct page *)page_private(page);
 				}
-				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
+				copy = min_t(u32, psize - d_off, p_len - done);
 				memcpy(page_address(page) + d_off,
 				       vaddr + p_off + done, copy);
 				done += copy;
@@ -1816,7 +1824,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 
 	/* skb frags point to kernel buffers */
 	for (i = 0; i < new_frags - 1; i++) {
-		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
+		__skb_fill_page_desc(skb, i, head, 0, psize);
 		head = (struct page *)page_private(head);
 	}
 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
