
Commit 8218f62

Yunsheng Lin authored and kuba-moo committed
mm: page_frag: use initial zero offset for page_frag_alloc_align()
We are about to use the page_frag_alloc_*() API not just to allocate memory for skb->data, but also to do the memory allocation for skb frags. Currently the page_frag implementation in the mm subsystem runs the offset as a countdown rather than a count-up value. That may have several advantages, as mentioned in [1], but it also has disadvantages: for example, it may defeat skb frag coalescing and more correct cache prefetching.

There is a trade-off to make in order to have a unified implementation and API for page_frag, so use an initial zero offset in this patch; the following patch will try to optimize away the disadvantages as much as possible.

1. https://lore.kernel.org/all/[email protected]/

CC: Andrew Morton <[email protected]>
CC: Linux-MM <[email protected]>
Signed-off-by: Yunsheng Lin <[email protected]>
Reviewed-by: Alexander Duyck <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 65941f1 commit 8218f62
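To make the direction change concrete, here is a minimal user-space sketch of the two bookkeeping styles. The struct, function names, and the fixed 4096-byte buffer are illustrative assumptions for this sketch, not the kernel implementation:

#include <stddef.h>

/* Illustrative stand-in for struct page_frag_cache (assumed names). */
struct frag_cache {
	char	buf[4096];
	size_t	offset;		/* countdown: starts at 4096; count-up: starts at 0 */
};

/* Old style: the offset counts down from the end of the buffer. */
static void *alloc_countdown(struct frag_cache *c, size_t fragsz)
{
	if (fragsz > c->offset)
		return NULL;		/* cache exhausted */
	c->offset -= fragsz;		/* later frags sit at lower addresses */
	return c->buf + c->offset;
}

/* New style (this patch): the offset counts up from the start. */
static void *alloc_countup(struct frag_cache *c, size_t fragsz)
{
	size_t offset = c->offset;

	if (offset + fragsz > sizeof(c->buf))
		return NULL;		/* cache exhausted */
	c->offset = offset + fragsz;	/* later frags sit at higher addresses */
	return c->buf + offset;
}

With the count-up style, consecutive allocations return adjacent, ascending addresses, so a new fragment can extend the previous one; the countdown style hands out descending addresses, which is what defeats skb frag coalescing and prefetch-friendly access patterns.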

File tree: 1 file changed, +23 −23 lines


mm/page_frag_cache.c

Lines changed: 23 additions & 23 deletions
@@ -63,9 +63,13 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 			      unsigned int fragsz, gfp_t gfp_mask,
 			      unsigned int align_mask)
 {
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+	unsigned int size = nc->size;
+#else
 	unsigned int size = PAGE_SIZE;
+#endif
+	unsigned int offset;
 	struct page *page;
-	int offset;
 
 	if (unlikely(!nc->va)) {
 refill:
@@ -85,11 +89,24 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 		/* reset page count bias and offset to start of new frag */
 		nc->pfmemalloc = page_is_pfmemalloc(page);
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		nc->offset = size;
+		nc->offset = 0;
 	}
 
-	offset = nc->offset - fragsz;
-	if (unlikely(offset < 0)) {
+	offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
+	if (unlikely(offset + fragsz > size)) {
+		if (unlikely(fragsz > PAGE_SIZE)) {
+			/*
+			 * The caller is trying to allocate a fragment
+			 * with fragsz > PAGE_SIZE but the cache isn't big
+			 * enough to satisfy the request, this may
+			 * happen in low memory conditions.
+			 * We don't release the cache page because
+			 * it could make memory pressure worse
+			 * so we simply return NULL here.
+			 */
+			return NULL;
+		}
+
 		page = virt_to_page(nc->va);
 
 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
@@ -100,33 +117,16 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 			goto refill;
 		}
 
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
 		/* OK, page count is 0, we can safely set it */
 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
 		/* reset page count bias and offset to start of new frag */
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		offset = size - fragsz;
-		if (unlikely(offset < 0)) {
-			/*
-			 * The caller is trying to allocate a fragment
-			 * with fragsz > PAGE_SIZE but the cache isn't big
-			 * enough to satisfy the request, this may
-			 * happen in low memory conditions.
-			 * We don't release the cache page because
-			 * it could make memory pressure worse
-			 * so we simply return NULL here.
-			 */
-			return NULL;
-		}
+		offset = 0;
 	}
 
 	nc->pagecnt_bias--;
-	offset &= align_mask;
-	nc->offset = offset;
+	nc->offset = offset + fragsz;
 
 	return nc->va + offset;
 }
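For context, a hedged usage sketch of the public wrapper after this change. page_frag_alloc_align() and page_frag_free() are existing kernel APIs; the wrapper passes -align as align_mask, so ~align_mask equals align - 1 and __ALIGN_KERNEL_MASK() rounds the running offset up to the requested alignment. The caller names below and the exact header location are assumptions that vary by kernel version:

#include <linux/cache.h>	/* SMP_CACHE_BYTES */
#include <linux/gfp.h>		/* page_frag declarations lived here in many
				 * versions; later kernels moved them to
				 * <linux/page_frag_cache.h> */

/* Zero-initialized, so nc->va is NULL and the first call refills. */
static struct page_frag_cache example_cache;	/* assumed name */

/* Allocate a cacheline-aligned fragment; with this patch, back-to-back
 * calls return ascending, adjacent addresses within the cached page. */
static void *example_grab(unsigned int len)
{
	return page_frag_alloc_align(&example_cache, len, GFP_ATOMIC,
				     SMP_CACHE_BYTES);
}

static void example_drop(void *va)
{
	page_frag_free(va);	/* drops the per-fragment page reference */
}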
