  * page allocated from page pool. Page splitting enables memory saving and thus
  * avoids TLB/cache miss for data access, but there also is some cost to
  * implement page splitting, mainly some cache line dirtying/bouncing for
- * 'struct page' and atomic operation for page->pp_frag_count.
+ * 'struct page' and atomic operation for page->pp_ref_count.
  *
  * The API keeps track of in-flight pages, in order to let API users know when
  * it is safe to free a page_pool object, the API users must call
@@ -210,69 +210,77 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
 	return pool->p.dma_dir;
 }
 
-/* pp_frag_count represents the number of writers who can update the page
- * either by updating skb->data or via DMA mappings for the device.
- * We can't rely on the page refcnt for that as we don't know who might be
- * holding page references and we can't reliably destroy or sync DMA mappings
- * of the fragments.
+/**
+ * page_pool_fragment_page() - split a fresh page into fragments
+ * @page: page to split
+ * @nr: references to set
+ *
+ * pp_ref_count represents the number of outstanding references to the page,
+ * which will be freed using page_pool APIs (rather than page allocator APIs
+ * like put_page()). Such references are usually held by page_pool-aware
+ * objects like skbs marked for page pool recycling.
  *
- * When pp_frag_count reaches 0 we can either recycle the page if the page
- * refcnt is 1 or return it back to the memory allocator and destroy any
- * mappings we have.
+ * This helper allows the caller to take (set) multiple references to a
+ * freshly allocated page. The page must be freshly allocated (have a
+ * pp_ref_count of 1). This is commonly done by drivers and
+ * "fragment allocators" to save atomic operations - either when they know
+ * upfront how many references they will need; or to take MAX references and
+ * return the unused ones with a single atomic dec(), instead of performing
+ * multiple atomic inc() operations.
  */
 static inline void page_pool_fragment_page(struct page *page, long nr)
 {
-	atomic_long_set(&page->pp_frag_count, nr);
+	atomic_long_set(&page->pp_ref_count, nr);
 }
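As an illustration only (not part of this patch), a driver could carve one freshly allocated page_pool page into equal-sized RX buffers by setting all references up front. rx_fill_from_page() and frag_size below are hypothetical names, and the sketch assumes the declarations from <net/page_pool/helpers.h>:

#include <net/page_pool/helpers.h>

/* Hypothetical driver helper: split one fresh page_pool page into
 * PAGE_SIZE / frag_size buffers with a single atomic_long_set() via
 * page_pool_fragment_page(), instead of per-buffer atomic increments.
 */
static void rx_fill_from_page(struct page *page, unsigned int frag_size)
{
	long nr_frags = PAGE_SIZE / frag_size;

	/* The page must still be fresh here, i.e. pp_ref_count == 1. */
	page_pool_fragment_page(page, nr_frags);
}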
 
-static inline long page_pool_defrag_page(struct page *page, long nr)
+static inline long page_pool_unref_page(struct page *page, long nr)
 {
 	long ret;
 
-	/* If nr == pp_frag_count then we have cleared all remaining
+	/* If nr == pp_ref_count then we have cleared all remaining
 	 * references to the page:
 	 * 1. 'n == 1': no need to actually overwrite it.
 	 * 2. 'n != 1': overwrite it with one, which is the rare case
-	 *              for pp_frag_count draining.
+	 *              for pp_ref_count draining.
 	 *
 	 * The main advantage to doing this is that not only we avoid a atomic
 	 * update, as an atomic_read is generally a much cheaper operation than
 	 * an atomic update, especially when dealing with a page that may be
-	 * partitioned into only 2 or 3 pieces; but also unify the pp_frag_count
+	 * referenced by only 2 or 3 users; but also unify the pp_ref_count
 	 * handling by ensuring all pages have partitioned into only 1 piece
 	 * initially, and only overwrite it when the page is partitioned into
 	 * more than one piece.
 	 */
-	if (atomic_long_read(&page->pp_frag_count) == nr) {
+	if (atomic_long_read(&page->pp_ref_count) == nr) {
 		/* As we have ensured nr is always one for constant case using
 		 * the BUILD_BUG_ON(), only need to handle the non-constant case
-		 * here for pp_frag_count draining, which is a rare case.
+		 * here for pp_ref_count draining, which is a rare case.
 		 */
 		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
 		if (!__builtin_constant_p(nr))
-			atomic_long_set(&page->pp_frag_count, 1);
+			atomic_long_set(&page->pp_ref_count, 1);
 
 		return 0;
 	}
 
-	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+	ret = atomic_long_sub_return(nr, &page->pp_ref_count);
 	WARN_ON(ret < 0);
 
-	/* We are the last user here too, reset pp_frag_count back to 1 to
+	/* We are the last user here too, reset pp_ref_count back to 1 to
 	 * ensure all pages have been partitioned into 1 piece initially,
 	 * this should be the rare case when the last two fragment users call
-	 * page_pool_defrag_page() currently.
+	 * page_pool_unref_page() currently.
 	 */
 	if (unlikely(!ret))
-		atomic_long_set(&page->pp_frag_count, 1);
+		atomic_long_set(&page->pp_ref_count, 1);
 
 	return ret;
 }
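To illustrate the "take MAX references and return the unused ones with a single atomic dec()" pattern described in the new kdoc, here is a hedged sketch (not from this patch). RX_PAGE_REF_BIAS, rx_page_init() and rx_page_retire() are hypothetical driver names; page_pool_put_unrefed_page() is the helper renamed later in this diff:

#define RX_PAGE_REF_BIAS	(1L << 16)	/* assumed driver-chosen bias */

/* Take a large reference bias once; hand out fragments with no further atomics. */
static void rx_page_init(struct page *page)
{
	page_pool_fragment_page(page, RX_PAGE_REF_BIAS);
}

/* Drop every reference the driver still owns in one atomic operation; if that
 * turns out to be the last reference overall, return the page to the pool.
 */
static void rx_page_retire(struct page_pool *pool, struct page *page,
			   long refs_handed_out)
{
	if (page_pool_unref_page(page, RX_PAGE_REF_BIAS - refs_handed_out) == 0)
		/* -1: sync the whole page; false: not in the pool's NAPI context */
		page_pool_put_unrefed_page(pool, page, -1, false);
}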
 
-static inline bool page_pool_is_last_frag(struct page *page)
+static inline bool page_pool_is_last_ref(struct page *page)
 {
-	/* If page_pool_defrag_page() returns 0, we were the last user */
-	return page_pool_defrag_page(page, 1) == 0;
+	/* If page_pool_unref_page() returns 0, we were the last user */
+	return page_pool_unref_page(page, 1) == 0;
 }
 
 /**
@@ -297,10 +305,10 @@ static inline void page_pool_put_page(struct page_pool *pool,
 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
 	 */
 #ifdef CONFIG_PAGE_POOL
-	if (!page_pool_is_last_frag(page))
+	if (!page_pool_is_last_ref(page))
 		return;
 
-	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
+	page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
 #endif
 }
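For completeness, a hedged caller-side sketch (not part of this patch): each fragment owner drops exactly one reference through page_pool_put_page(), and only the final drop falls through to page_pool_put_unrefed_page() to actually recycle or unmap the page. rx_buf_free() is a hypothetical name:

/* Hypothetical per-buffer release path: every owner of a fragment calls this
 * once; the page is recycled only when the last reference is released.
 */
static void rx_buf_free(struct page_pool *pool, struct page *page)
{
	/* -1: sync the full page; false: not called from NAPI/softirq context */
	page_pool_put_page(pool, page, -1, false);
}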