@@ -11,6 +11,8 @@
 #include <linux/device.h>

 #include <net/page_pool.h>
+#include <net/xdp.h>
+
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
 #include <linux/page-flags.h>
@@ -362,8 +364,9 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
  * If the page refcnt != 1, then the page will be returned to memory
  * subsystem.
  */
-void page_pool_put_page(struct page_pool *pool, struct page *page,
-			unsigned int dma_sync_size, bool allow_direct)
+static __always_inline struct page *
+__page_pool_put_page(struct page_pool *pool, struct page *page,
+		     unsigned int dma_sync_size, bool allow_direct)
 {
 	/* This allocator is optimized for the XDP mode that uses
 	 * one-frame-per-page, but have fallbacks that act like the
@@ -379,15 +382,12 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 			page_pool_dma_sync_for_device(pool, page,
 						      dma_sync_size);

-		if (allow_direct && in_serving_softirq())
-			if (page_pool_recycle_in_cache(page, pool))
-				return;
+		if (allow_direct && in_serving_softirq() &&
+		    page_pool_recycle_in_cache(page, pool))
+			return NULL;

-		if (!page_pool_recycle_in_ring(pool, page)) {
-			/* Cache full, fallback to free pages */
-			page_pool_return_page(pool, page);
-		}
-		return;
+		/* Page found as candidate for recycling */
+		return page;
 	}
 	/* Fallback/non-XDP mode: API user have elevated refcnt.
 	 *
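The bulk producer added below serializes on the ptr_ring producer lock once per batch through page_pool_ring_lock()/page_pool_ring_unlock(), whose definitions are not part of this excerpt. A minimal sketch of what such helpers could look like, assuming they only guard pool->ring.producer_lock and skip the _bh variant when the caller already runs in softirq context:

/* Sketch only, based on the assumption stated above; the helpers in the
 * actual patch may differ in detail.
 */
static void page_pool_ring_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		/* Already in softirq context, plain spinlock is enough */
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);
}

static void page_pool_ring_unlock(struct page_pool *pool)
	__releases(&pool->ring.producer_lock)
{
	if (in_serving_softirq())
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}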
@@ -405,9 +405,59 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
+
+	return NULL;
+}
+
+void page_pool_put_page(struct page_pool *pool, struct page *page,
+			unsigned int dma_sync_size, bool allow_direct)
+{
+	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
+	if (page && !page_pool_recycle_in_ring(pool, page)) {
+		/* Cache full, fallback to free pages */
+		page_pool_return_page(pool, page);
+	}
 }
 EXPORT_SYMBOL(page_pool_put_page);

+/* Caller must not use data area after call, as this function overwrites it */
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+			     int count)
+{
+	int i, bulk_len = 0;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = virt_to_head_page(data[i]);
+
+		page = __page_pool_put_page(pool, page, -1, false);
+		/* Approved for bulk recycling in ptr_ring cache */
+		if (page)
+			data[bulk_len++] = page;
+	}
+
+	if (unlikely(!bulk_len))
+		return;
+
+	/* Bulk producer into ptr_ring page_pool cache */
+	page_pool_ring_lock(pool);
+	for (i = 0; i < bulk_len; i++) {
+		if (__ptr_ring_produce(&pool->ring, data[i]))
+			break; /* ring full */
+	}
+	page_pool_ring_unlock(pool);
+
+	/* Hopefully all pages was return into ptr_ring */
+	if (likely(i == bulk_len))
+		return;
+
+	/* ptr_ring cache full, free remaining pages outside producer lock
+	 * since put_page() with refcnt == 1 can be an expensive operation
+	 */
+	for (; i < bulk_len; i++)
+		page_pool_return_page(pool, data[i]);
+}
+EXPORT_SYMBOL(page_pool_put_page_bulk);
+
 static void page_pool_empty_ring(struct page_pool *pool)
 {
 	struct page *page;
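page_pool_put_page_bulk() takes an array of frame virtual addresses (each entry is resolved with virt_to_head_page()) and reuses the same array to stage the struct page pointers destined for the ptr_ring, which is why callers must not touch the data area afterwards. The new #include <net/xdp.h> hints that the XDP frame-return path is the intended consumer. Below is a minimal usage sketch; the batch size, helper name and driver context are assumptions made for illustration, not part of the patch:

#include <net/page_pool.h>

#define EXAMPLE_BULK_SIZE	16	/* assumed batch size, not from the patch */

/* Hypothetical driver-side helper: collect frame virtual addresses and
 * release them through the bulk API instead of one page_pool_put_page()
 * call per frame.
 */
static void example_free_frames(struct page_pool *pool, void **frames, int n)
{
	void *batch[EXAMPLE_BULK_SIZE];
	int i, cnt = 0;

	for (i = 0; i < n; i++) {
		batch[cnt++] = frames[i];	/* virtual addresses, not struct page * */
		if (cnt == EXAMPLE_BULK_SIZE) {
			/* batch[] is overwritten with struct page pointers here */
			page_pool_put_page_bulk(pool, batch, cnt);
			cnt = 0;
		}
	}

	if (cnt)
		page_pool_put_page_bulk(pool, batch, cnt);
}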