@@ -54,6 +54,7 @@
 #include <net/pkt_cls.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <net/page_pool.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -668,19 +669,20 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+					 struct bnxt_rx_ring_info *rxr,
 					 gfp_t gfp)
 {
 	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
-	page = alloc_page(gfp);
+	page = page_pool_dev_alloc_pages(rxr->page_pool);
 	if (!page)
 		return NULL;
 
 	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
 				      DMA_ATTR_WEAK_ORDERING);
 	if (dma_mapping_error(dev, *mapping)) {
-		__free_page(page);
+		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
 	*mapping += bp->rx_dma_offset;
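
Note: the hunk above swaps the one-off alloc_page()/__free_page() pair for pool-backed calls. Below is a minimal standalone sketch of that allocate/map/recycle pattern, assuming a pool already created with page_pool_create(); demo_alloc_mapped_page() is a hypothetical name for illustration, not bnxt code.

/* Minimal sketch, outside the driver, of the allocate/map/recycle
 * pattern the hunk above adopts; demo_alloc_mapped_page() is a
 * hypothetical helper, not bnxt code.
 */
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

static struct page *demo_alloc_mapped_page(struct page_pool *pool,
					   struct device *dev,
					   dma_addr_t *mapping)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				      DMA_BIDIRECTIONAL,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		/* Pool pages go back to their pool, never __free_page(). */
		page_pool_recycle_direct(pool, page);
		return NULL;
	}
	return page;
}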
@@ -716,7 +718,8 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	dma_addr_t mapping;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
-		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
+		struct page *page =
+			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
 
 		if (!page)
 			return -ENOMEM;
@@ -2360,7 +2363,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 			dma_unmap_page_attrs(&pdev->dev, mapping,
 					     PAGE_SIZE, bp->rx_dir,
 					     DMA_ATTR_WEAK_ORDERING);
-			__free_page(data);
+			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
 			dma_unmap_single_attrs(&pdev->dev, mapping,
 					       bp->rx_buf_use_size,
@@ -2497,6 +2500,8 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
 
+		rxr->page_pool = NULL;
+
 		kfree(rxr->rx_tpa);
 		rxr->rx_tpa = NULL;
 
@@ -2511,6 +2516,26 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 	}
 }
 
+static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr)
+{
+	struct page_pool_params pp = { 0 };
+
+	pp.pool_size = bp->rx_ring_size;
+	pp.nid = dev_to_node(&bp->pdev->dev);
+	pp.dev = &bp->pdev->dev;
+	pp.dma_dir = DMA_BIDIRECTIONAL;
+
+	rxr->page_pool = page_pool_create(&pp);
+	if (IS_ERR(rxr->page_pool)) {
+		int err = PTR_ERR(rxr->page_pool);
+
+		rxr->page_pool = NULL;
+		return err;
+	}
+	return 0;
+}
+
 static int bnxt_alloc_rx_rings(struct bnxt *bp)
 {
 	int i, rc, agg_rings = 0, tpa_rings = 0;
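
The new bnxt_alloc_rx_page_pool() above sizes the pool to the RX ring and pins allocations to the device's NUMA node. A hedged sketch of the same create/teardown lifecycle in isolation follows, with page_pool_free() as the release path the driver uses on its error unwind; the demo_* names are hypothetical.

/* Hypothetical standalone sketch of the pool lifecycle: create a pool
 * sized to the RX ring on the device's NUMA node, release it with
 * page_pool_free() on teardown. demo names are illustrative only.
 */
#include <linux/device.h>
#include <net/page_pool.h>

static struct page_pool *demo_setup_pool(struct device *dev,
					 unsigned int ring_size)
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = ring_size;	/* one page per RX descriptor */
	pp.nid = dev_to_node(dev);	/* keep pages near the NIC */
	pp.dev = dev;
	pp.dma_dir = DMA_BIDIRECTIONAL;	/* pages may be written by XDP */

	return page_pool_create(&pp);	/* returns ERR_PTR() on failure */
}

static void demo_teardown_pool(struct page_pool *pool)
{
	page_pool_free(pool);		/* pages must be back in the pool */
}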
@@ -2530,14 +2555,24 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 
 		ring = &rxr->rx_ring_struct;
 
+		rc = bnxt_alloc_rx_page_pool(bp, rxr);
+		if (rc)
+			return rc;
+
 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
-		if (rc < 0)
+		if (rc < 0) {
+			page_pool_free(rxr->page_pool);
+			rxr->page_pool = NULL;
 			return rc;
+		}
 
 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
-						MEM_TYPE_PAGE_SHARED, NULL);
+						MEM_TYPE_PAGE_POOL,
+						rxr->page_pool);
 		if (rc) {
 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
+			page_pool_free(rxr->page_pool);
+			rxr->page_pool = NULL;
 			return rc;
 		}
 
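
For reference, a self-contained sketch of the registration order the final hunk enforces: register the rxq, hand the pool to the XDP core via MEM_TYPE_PAGE_POOL so redirected frames can recycle, and unwind the rxq registration if that fails. demo_reg_xdp_pool() is illustrative only; freeing the pool on failure is left to the caller, as in the driver.

/* Illustrative-only helper mirroring the registration order above;
 * freeing the pool on failure is left to the caller, as in the driver.
 */
#include <net/xdp.h>
#include <net/page_pool.h>

static int demo_reg_xdp_pool(struct xdp_rxq_info *xdp_rxq,
			     struct net_device *dev, u32 queue_index,
			     struct page_pool *pool)
{
	int rc = xdp_rxq_info_reg(xdp_rxq, dev, queue_index);

	if (rc < 0)
		return rc;

	/* Let the XDP core return redirected frames to this pool. */
	rc = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, pool);
	if (rc)
		xdp_rxq_info_unreg(xdp_rxq);
	return rc;
}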