@@ -2540,93 +2540,91 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 	}
 }
 
-static void bnxt_free_rx_skbs(struct bnxt *bp)
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 {
-	int i, max_idx, max_agg_idx;
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
 	struct pci_dev *pdev = bp->pdev;
-
-	if (!bp->rx_ring)
-		return;
+	struct bnxt_tpa_idx_map *map;
+	int i, max_idx, max_agg_idx;
 
 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_tpa_idx_map *map;
-		int j;
-
-		if (rxr->rx_tpa) {
-			for (j = 0; j < bp->max_tpa; j++) {
-				struct bnxt_tpa_info *tpa_info =
-							&rxr->rx_tpa[j];
-				u8 *data = tpa_info->data;
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
 
-				if (!data)
-					continue;
+	for (i = 0; i < bp->max_tpa; i++) {
+		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+		u8 *data = tpa_info->data;
 
-				dma_unmap_single_attrs(&pdev->dev,
-						       tpa_info->mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
+		if (!data)
+			continue;
 
-				tpa_info->data = NULL;
+		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+				       bp->rx_buf_use_size, bp->rx_dir,
+				       DMA_ATTR_WEAK_ORDERING);
 
-				kfree(data);
-			}
-		}
+		tpa_info->data = NULL;
 
-		for (j = 0; j < max_idx; j++) {
-			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
-			dma_addr_t mapping = rx_buf->mapping;
-			void *data = rx_buf->data;
+		kfree(data);
+	}
 
-			if (!data)
-				continue;
+skip_rx_tpa_free:
+	for (i = 0; i < max_idx; i++) {
+		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+		dma_addr_t mapping = rx_buf->mapping;
+		void *data = rx_buf->data;
 
-			rx_buf->data = NULL;
+		if (!data)
+			continue;
 
-			if (BNXT_RX_PAGE_MODE(bp)) {
-				mapping -= bp->rx_dma_offset;
-				dma_unmap_page_attrs(&pdev->dev, mapping,
-						     PAGE_SIZE, bp->rx_dir,
-						     DMA_ATTR_WEAK_ORDERING);
-				page_pool_recycle_direct(rxr->page_pool, data);
-			} else {
-				dma_unmap_single_attrs(&pdev->dev, mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
-				kfree(data);
-			}
+		rx_buf->data = NULL;
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			mapping -= bp->rx_dma_offset;
+			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+					     bp->rx_dir,
+					     DMA_ATTR_WEAK_ORDERING);
+			page_pool_recycle_direct(rxr->page_pool, data);
+		} else {
+			dma_unmap_single_attrs(&pdev->dev, mapping,
+					       bp->rx_buf_use_size, bp->rx_dir,
+					       DMA_ATTR_WEAK_ORDERING);
+			kfree(data);
 		}
+	}
+	for (i = 0; i < max_agg_idx; i++) {
+		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+		struct page *page = rx_agg_buf->page;
 
-		for (j = 0; j < max_agg_idx; j++) {
-			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
-						&rxr->rx_agg_ring[j];
-			struct page *page = rx_agg_buf->page;
-
-			if (!page)
-				continue;
+		if (!page)
+			continue;
 
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE,
-					     PCI_DMA_FROMDEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
+		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+				     DMA_ATTR_WEAK_ORDERING);
 
-			rx_agg_buf->page = NULL;
-			__clear_bit(j, rxr->rx_agg_bmap);
+		rx_agg_buf->page = NULL;
+		__clear_bit(i, rxr->rx_agg_bmap);
 
-			__free_page(page);
-		}
-		if (rxr->rx_page) {
-			__free_page(rxr->rx_page);
-			rxr->rx_page = NULL;
-		}
-		map = rxr->rx_tpa_idx_map;
-		if (map)
-			memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+		__free_page(page);
+	}
+	if (rxr->rx_page) {
+		__free_page(rxr->rx_page);
+		rxr->rx_page = NULL;
 	}
+	map = rxr->rx_tpa_idx_map;
+	if (map)
+		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->rx_ring)
+		return;
+
+	for (i = 0; i < bp->rx_nr_rings; i++)
+		bnxt_free_one_rx_ring_skbs(bp, i);
 }
 
 static void bnxt_free_skbs(struct bnxt *bp)
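
The refactor above moves the old per-ring loop body into bnxt_free_one_rx_ring_skbs(), leaving bnxt_free_rx_skbs() as a thin wrapper, so the buffers of a single RX ring can now be freed without touching the others. As a hedged sketch only, not part of this diff, a hypothetical caller that flushes one ring before refilling it might build on the new helper like this; bnxt_reset_one_rx_ring() is an illustrative name, and the producer fields are assumed to exist in struct bnxt_rx_ring_info:

/* Illustrative sketch, not from this commit: flush a single RX ring with the
 * new helper, then clear its software producers before the ring is refilled.
 */
static void bnxt_reset_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];

	/* Free every TPA buffer, RX buffer and aggregation page on this ring */
	bnxt_free_one_rx_ring_skbs(bp, ring_nr);

	/* Assumed producer/consumer fields, reset before refilling the ring */
	rxr->rx_prod = 0;
	rxr->rx_agg_prod = 0;
	rxr->rx_sw_agg_prod = 0;
	rxr->rx_next_cons = 0;
}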