Commit 975bc99

Michael Chan authored and davem330 committed
bnxt_en: Refactor bnxt_free_rx_skbs().
bnxt_free_rx_skbs() frees all the allocated buffers and SKBs for every RX ring. Refactor this function by calling a new function bnxt_free_one_rx_ring_skbs() to free these buffers on one specified RX ring at a time. This is preparation work for resetting one RX ring during run-time.

Reviewed-by: Pavan Chebbi <[email protected]>
Signed-off-by: Michael Chan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent fc8864e commit 975bc99

File tree
  • drivers/net/ethernet/broadcom/bnxt/bnxt.c

1 file changed: +66 -68 lines changed


drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 66 additions & 68 deletions
@@ -2540,93 +2540,91 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 	}
 }
 
-static void bnxt_free_rx_skbs(struct bnxt *bp)
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 {
-	int i, max_idx, max_agg_idx;
+	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
 	struct pci_dev *pdev = bp->pdev;
-
-	if (!bp->rx_ring)
-		return;
+	struct bnxt_tpa_idx_map *map;
+	int i, max_idx, max_agg_idx;
 
 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
-	for (i = 0; i < bp->rx_nr_rings; i++) {
-		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
-		struct bnxt_tpa_idx_map *map;
-		int j;
-
-		if (rxr->rx_tpa) {
-			for (j = 0; j < bp->max_tpa; j++) {
-				struct bnxt_tpa_info *tpa_info =
-							&rxr->rx_tpa[j];
-				u8 *data = tpa_info->data;
+	if (!rxr->rx_tpa)
+		goto skip_rx_tpa_free;
 
-				if (!data)
-					continue;
+	for (i = 0; i < bp->max_tpa; i++) {
+		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+		u8 *data = tpa_info->data;
 
-				dma_unmap_single_attrs(&pdev->dev,
-						       tpa_info->mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
+		if (!data)
+			continue;
 
-				tpa_info->data = NULL;
+		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+				       bp->rx_buf_use_size, bp->rx_dir,
+				       DMA_ATTR_WEAK_ORDERING);
 
-				kfree(data);
-			}
-		}
+		tpa_info->data = NULL;
 
-		for (j = 0; j < max_idx; j++) {
-			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
-			dma_addr_t mapping = rx_buf->mapping;
-			void *data = rx_buf->data;
+		kfree(data);
+	}
 
-			if (!data)
-				continue;
+skip_rx_tpa_free:
+	for (i = 0; i < max_idx; i++) {
+		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+		dma_addr_t mapping = rx_buf->mapping;
+		void *data = rx_buf->data;
 
-			rx_buf->data = NULL;
+		if (!data)
+			continue;
 
-			if (BNXT_RX_PAGE_MODE(bp)) {
-				mapping -= bp->rx_dma_offset;
-				dma_unmap_page_attrs(&pdev->dev, mapping,
-						     PAGE_SIZE, bp->rx_dir,
-						     DMA_ATTR_WEAK_ORDERING);
-				page_pool_recycle_direct(rxr->page_pool, data);
-			} else {
-				dma_unmap_single_attrs(&pdev->dev, mapping,
-						       bp->rx_buf_use_size,
-						       bp->rx_dir,
-						       DMA_ATTR_WEAK_ORDERING);
-				kfree(data);
-			}
+		rx_buf->data = NULL;
+		if (BNXT_RX_PAGE_MODE(bp)) {
+			mapping -= bp->rx_dma_offset;
+			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+					     bp->rx_dir,
+					     DMA_ATTR_WEAK_ORDERING);
+			page_pool_recycle_direct(rxr->page_pool, data);
+		} else {
+			dma_unmap_single_attrs(&pdev->dev, mapping,
+					       bp->rx_buf_use_size, bp->rx_dir,
+					       DMA_ATTR_WEAK_ORDERING);
+			kfree(data);
 		}
+	}
+	for (i = 0; i < max_agg_idx; i++) {
+		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+		struct page *page = rx_agg_buf->page;
 
-		for (j = 0; j < max_agg_idx; j++) {
-			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
-						&rxr->rx_agg_ring[j];
-			struct page *page = rx_agg_buf->page;
-
-			if (!page)
-				continue;
+		if (!page)
+			continue;
 
-			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-					     BNXT_RX_PAGE_SIZE,
-					     PCI_DMA_FROMDEVICE,
-					     DMA_ATTR_WEAK_ORDERING);
+		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+				     DMA_ATTR_WEAK_ORDERING);
 
-			rx_agg_buf->page = NULL;
-			__clear_bit(j, rxr->rx_agg_bmap);
+		rx_agg_buf->page = NULL;
+		__clear_bit(i, rxr->rx_agg_bmap);
 
-			__free_page(page);
-		}
-		if (rxr->rx_page) {
-			__free_page(rxr->rx_page);
-			rxr->rx_page = NULL;
-		}
-		map = rxr->rx_tpa_idx_map;
-		if (map)
-			memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+		__free_page(page);
+	}
+	if (rxr->rx_page) {
+		__free_page(rxr->rx_page);
+		rxr->rx_page = NULL;
 	}
+	map = rxr->rx_tpa_idx_map;
+	if (map)
+		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->rx_ring)
+		return;
+
+	for (i = 0; i < bp->rx_nr_rings; i++)
+		bnxt_free_one_rx_ring_skbs(bp, i);
 }
 
 static void bnxt_free_skbs(struct bnxt *bp)
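
The commit message notes that this split is preparation for resetting a single RX ring at run time. A minimal sketch of how such a reset path could use the new helper is shown below. bnxt_reset_one_rx_ring() and bnxt_alloc_one_rx_ring() are illustrative names assumed for this sketch, not functions added by this commit, and a real reset path would also have to quiesce the ring's NAPI context and update the ring's producer doorbell.

/* Hypothetical sketch only: reset the buffers of one RX ring while the
 * other RX rings keep running.  Assumes a matching per-ring refill helper
 * named bnxt_alloc_one_rx_ring(); this commit does not introduce it.
 */
static void bnxt_reset_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	/* Free every SKB, TPA buffer and aggregation page on this ring only,
	 * using the helper factored out by this commit.
	 */
	bnxt_free_one_rx_ring_skbs(bp, ring_nr);

	/* Repopulate the ring with fresh buffers before re-enabling it. */
	bnxt_alloc_one_rx_ring(bp, ring_nr);
}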
