
Commit 08c9b61

dtatulea authored and Saeed Mahameed committed
net/mlx5e: RX, Remove internal page_cache
This patch removes the internal rx page_cache and uses the generic
page_pool api only. It used to be that the page_pool couldn't handle all
the mlx5 driver use cases, but with the introduction of skb recycling and
page fragmentation in the page_pool, a full switch can now be made.

Some benefits of this transition:

* Better page recycling in the cases when the page_cache was suffering
  from head of queue blocking. The page_pool doesn't have this issue.
* DMA mapping/unmapping can be managed by the page_pool.
* mlx5e_rq size reduced by more than 50% due to the page_cache array
  being deleted.

This patch only removes the page_cache. Downstream patches will enable
the required page_pool features and will add further fine-tuning.

Signed-off-by: Dragos Tatulea <[email protected]>
Reviewed-by: Tariq Toukan <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
1 parent ca6ef9f commit 08c9b61
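
The "DMA managed by the page_pool" point maps to the generic API's
PP_FLAG_DMA_MAP and PP_FLAG_DMA_SYNC_DEV flags. A minimal sketch of such
a pool, assuming the <net/page_pool.h> API of this kernel era; the helper
name and sizes are illustrative, not taken from this patch (the actual
enabling lands in the downstream patches the message mentions):

#include <net/page_pool.h>

/* Hypothetical helper, not mlx5 code: create a pool that owns DMA
 * mapping (PP_FLAG_DMA_MAP) and device-direction syncs
 * (PP_FLAG_DMA_SYNC_DEV), so the driver no longer maps or unmaps
 * pages itself.
 */
static struct page_pool *rx_create_page_pool(struct device *dev, u32 pool_size)
{
	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order     = 0,			/* single pages */
		.pool_size = pool_size,
		.nid       = NUMA_NO_NODE,
		.dev       = dev,		/* device pages are mapped for */
		.dma_dir   = DMA_FROM_DEVICE,
		.max_len   = PAGE_SIZE,		/* bytes to sync for device */
		.offset    = 0,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

Pages then come from page_pool_dev_alloc_pages(), their bus address from
page_pool_get_dma_addr(), and recycling goes through
page_pool_recycle_direct(), the same calls visible in the en_rx.c hunk
below.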

File tree

3 files changed: +0 -72 lines

drivers/net/ethernet/mellanox/mlx5/core/en.h

Lines changed: 0 additions & 6 deletions

@@ -628,11 +628,6 @@ struct mlx5e_mpw_info {
 #define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
 				 MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
 #define MLX5E_CACHE_SIZE	(4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
-struct mlx5e_page_cache {
-	u32 head;
-	u32 tail;
-	struct page *page_cache[MLX5E_CACHE_SIZE];
-};
 
 struct mlx5e_rq;
 typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
@@ -745,7 +740,6 @@ struct mlx5e_rq {
 	struct mlx5e_rq_stats *stats;
 	struct mlx5e_cq        cq;
 	struct mlx5e_cq_decomp cqd;
-	struct mlx5e_page_cache page_cache;
 	struct hwtstamp_config *tstamp;
 	struct mlx5_clock      *clock;
 	struct mlx5e_icosq    *icosq;
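
For scale on the ">50% size reduction" claim: NAPI_POLL_WEIGHT is 64, so
assuming MLX5_MPWRQ_MAX_PAGES_PER_WQE does not exceed it, MLX5E_CACHE_UNIT
is 64 and MLX5E_CACHE_SIZE = 4 * roundup_pow_of_two(64) = 256 entries. At
8 bytes per struct page pointer on 64-bit, that is a 2 KiB array (plus the
head/tail indices) embedded in every mlx5e_rq, which this hunk deletes.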

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

Lines changed: 0 additions & 13 deletions

@@ -900,9 +900,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 	}
 
-	rq->page_cache.head = 0;
-	rq->page_cache.tail = 0;
-
 	return 0;
 
 err_destroy_page_pool:
@@ -933,7 +930,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 static void mlx5e_free_rq(struct mlx5e_rq *rq)
 {
 	struct bpf_prog *old_prog;
-	int i;
 
 	if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
 		old_prog = rcu_dereference_protected(rq->xdp_prog,
@@ -953,15 +949,6 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 		mlx5e_free_wqe_alloc_info(rq);
 	}
 
-	for (i = rq->page_cache.head; i != rq->page_cache.tail;
-	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
-		/* With AF_XDP, page_cache is not used, so this loop is not
-		 * entered, and it's safe to call mlx5e_page_release_dynamic
-		 * directly.
-		 */
-		mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
-	}
-
 	xdp_rxq_info_unreg(&rq->xdp_rxq);
 	page_pool_destroy(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);
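
The drain loop removed above steps its index with
(i + 1) & (MLX5E_CACHE_SIZE - 1), which is exactly why en.h sizes the
cache through roundup_pow_of_two(): the mask is only equivalent to a
modulo when the ring size is a power of two. A standalone sketch with
hypothetical names, not driver code:

#include <assert.h>

#define RING_SIZE 8				/* must be a power of two */

/* Advance a ring index with a mask: same result as (i + 1) % RING_SIZE,
 * but compiles to a single AND; wrong for non-power-of-two sizes.
 */
static unsigned int ring_next(unsigned int i)
{
	return (i + 1) & (RING_SIZE - 1);
}

int main(void)
{
	assert(ring_next(6) == 7);
	assert(ring_next(7) == 0);		/* wraps to the start */
	return 0;
}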

drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

Lines changed: 0 additions & 53 deletions

@@ -271,60 +271,10 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
 }
 
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
-{
-	struct mlx5e_page_cache *cache = &rq->page_cache;
-	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
-	struct mlx5e_rq_stats *stats = rq->stats;
-
-	if (tail_next == cache->head) {
-		stats->cache_full++;
-		return false;
-	}
-
-	if (!dev_page_is_reusable(page)) {
-		stats->cache_waive++;
-		return false;
-	}
-
-	cache->page_cache[cache->tail] = page;
-	cache->tail = tail_next;
-	return true;
-}
-
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, struct page **pagep)
-{
-	struct mlx5e_page_cache *cache = &rq->page_cache;
-	struct mlx5e_rq_stats *stats = rq->stats;
-	dma_addr_t addr;
-
-	if (unlikely(cache->head == cache->tail)) {
-		stats->cache_empty++;
-		return false;
-	}
-
-	if (page_ref_count(cache->page_cache[cache->head]) != 1) {
-		stats->cache_busy++;
-		return false;
-	}
-
-	*pagep = cache->page_cache[cache->head];
-	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
-	stats->cache_reuse++;
-
-	addr = page_pool_get_dma_addr(*pagep);
-	/* Non-XSK always uses PAGE_SIZE. */
-	dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir);
-	return true;
-}
-
 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, struct page **pagep)
 {
 	dma_addr_t addr;
 
-	if (mlx5e_rx_cache_get(rq, pagep))
-		return 0;
-
 	*pagep = page_pool_dev_alloc_pages(rq->page_pool);
 	if (unlikely(!*pagep))
 		return -ENOMEM;
@@ -353,9 +303,6 @@ void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
 	if (likely(recycle)) {
-		if (mlx5e_rx_cache_put(rq, page))
-			return;
-
 		mlx5e_page_dma_unmap(rq, page);
 		page_pool_recycle_direct(rq->page_pool, page);
 	} else {
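
The cache_busy bail-out in the removed mlx5e_rx_cache_get() is the head
of queue blocking the commit message cites: only the slot at head is ever
tested, so a single page still referenced elsewhere stalls every reusable
page queued behind it. A standalone sketch of the failure mode, with
hypothetical names:

#include <stdbool.h>

#define CACHE_SIZE 8			/* power of two, as in the driver */

struct toy_page {
	int refcount;			/* stand-in for page_ref_count() */
};

struct toy_cache {
	unsigned int head, tail;
	struct toy_page *slots[CACHE_SIZE];
};

/* Mirrors the removed get path: only the head slot is examined, so one
 * busy page blocks the whole FIFO even when later slots are idle.
 */
static bool toy_cache_get(struct toy_cache *c, struct toy_page **out)
{
	if (c->head == c->tail)
		return false;			/* empty */
	if (c->slots[c->head]->refcount != 1)
		return false;			/* busy head: queue stalls */
	*out = c->slots[c->head];
	c->head = (c->head + 1) & (CACHE_SIZE - 1);
	return true;
}

int main(void)
{
	struct toy_page busy = { .refcount = 2 }, idle = { .refcount = 1 };
	struct toy_cache c = { .head = 0, .tail = 2,
			       .slots = { &busy, &idle } };
	struct toy_page *p;

	/* idle is reusable, but the busy head blocks it: get() fails */
	return toy_cache_get(&c, &p) ? 1 : 0;
}

The page_pool sidesteps this because a page is only returned to the pool
once it is actually free, so the pool never holds a busy entry that can
gate the pages behind it.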
