Commit 8d73f8f

jlemon authored and davem330 committed
page_pool: fix logic in __page_pool_get_cached
__page_pool_get_cached() will return NULL when the ring is empty, even
if there are pages present in the lookaside cache. It is also possible
to refill the cache, and then return a NULL page.

Restructure the logic to eliminate both cases.

Signed-off-by: Jonathan Lemon <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Acked-by: Ilias Apalodimas <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 4b58c9b commit 8d73f8f
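
Why both cases occur is visible in the removed lines of the diff below. An abridged sketch of the pre-patch flow of __page_pool_get_cached(), taken from those removed lines:

    if (__ptr_ring_empty(r))
        return NULL;    /* bails out even when pool->alloc.count > 0,
                         * so pages in the lookaside cache are never returned */

    if (likely(in_serving_softirq())) {
        if (likely(pool->alloc.count))
            return pool->alloc.cache[--pool->alloc.count];

        /* open-coded refill loop */
        spin_lock(&r->consumer_lock);
        while ((page = __ptr_ring_consume(r))) {
            if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
                break;
            pool->alloc.cache[pool->alloc.count++] = page;
        }
        spin_unlock(&r->consumer_lock);
        return page;    /* NULL if the ring drained during the refill,
                         * even though pages were just added to the cache */
    }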

File tree

1 file changed (+16 / -23 lines)

net/core/page_pool.c

Lines changed: 16 additions & 23 deletions
@@ -82,40 +82,33 @@ EXPORT_SYMBOL(page_pool_create);
 static struct page *__page_pool_get_cached(struct page_pool *pool)
 {
     struct ptr_ring *r = &pool->ring;
+    bool refill = false;
     struct page *page;
 
-    /* Quicker fallback, avoid locks when ring is empty */
-    if (__ptr_ring_empty(r))
-        return NULL;
-
     /* Test for safe-context, caller should provide this guarantee */
     if (likely(in_serving_softirq())) {
         if (likely(pool->alloc.count)) {
             /* Fast-path */
             page = pool->alloc.cache[--pool->alloc.count];
             return page;
         }
-        /* Slower-path: Alloc array empty, time to refill
-         *
-         * Open-coded bulk ptr_ring consumer.
-         *
-         * Discussion: the ring consumer lock is not really
-         * needed due to the softirq/NAPI protection, but
-         * later need the ability to reclaim pages on the
-         * ring. Thus, keeping the locks.
-         */
-        spin_lock(&r->consumer_lock);
-        while ((page = __ptr_ring_consume(r))) {
-            if (pool->alloc.count == PP_ALLOC_CACHE_REFILL)
-                break;
-            pool->alloc.cache[pool->alloc.count++] = page;
-        }
-        spin_unlock(&r->consumer_lock);
-        return page;
+        refill = true;
     }
 
-    /* Slow-path: Get page from locked ring queue */
-    page = ptr_ring_consume(&pool->ring);
+    /* Quicker fallback, avoid locks when ring is empty */
+    if (__ptr_ring_empty(r))
+        return NULL;
+
+    /* Slow-path: Get page from locked ring queue,
+     * refill alloc array if requested.
+     */
+    spin_lock(&r->consumer_lock);
+    page = __ptr_ring_consume(r);
+    if (refill)
+        pool->alloc.count = __ptr_ring_consume_batched(r,
+                                                       pool->alloc.cache,
+                                                       PP_ALLOC_CACHE_REFILL);
+    spin_unlock(&r->consumer_lock);
     return page;
 }
 
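
For readability, this is how __page_pool_get_cached() reads with the patch applied, assembled from the context and added lines above:

    static struct page *__page_pool_get_cached(struct page_pool *pool)
    {
        struct ptr_ring *r = &pool->ring;
        bool refill = false;
        struct page *page;

        /* Test for safe-context, caller should provide this guarantee */
        if (likely(in_serving_softirq())) {
            if (likely(pool->alloc.count)) {
                /* Fast-path */
                page = pool->alloc.cache[--pool->alloc.count];
                return page;
            }
            /* Alloc array empty: note it and fall through to the
             * locked slow-path, which will also refill the array.
             */
            refill = true;
        }

        /* Quicker fallback, avoid locks when ring is empty */
        if (__ptr_ring_empty(r))
            return NULL;

        /* Slow-path: Get page from locked ring queue,
         * refill alloc array if requested.
         */
        spin_lock(&r->consumer_lock);
        page = __ptr_ring_consume(r);
        if (refill)
            pool->alloc.count = __ptr_ring_consume_batched(r,
                                                           pool->alloc.cache,
                                                           PP_ALLOC_CACHE_REFILL);
        spin_unlock(&r->consumer_lock);
        return page;
    }

The empty-ring check now sits after the lookaside-cache fast path, and the batched consume both refills the cache and reports how many pages it stored, so a successful refill is no longer followed by a NULL return.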
