Commit 463586e

yuzhaogoogle authored and akpm00 committed
mm/cma: add cma_{alloc,free}_folio()
With alloc_contig_range() and free_contig_range() supporting large folios,
CMA can allocate and free large folios too, by cma_alloc_folio() and
cma_free_folio().

[[email protected]: fix WARN in cma_alloc_folio()]
Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Acked-by: Zi Yan <[email protected]>
Cc: Frank van der Linden <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
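
For orientation, a minimal sketch of how a caller might use the new pair. The helper below is hypothetical (not part of this commit), and the struct cma pointer is assumed to come from an existing reservation, e.g. one made with cma_declare_contiguous() at boot. Per the WARN_ON in cma_alloc_folio(), the order must be non-zero and the gfp mask must include __GFP_COMP.

/* Hypothetical caller, for illustration only. */
static int demo_cma_folio(struct cma *cma)
{
	struct folio *folio;

	/* order > 0 and __GFP_COMP are required, or the call WARNs. */
	folio = cma_alloc_folio(cma, 4, GFP_KERNEL | __GFP_COMP);
	if (!folio)
		return -ENOMEM;	/* range busy, or CONFIG_CMA=n stub */

	/* ... use the 16 contiguous pages ... */

	/* Returns false if the folio is not from this CMA area. */
	WARN_ON(!cma_free_folio(cma, folio));

	return 0;
}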
1 parent e98337d commit 463586e

File tree: 2 files changed, +56 −15 lines


include/linux/cma.h

Lines changed: 16 additions & 0 deletions
@@ -52,4 +52,20 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
 
 extern void cma_reserve_pages_on_error(struct cma *cma);
+
+#ifdef CONFIG_CMA
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
+bool cma_free_folio(struct cma *cma, const struct folio *folio);
+#else
+static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+	return NULL;
+}
+
+static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+	return false;
+}
+#endif
+
 #endif
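
The !CONFIG_CMA stubs follow this header's existing convention: callers may invoke the new functions unconditionally and treat a NULL return as "no CMA folio available". A sketch of that fallback pattern, where the buddy-allocator fallback policy is my illustration rather than anything this commit prescribes:

/* Illustrative fallback pattern, not from the commit. */
static struct folio *alloc_large_folio(struct cma *cma, int order, gfp_t gfp)
{
	struct folio *folio = NULL;

	if (cma)
		folio = cma_alloc_folio(cma, order, gfp | __GFP_COMP);

	/* The CONFIG_CMA=n stub (or a busy CMA range) returns NULL. */
	if (!folio)
		folio = folio_alloc(gfp, order);

	return folio;
}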

mm/cma.c

Lines changed: 40 additions & 15 deletions
@@ -403,18 +403,8 @@ static void cma_debug_show_areas(struct cma *cma)
 	spin_unlock_irq(&cma->lock);
 }
 
-/**
- * cma_alloc() - allocate pages from contiguous area
- * @cma: Contiguous memory region for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @no_warn: Avoid printing message about failed allocation
- *
- * This function allocates part of contiguous memory on specific
- * contiguous memory area.
- */
-struct page *cma_alloc(struct cma *cma, unsigned long count,
-		       unsigned int align, bool no_warn)
+static struct page *__cma_alloc(struct cma *cma, unsigned long count,
+				unsigned int align, gfp_t gfp)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
@@ -463,8 +453,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
-		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
 		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -494,7 +483,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 			page_kasan_tag_reset(nth_page(page, i));
 	}
 
-	if (ret && !no_warn) {
+	if (ret && !(gfp & __GFP_NOWARN)) {
 		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
 				   __func__, cma->name, count, ret);
 		cma_debug_show_areas(cma);
@@ -513,6 +502,34 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 	return page;
 }
 
+/**
+ * cma_alloc() - allocate pages from contiguous area
+ * @cma: Contiguous memory region for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @no_warn: Avoid printing message about failed allocation
+ *
+ * This function allocates part of contiguous memory on specific
+ * contiguous memory area.
+ */
+struct page *cma_alloc(struct cma *cma, unsigned long count,
+		       unsigned int align, bool no_warn)
+{
+	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+}
+
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+	struct page *page;
+
+	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+		return NULL;
+
+	page = __cma_alloc(cma, 1 << order, order, gfp);
+
+	return page ? page_folio(page) : NULL;
+}
+
 bool cma_pages_valid(struct cma *cma, const struct page *pages,
 		     unsigned long count)
 {
@@ -564,6 +581,14 @@ bool cma_release(struct cma *cma, const struct page *pages,
 	return true;
 }
 
+bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+	if (WARN_ON(!folio_test_large(folio)))
+		return false;
+
+	return cma_release(cma, &folio->page, folio_nr_pages(folio));
+}
+
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 {
 	int i;
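
One consequence of funneling both entry points through the static __cma_alloc() is that the folio path inherits gfp-based control over the failure message: where page callers pass no_warn=true, folio callers simply add __GFP_NOWARN. A hedged sketch (the helper name and policy are mine, not the commit's):

/* Hypothetical opportunistic allocation: fail quietly and let the
 * caller fall back, with no ratelimited pr_err or call to
 * cma_debug_show_areas() on failure. */
static struct folio *try_cma_folio(struct cma *cma, int order)
{
	return cma_alloc_folio(cma, order,
			       GFP_KERNEL | __GFP_COMP | __GFP_NOWARN);
}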
