@@ -403,18 +403,8 @@ static void cma_debug_show_areas(struct cma *cma)
	spin_unlock_irq(&cma->lock);
}

- /**
-  * cma_alloc() - allocate pages from contiguous area
-  * @cma: Contiguous memory region for which the allocation is performed.
-  * @count: Requested number of pages.
-  * @align: Requested alignment of pages (in PAGE_SIZE order).
-  * @no_warn: Avoid printing message about failed allocation
-  *
-  * This function allocates part of contiguous memory on specific
-  * contiguous memory area.
-  */
- struct page *cma_alloc(struct cma *cma, unsigned long count,
- 		       unsigned int align, bool no_warn)
+ static struct page *__cma_alloc(struct cma *cma, unsigned long count,
+ 				unsigned int align, gfp_t gfp)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
@@ -463,8 +453,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
- 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
- 				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+ 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
@@ -494,7 +483,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
			page_kasan_tag_reset(nth_page(page, i));
	}

- 	if (ret && !no_warn) {
+ 	if (ret && !(gfp & __GFP_NOWARN)) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
@@ -513,6 +502,34 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
	return page;
}

+ /**
+  * cma_alloc() - allocate pages from contiguous area
+  * @cma: Contiguous memory region for which the allocation is performed.
+  * @count: Requested number of pages.
+  * @align: Requested alignment of pages (in PAGE_SIZE order).
+  * @no_warn: Avoid printing message about failed allocation
+  *
+  * This function allocates part of contiguous memory on specific
+  * contiguous memory area.
+  */
+ struct page *cma_alloc(struct cma *cma, unsigned long count,
+ 		       unsigned int align, bool no_warn)
+ {
+ 	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
+ }
+
+ struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+ {
+ 	struct page *page;
+
+ 	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ 		return NULL;
+
+ 	page = __cma_alloc(cma, 1 << order, order, gfp);
+
+ 	return page ? page_folio(page) : NULL;
+ }
+

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
@@ -564,6 +581,14 @@ bool cma_release(struct cma *cma, const struct page *pages,
	return true;
}

+ bool cma_free_folio(struct cma *cma, const struct folio *folio)
+ {
+ 	if (WARN_ON(!folio_test_large(folio)))
+ 		return false;
+
+ 	return cma_release(cma, &folio->page, folio_nr_pages(folio));
+ }
+

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;
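
For illustration, here is a minimal caller sketch of the folio API this commit introduces. It is hypothetical and not part of the commit: the function name and the "my_cma" region are assumptions (a real caller would have reserved the region earlier, e.g. via cma_declare_contiguous()). It exercises the two constraints visible in the diff: cma_alloc_folio() demands a non-zero order and a gfp mask containing __GFP_COMP, and cma_free_folio() returns false if the folio is not a large folio from that region.

/*
 * Hypothetical caller sketch, not part of this commit: allocate an
 * order-4 (16-page) compound folio from a CMA region and release it.
 * "my_cma" is assumed to have been set up at boot.
 */
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_cma_folio_roundtrip(struct cma *my_cma)
{
	struct folio *folio;

	/* order must be non-zero and gfp must include __GFP_COMP */
	folio = cma_alloc_folio(my_cma, 4, GFP_KERNEL | __GFP_COMP);
	if (!folio)
		return -ENOMEM;

	/* ... use folio_nr_pages(folio) pages of contiguous memory ... */

	/* returns false if the folio did not come from this region */
	if (!cma_free_folio(my_cma, folio))
		pr_warn("example: folio not released\n");

	return 0;
}

Note how cma_alloc() keeps its old bool no_warn signature by translating it into a gfp mask for __cma_alloc(), while cma_alloc_folio() passes its caller's gfp through unchanged; only the warning-suppression decision moves from a flag check to a __GFP_NOWARN test.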