Commit 1d47a3e

JoonsooKim authored and torvalds committed

mm/cma: remove ALLOC_CMA

Now all pages reserved for a CMA region belong to ZONE_MOVABLE, and they serve only requests with GFP_HIGHMEM && GFP_MOVABLE. Therefore, we no longer need to maintain ALLOC_CMA at all.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Joonsoo Kim <[email protected]>
Reviewed-by: Aneesh Kumar K.V <[email protected]>
Tested-by: Tony Lindgren <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Laura Abbott <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Michal Nazarewicz <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Russell King <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent: bad8c6c
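
To make the rationale concrete, here is a minimal userspace sketch of the order-0 watermark accounting this commit deletes. This is an illustration only, not the kernel implementation: the helper names (watermark_ok_old, watermark_ok_new) and the numbers in main() are invented, and the real logic lives in __zone_watermark_ok() and zone_watermark_fast() in mm/page_alloc.c.

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_CMA 0x80  /* the flag this commit removes */

/* Pre-patch: a request without ALLOC_CMA could not use free CMA
 * pages, so they were subtracted before the watermark comparison. */
static bool watermark_ok_old(long free_pages, long free_cma_pages,
                             long mark, unsigned int alloc_flags)
{
        if (!(alloc_flags & ALLOC_CMA))
                free_pages -= free_cma_pages;
        return free_pages > mark;
}

/* Post-patch: CMA pageblocks live in ZONE_MOVABLE, and only
 * GFP_HIGHMEM && GFP_MOVABLE requests are steered to that zone, so
 * every request that reaches this check may use the CMA pages and
 * no per-flag adjustment is needed. */
static bool watermark_ok_new(long free_pages, long mark)
{
        return free_pages > mark;
}

int main(void)
{
        long free_pages = 1000, free_cma_pages = 600, mark = 500;

        printf("old, !ALLOC_CMA: %d\n",
               watermark_ok_old(free_pages, free_cma_pages, mark, 0));
        printf("old,  ALLOC_CMA: %d\n",
               watermark_ok_old(free_pages, free_cma_pages, mark, ALLOC_CMA));
        printf("new:             %d\n",
               watermark_ok_new(free_pages, mark));
        return 0;
}

With the flag-dependent subtraction gone, callers such as gfp_to_alloc_flags() and prepare_alloc_pages() no longer need to derive ALLOC_CMA from the migratetype, which is exactly what the mm/page_alloc.c hunks below delete.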

3 files changed: 4 additions (+), 29 deletions (-)

mm/compaction.c

Lines changed: 1 addition & 3 deletions

@@ -1450,14 +1450,12 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
          * if compaction succeeds.
          * For costly orders, we require low watermark instead of min for
          * compaction to proceed to increase its chances.
-         * ALLOC_CMA is used, as pages in CMA pageblocks are considered
-         * suitable migration targets
          */
         watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
                                 low_wmark_pages(zone) : min_wmark_pages(zone);
         watermark += compact_gap(order);
         if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
-                                                ALLOC_CMA, wmark_target))
+                                                0, wmark_target))
                 return COMPACT_SKIPPED;
 
         return COMPACT_CONTINUE;

mm/internal.h

Lines changed: 0 additions & 1 deletion

@@ -498,7 +498,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HARDER            0x10 /* try to alloc harder */
 #define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET            0x40 /* check for correct cpuset */
-#define ALLOC_CMA               0x80 /* allow allocations from CMA areas */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;

mm/page_alloc.c

Lines changed: 3 additions & 25 deletions

@@ -2893,7 +2893,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
          * exists.
          */
         watermark = min_wmark_pages(zone) + (1UL << order);
-        if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                 return 0;
 
         __mod_zone_freepage_state(zone, -(1UL << order), mt);

@@ -3169,12 +3169,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
         }
 
 
-#ifdef CONFIG_CMA
-        /* If allocation can't use CMA areas don't use free CMA pages */
-        if (!(alloc_flags & ALLOC_CMA))
-                free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
         /*
          * Check watermarks for an order-0 allocation request. If these
          * are not met, then a high-order request also cannot go ahead

@@ -3201,10 +3195,8 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
                 }
 
 #ifdef CONFIG_CMA
-                if ((alloc_flags & ALLOC_CMA) &&
-                    !list_empty(&area->free_list[MIGRATE_CMA])) {
+                if (!list_empty(&area->free_list[MIGRATE_CMA]))
                         return true;
-                }
 #endif
                 if (alloc_harder &&
                     !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))

@@ -3224,13 +3216,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
                 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
         long free_pages = zone_page_state(z, NR_FREE_PAGES);
-        long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-        /* If allocation can't use CMA areas don't use free CMA pages */
-        if (!(alloc_flags & ALLOC_CMA))
-                cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
         /*
          * Fast check for order-0 only. If this fails then the reserves

@@ -3239,7 +3224,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
          * the caller is !atomic then it'll uselessly search the free
          * list. That corner case is then slower but it is harmless.
          */
-        if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+        if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
                 return true;
 
         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,

@@ -3875,10 +3860,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
         } else if (unlikely(rt_task(current)) && !in_interrupt())
                 alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-        if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-                alloc_flags |= ALLOC_CMA;
-#endif
         return alloc_flags;
 }
 

@@ -4345,9 +4326,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
         if (should_fail_alloc_page(gfp_mask, order))
                 return false;
 
-        if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-                *alloc_flags |= ALLOC_CMA;
-
         return true;
 }