@@ -2893,7 +2893,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	 * exists.
 	 */
 	watermark = min_wmark_pages(zone) + (1UL << order);
-	if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 		return 0;
 
 	__mod_zone_freepage_state(zone, -(1UL << order), mt);
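For context: __isolate_free_page() refuses to isolate a block unless the zone would stay above its min watermark plus the size of the block being taken, and with ALLOC_CMA gone its zone_watermark_ok() call now simply passes 0 for the flags. A compilable userspace sketch of that headroom arithmetic (the watermark value below is a made-up stand-in, not taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned long min_wmark = 1000;	/* hypothetical min watermark, in pages */
	unsigned int order = 3;		/* isolating a 2^3 == 8 page block */

	/* The zone must keep min_wmark plus the isolated block's worth
	 * of pages free, or the isolation is refused. */
	unsigned long watermark = min_wmark + (1UL << order);

	printf("required free pages: %lu\n", watermark);	/* 1008 */
	return 0;
}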
@@ -3169,12 +3169,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 	}
 
 
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
-
 	/*
 	 * Check watermarks for an order-0 allocation request. If these
 	 * are not met, then a high-order request also cannot go ahead
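The deleted block above is what made __zone_watermark_ok() pessimistic for non-CMA callers: free CMA pages were subtracted from the free count unless ALLOC_CMA was set. A minimal userspace sketch of that pre-patch adjustment, with invented names and numbers:

#include <stdbool.h>
#include <stdio.h>

/* Pre-patch behaviour, with invented names: callers without the CMA
 * flag could not count free CMA pages toward the watermark. */
static long usable_free_pages(long free_pages, long free_cma_pages,
			      bool alloc_can_use_cma)
{
	if (!alloc_can_use_cma)
		free_pages -= free_cma_pages;
	return free_pages;
}

int main(void)
{
	/* 512 of 2048 free pages sit in CMA; a non-CMA caller used to
	 * see only 1536. After this patch, every caller sees 2048. */
	printf("old, non-CMA caller: %ld\n", usable_free_pages(2048, 512, false));
	printf("old, CMA caller:     %ld\n", usable_free_pages(2048, 512, true));
	return 0;
}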
@@ -3201,10 +3195,8 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		}
 
 #ifdef CONFIG_CMA
-		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		if (!list_empty(&area->free_list[MIGRATE_CMA]))
 			return true;
-		}
 #endif
 		if (alloc_harder &&
 			!list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
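For orders above 0, __zone_watermark_ok() walks each order's free area and succeeds once a usable free list is non-empty; after this change a non-empty MIGRATE_CMA list counts for every caller. A loose, compilable model of that check (it deliberately ignores the kernel's exact per-migratetype policy, and all names here are invented):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum mt_sketch { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE, MT_CMA, MT_COUNT };

/* Stand-in for one order's struct free_area: a count per migratetype
 * free list, where nonzero means "list not empty". */
struct free_area_sketch {
	unsigned long nr_free[MT_COUNT];
};

/* Loose model of the post-patch check: a non-empty CMA list now
 * satisfies any caller, with no ALLOC_CMA gate in front of it. */
static bool order_has_usable_page(const struct free_area_sketch *area)
{
	for (size_t mt = 0; mt < MT_COUNT; mt++)
		if (area->nr_free[mt])
			return true;
	return false;
}

int main(void)
{
	/* Only the CMA list has pages; the check still succeeds. */
	struct free_area_sketch area = { .nr_free = { [MT_CMA] = 4 } };

	printf("usable: %d\n", order_has_usable_page(&area));
	return 0;
}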
@@ -3224,13 +3216,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 		unsigned long mark, int classzone_idx, unsigned int alloc_flags)
 {
 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
-	long cma_pages = 0;
-
-#ifdef CONFIG_CMA
-	/* If allocation can't use CMA areas don't use free CMA pages */
-	if (!(alloc_flags & ALLOC_CMA))
-		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
 	/*
 	 * Fast check for order-0 only. If this fails then the reserves
@@ -3239,7 +3224,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
 	 * the caller is !atomic then it'll uselessly search the free
 	 * list. That corner case is then slower but it is harmless.
 	 */
-	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
+	if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx])
 		return true;
 
 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
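With the cma_pages correction gone, the order-0 fast path in zone_watermark_fast() reduces to a single comparison against the mark plus the lowmem reserve. A compilable userspace model of that comparison, with the zone fields reduced to two hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the zone fields the fast path reads. */
struct zone_sketch {
	long nr_free_pages;	/* NR_FREE_PAGES */
	long lowmem_reserve;	/* z->lowmem_reserve[classzone_idx] */
};

/* The order-0 fast check as it reads after this patch: one comparison,
 * no CMA correction. */
static bool watermark_fast_sketch(const struct zone_sketch *z, long mark)
{
	return z->nr_free_pages > mark + z->lowmem_reserve;
}

int main(void)
{
	struct zone_sketch z = { .nr_free_pages = 1024, .lowmem_reserve = 256 };

	printf("ok = %d\n", watermark_fast_sketch(&z, 512));	/* 1024 > 768 -> 1 */
	return 0;
}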
@@ -3875,10 +3860,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-#ifdef CONFIG_CMA
-	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-		alloc_flags |= ALLOC_CMA;
-#endif
 	return alloc_flags;
 }
 
@@ -4345,9 +4326,6 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
-		*alloc_flags |= ALLOC_CMA;
-
 	return true;
 }
 
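Finally, both gfp_to_alloc_flags() and prepare_alloc_pages() used to OR ALLOC_CMA into the allocation flags for movable requests; once free CMA pages are counted and handed out without the flag, that translation has no reader and is dropped. A toy illustration of the old derivation, using invented flag values rather than the kernel's:

#include <stdio.h>

#define SK_ALLOC_HARDER	0x1	/* invented values, for illustration only */
#define SK_ALLOC_CMA	0x2

enum sk_migratetype { SK_UNMOVABLE, SK_MOVABLE, SK_RECLAIMABLE };

/* Pre-patch shape: movable requests were tagged so the watermark code
 * would count and use free CMA pages for them. Post-patch, no caller
 * sets the CMA flag and the branch below disappears. */
static unsigned int alloc_flags_sketch(enum sk_migratetype mt, int rt_task)
{
	unsigned int flags = 0;

	if (rt_task)
		flags |= SK_ALLOC_HARDER;
	if (mt == SK_MOVABLE)
		flags |= SK_ALLOC_CMA;	/* this is what the patch removes */
	return flags;
}

int main(void)
{
	printf("movable flags: %#x\n", alloc_flags_sketch(SK_MOVABLE, 0));
	return 0;
}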