@@ -162,7 +162,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
- * for_each_mem_range - iterate through memblock areas from type_a and not
+ * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
@@ -173,7 +173,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
- #define for_each_mem_range(i, type_a, type_b, nid, flags, \
+ #define __for_each_mem_range(i, type_a, type_b, nid, flags, \
                            p_start, p_end, p_nid) \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \
                                     p_start, p_end, p_nid); \
@@ -182,7 +182,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                              p_start, p_end, p_nid))

/**
- * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
@@ -193,15 +193,36 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
- #define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
-                               p_start, p_end, p_nid) \
+ #define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \
+                                 p_start, p_end, p_nid) \
        for (i = (u64)ULLONG_MAX, \
-                __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
+                __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
                                      p_start, p_end, p_nid); \
             i != (u64)ULLONG_MAX; \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
                                  p_start, p_end, p_nid))

+ /**
+ * for_each_mem_range - iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+ #define for_each_mem_range(i, p_start, p_end) \
+       __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+                            MEMBLOCK_NONE, p_start, p_end, NULL)
+
+ /**
+ * for_each_mem_range_rev - reverse iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+ #define for_each_mem_range_rev(i, p_start, p_end) \
+       __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+                                MEMBLOCK_NONE, p_start, p_end, NULL)
+

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
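For context, a minimal usage sketch of the new three-argument wrappers added above. total_mem() is a made-up helper for illustration only, not part of this patch or of the kernel API; it assumes <linux/memblock.h> with this change applied.

/*
 * Illustrative only: sum all memory registered with memblock using the
 * simplified iterator.
 */
static phys_addr_t __init total_mem(void)
{
        phys_addr_t start, end, total = 0;
        u64 i;

        /*
         * Expands to __for_each_mem_range(i, &memblock.memory, NULL,
         * NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL).
         */
        for_each_mem_range(i, &start, &end)
                total += end - start;

        return total;
}

Callers that still need node or flag filtering keep calling the renamed __for_each_mem_range() directly with the full argument list.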
@@ -307,8 +328,8 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \
-       for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
-                          nid, flags, p_start, p_end, p_nid)
+       __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
+                            nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -324,8 +345,8 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \
                                        p_nid) \
-       for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
-                              nid, flags, p_start, p_end, p_nid)
+       __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+                                nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);
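The free-range iterators keep their full parameter lists and only switch to the double-underscore helpers underneath. Below is a minimal caller sketch under that assumption; dump_free_ranges_on_node() is a hypothetical helper, not a kernel function.

/*
 * Illustrative only: walk the free ranges (memory minus reserved) of one
 * node and print them. %pa prints a phys_addr_t passed by reference.
 */
static void __init dump_free_ranges_on_node(int nid)
{
        phys_addr_t start, end;
        u64 i;

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL)
                pr_info("free range: [%pa-%pa) on node %d\n",
                        &start, &end, nid);
}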