Skip to content

Commit 142b45a

Browse files
committed
memblock: Add array resizing support
When one of the arrays gets full, we resize it. After much thinking and a few iterations of that code, I went back to on-demand resizing using the (new) internal memblock_find_base() function, which is pretty much what Yinghai initially proposed, though there are some differences in the details. To work, this relies on the default alloc limit being set sensibly by the architecture. Signed-off-by: Benjamin Herrenschmidt <[email protected]>
1 parent 6ed311b commit 142b45a

File tree

1 file changed

+102
-2
lines changed

1 file changed

+102
-2
lines changed

mm/memblock.c

Lines changed: 102 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,19 +11,31 @@
1111
*/
1212

1313
#include <linux/kernel.h>
14+
#include <linux/slab.h>
1415
#include <linux/init.h>
1516
#include <linux/bitops.h>
1617
#include <linux/poison.h>
1718
#include <linux/memblock.h>
1819

1920
struct memblock memblock;
2021

21-
static int memblock_debug;
22+
static int memblock_debug, memblock_can_resize;
2223
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
2324
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];
2425

2526
#define MEMBLOCK_ERROR (~(phys_addr_t)0)
2627

28+
/* inline so we don't get a warning when pr_debug is compiled out */
29+
static inline const char *memblock_type_name(struct memblock_type *type)
30+
{
31+
if (type == &memblock.memory)
32+
return "memory";
33+
else if (type == &memblock.reserved)
34+
return "reserved";
35+
else
36+
return "unknown";
37+
}
38+
2739
/*
2840
* Address comparison utilities
2941
*/
@@ -156,6 +168,79 @@ static void memblock_coalesce_regions(struct memblock_type *type,
156168
memblock_remove_region(type, r2);
157169
}
158170

171+
/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

/*
 * Double the capacity of a region array (memory or reserved) once it
 * fills up.
 *
 * Returns 0 on success, -1 if resizing is not yet permitted
 * (memblock_can_resize unset) or if no space for the larger array
 * could be found.
 *
 * NOTE(review): assumes the architecture has set a sensible default
 * allocation limit before resizing is enabled — confirm against
 * callers of memblock_analyze().
 */
static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	pr_debug("memblock: %s array full, doubling...", memblock_type_name(type));

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		/* addr carries the physical address in both branches below */
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	/* In the slab case this recomputes the pointer kmalloc returned;
	 * in the memblock case it maps the found physical range.
	 */
	new_array = __va(addr);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	/* Zero the newly-gained second half of the array */
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
243+
159244
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
160245
{
161246
unsigned long coalesced = 0;
@@ -196,7 +281,11 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph
196281

197282
if (coalesced)
198283
return coalesced;
199-
if (type->cnt >= type->max)
284+
285+
/* If we are out of space, we fail. It's too late to resize the array
286+
* but then this shouldn't have happened in the first place.
287+
*/
288+
if (WARN_ON(type->cnt >= type->max))
200289
return -1;
201290

202291
/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
@@ -217,6 +306,14 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph
217306
}
218307
type->cnt++;
219308

309+
/* The array is full ? Try to resize it. If that fails, we undo
310+
* our allocation and return an error
311+
*/
312+
if (type->cnt == type->max && memblock_double_array(type)) {
313+
type->cnt--;
314+
return -1;
315+
}
316+
220317
return 0;
221318
}
222319

@@ -541,6 +638,9 @@ void __init memblock_analyze(void)
541638

542639
for (i = 0; i < memblock.memory.cnt; i++)
543640
memblock.memory_size += memblock.memory.regions[i].size;
641+
642+
/* We allow resizing from there */
643+
memblock_can_resize = 1;
544644
}
545645

546646
void __init memblock_init(void)

0 commit comments

Comments
 (0)