@@ -11,19 +11,31 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
 #include <linux/poison.h>
 #include <linux/memblock.h>
 
 struct memblock memblock;
 
-static int memblock_debug;
+static int memblock_debug, memblock_can_resize;
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];
 
 #define MEMBLOCK_ERROR	(~(phys_addr_t)0)
 
+/* inline so we don't get a warning when pr_debug is compiled out */
+static inline const char *memblock_type_name(struct memblock_type *type)
+{
+	if (type == &memblock.memory)
+		return "memory";
+	else if (type == &memblock.reserved)
+		return "reserved";
+	else
+		return "unknown";
+}
+
 /*
  * Address comparison utilities
  */
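
A note on why the new helper is marked inline: memblock_type_name() is only referenced from the pr_debug()/pr_err() call sites added further down. In builds where pr_debug() compiles out, a plain static function would be left with no callers and trip gcc's -Wunused-function; static inline functions are exempt from that warning. A minimal userspace sketch of the effect (the pr_debug stub below is an assumption for illustration, not the kernel macro):

    /* build with: gcc -Wall -c example.c */
    #define pr_debug(fmt, ...) do { } while (0)  /* pretend !DEBUG: args vanish */

    static const char *name_plain(void)  { return "memory"; } /* warns: unused */
    static inline const char *name_inline(void) { return "memory"; } /* silent */

    void caller(void)
    {
            pr_debug("%s\n", name_inline()); /* call disappears with the macro */
    }
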
@@ -156,6 +168,79 @@ static void memblock_coalesce_regions(struct memblock_type *type,
 	memblock_remove_region(type, r2);
 }
 
+/* Defined below but needed now */
+static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);
+
+static int memblock_double_array(struct memblock_type *type)
+{
+	struct memblock_region *new_array, *old_array;
+	phys_addr_t old_size, new_size, addr;
+	int use_slab = slab_is_available();
+
+	/* We don't allow resizing until we know about the reserved regions
+	 * of memory that aren't suitable for allocation
+	 */
+	if (!memblock_can_resize)
+		return -1;
+
+	pr_debug("memblock: %s array full, doubling...", memblock_type_name(type));
+
+	/* Calculate new doubled size */
+	old_size = type->max * sizeof(struct memblock_region);
+	new_size = old_size << 1;
+
+	/* Try to find some space for it.
+	 *
+	 * WARNING: We assume that either slab_is_available() and we use it or
+	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
+	 * when bootmem is currently active (unless bootmem itself is implemented
+	 * on top of MEMBLOCK which isn't the case yet)
+	 *
+	 * This should however not be an issue for now, as we currently only
+	 * call into MEMBLOCK while it's still active, or much later when slab is
+	 * active for memory hotplug operations
+	 */
+	if (use_slab) {
+		new_array = kmalloc(new_size, GFP_KERNEL);
+		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
+	} else
+		addr = memblock_find_base(new_size, sizeof(phys_addr_t), MEMBLOCK_ALLOC_ACCESSIBLE);
+	if (addr == MEMBLOCK_ERROR) {
+		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
+		       memblock_type_name(type), type->max, type->max * 2);
+		return -1;
+	}
+	new_array = __va(addr);
+
+	/* Found space, we now need to move the array over before
+	 * we add the reserved region since it may be our reserved
+	 * array itself that is full.
+	 */
+	memcpy(new_array, type->regions, old_size);
+	memset(new_array + type->max, 0, old_size);
+	old_array = type->regions;
+	type->regions = new_array;
+	type->max <<= 1;
+
+	/* If we use SLAB that's it, we are done */
+	if (use_slab)
+		return 0;
+
+	/* Add the new reserved region now. Should not fail ! */
+	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);
+
+	/* If the array wasn't our static init one, then free it. We only do
+	 * that before SLAB is available as later on, we don't know whether
+	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
+	 * anyways
+	 */
+	if (old_array != memblock_memory_init_regions &&
+	    old_array != memblock_reserved_init_regions)
+		memblock_free(__pa(old_array), old_size);
+
+	return 0;
+}
+
 static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
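
This function is the heart of the patch, and the ordering matters: the new array is populated and published (type->regions swapped, type->max doubled) *before* its backing memory is reserved, because the array being grown may be memblock.reserved itself. A stripped-down userspace analogue of the grow-and-swap step (hypothetical names; it omits the slab/memblock allocation split and the old-array free):

    #include <stdlib.h>
    #include <string.h>

    struct region { unsigned long base, size; };
    struct rtype  { struct region *regions; long cnt, max; };

    /* Illustrative sketch of the doubling pattern, not the kernel code */
    static int double_array(struct rtype *type)
    {
            size_t old_size = type->max * sizeof(struct region);
            struct region *new_array = malloc(old_size * 2);

            if (new_array == NULL)
                    return -1;
            /* Same ordering as the patch: copy the live entries, zero the
             * new upper half, then publish the new array before doing
             * anything that might insert into it. */
            memcpy(new_array, type->regions, old_size);
            memset((char *)new_array + old_size, 0, old_size);
            type->regions = new_array;
            type->max *= 2;
            return 0;
    }
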
@@ -196,7 +281,11 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph
 
 	if (coalesced)
 		return coalesced;
-	if (type->cnt >= type->max)
+
+	/* If we are out of space, we fail. It's too late to resize the array
+	 * but then this shouldn't have happened in the first place.
+	 */
+	if (WARN_ON(type->cnt >= type->max))
 		return -1;
 
 	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
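
WARN_ON() evaluates to its condition, so this overflow path still returns -1 exactly as before; the change only adds a backtrace if the "impossible" case ever fires. The idiom can be modeled in plain GNU C roughly like this (a sketch, not the kernel's actual definition):

    #include <stdio.h>

    #define WARN_ON(cond) ({                                \
            int __ret = !!(cond);                           \
            if (__ret)                                      \
                    fprintf(stderr, "WARNING at %s:%d\n",   \
                            __FILE__, __LINE__);            \
            __ret;                                          \
    })

    /* usage mirrors the patch: warn, then take the failure path */
    static int add(int cnt, int max)
    {
            if (WARN_ON(cnt >= max))
                    return -1;
            return 0;
    }
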
@@ -217,6 +306,14 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph
 	}
 	type->cnt++;
 
+	/* The array is full ? Try to resize it. If that fails, we undo
+	 * our allocation and return an error
+	 */
+	if (type->cnt == type->max && memblock_double_array(type)) {
+		type->cnt--;
+		return -1;
+	}
+
 	return 0;
 }
 
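The subtlety here is that the array is doubled when it *becomes* full (cnt == max), not when the next insertion finds no room. One reading of why (implied by memblock_double_array() above rather than stated outright): before slab is up, reserving the new array's memory recursively re-enters memblock_add_region(), and that recursive insert is only safe because the array has already been swapped and doubled by that point:

    /* Reading of the control flow (not authoritative):
     * add_region(entry)    -> cnt reaches max: array now full
     *   double_array()     -> new array swapped in, max doubled
     *     add_region(...)  -> recursion to reserve the new array's memory;
     *                         a free slot is guaranteed since max already
     *                         doubled, so the WARN_ON above stays unreachable
     */
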
@@ -541,6 +638,9 @@ void __init memblock_analyze(void)
 
 	for (i = 0; i < memblock.memory.cnt; i++)
 		memblock.memory_size += memblock.memory.regions[i].size;
+
+	/* We allow resizing from there */
+	memblock_can_resize = 1;
 }
 
 void __init memblock_init(void)
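
The memblock_can_resize gate means doubling is only attempted once memblock_analyze() has run, i.e. once the registered regions are settled enough for memblock_find_base() to pick a safe spot for the enlarged array. The boot ordering this relies on, roughly (exact call sites vary by architecture):

    memblock_init();     /* arrays point at the static INIT_MEMBLOCK_REGIONS[] */
    /* early memblock_add()/memblock_reserve() calls: resizing still refused */
    memblock_analyze();  /* totals computed, memblock_can_resize = 1 */
    /* later adds (e.g. memory hotplug) may now trigger memblock_double_array() */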