|
64 | 64 | #include <asm/xen/hypervisor.h>
|
65 | 65 | #include <asm/mmu_context.h>
|
66 | 66 |
|
| 67 | +static int num_standard_resources; |
| 68 | +static struct resource *standard_resources; |
| 69 | + |
67 | 70 | phys_addr_t __fdt_pointer __initdata;
|
68 | 71 |
|
69 | 72 | /*
|
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void)
|
206 | 209 | {
|
207 | 210 | struct memblock_region *region;
|
208 | 211 | struct resource *res;
|
| 212 | + unsigned long i = 0; |
209 | 213 |
|
210 | 214 | kernel_code.start = __pa_symbol(_text);
|
211 | 215 | kernel_code.end = __pa_symbol(__init_begin - 1);
|
212 | 216 | kernel_data.start = __pa_symbol(_sdata);
|
213 | 217 | kernel_data.end = __pa_symbol(_end - 1);
|
214 | 218 |
|
| 219 | + num_standard_resources = memblock.memory.cnt; |
| 220 | + standard_resources = alloc_bootmem_low(num_standard_resources * |
| 221 | + sizeof(*standard_resources)); |
| 222 | + |
215 | 223 | for_each_memblock(memory, region) {
|
216 |
| - res = alloc_bootmem_low(sizeof(*res)); |
| 224 | + res = &standard_resources[i++]; |
217 | 225 | if (memblock_is_nomap(region)) {
|
218 | 226 | res->name = "reserved";
|
219 | 227 | res->flags = IORESOURCE_MEM;
|
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void)
|
243 | 251 |
|
/*
 * Mark memblock_reserve()d ranges as "reserved" children of the standard
 * "System RAM" resources, so they show up under /proc/iomem.
 *
 * Walks the standard_resources[] array (populated earlier by
 * request_standard_resources()) and, for each resource that overlaps a
 * reserved memblock region, splits out a page-aligned "reserved"
 * sub-resource covering the intersection.
 *
 * Always returns 0; presumably registered as an initcall — the
 * registration site is outside this hunk, so confirm against the full file.
 */
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	/* One pass per standard memory resource recorded at boot. */
	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		/*
		 * Cheap early-out: skip resources that contain no
		 * memblock-reserved memory at all.
		 */
		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		/* Find every reserved region intersecting this resource. */
		for_each_reserved_mem_region(j, &r_start, &r_end) {
			resource_size_t start, end;

			/*
			 * Round the reserved range out to page boundaries
			 * (down for start, up for end), then clamp it to
			 * the enclosing resource. 'end' is inclusive, hence
			 * the "- 1".
			 */
			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			/* No overlap with this resource — try the next one. */
			if (start > mem->end || end < mem->start)
				continue;

			/* Insert the "reserved" child, splitting as needed. */
			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}