Commit cc9aec0

djbw authored and Ingo Molnar committed
x86/numa_emulation: Introduce uniform split capability
The current NUMA emulation capabilities for splitting System RAM by a
fixed size or by a set number of nodes may result in some nodes being
larger than others. The implementation prioritizes establishing a
minimum usable memory size over satisfying the requested number of NUMA
nodes.

Introduce a uniform split capability that evenly partitions each
physical NUMA node into N emulated nodes. For example numa=fake=3U
creates 6 emulated nodes total on a system that has 2 physical nodes.

This capability is useful for debugging and evaluating platform
memory-side-cache capabilities as described by the ACPI HMAT (see
5.2.27.5 Memory Side Cache Information Structure in ACPI 6.2a).

Compare numa=fake=6 that results in only 5 nodes being created against
numa=fake=3U which takes the 2 physical nodes and evenly divides them:

numa=fake=6
available: 5 nodes (0-4)
node 0 cpus: 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38
node 0 size: 2648 MB
node 0 free: 2443 MB
node 1 cpus: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39
node 1 size: 2672 MB
node 1 free: 2442 MB
node 2 cpus: 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38
node 2 size: 5291 MB
node 2 free: 5278 MB
node 3 cpus: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39
node 3 size: 2677 MB
node 3 free: 2665 MB
node 4 cpus: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39
node 4 size: 2676 MB
node 4 free: 2663 MB
node distances:
node   0   1   2   3   4
  0:  10  20  10  20  20
  1:  20  10  20  10  10
  2:  10  20  10  20  20
  3:  20  10  20  10  10
  4:  20  10  20  10  10

numa=fake=3U
available: 6 nodes (0-5)
node 0 cpus: 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38
node 0 size: 2900 MB
node 0 free: 2637 MB
node 1 cpus: 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38
node 1 size: 3023 MB
node 1 free: 3012 MB
node 2 cpus: 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38
node 2 size: 2015 MB
node 2 free: 2004 MB
node 3 cpus: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39
node 3 size: 2704 MB
node 3 free: 2522 MB
node 4 cpus: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39
node 4 size: 2709 MB
node 4 free: 2698 MB
node 5 cpus: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31 33 35 37 39
node 5 size: 2612 MB
node 5 free: 2601 MB
node distances:
node   0   1   2   3   4   5
  0:  10  10  10  20  20  20
  1:  10  10  10  20  20  20
  2:  10  10  10  20  20  20
  3:  20  20  20  10  10  10
  4:  20  20  20  10  10  10
  5:  20  20  20  10  10  10

Signed-off-by: Dan Williams <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Wei Yang <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/153089328617.27680.14930758266174305832.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 3b6c62f commit cc9aec0
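
For intuition, the uniform per-node size is simply the physical node's span
divided by N, computed in page-frame units and then rounded up to the
fake-node granularity. The following minimal userspace sketch mirrors that
arithmetic; the 4K page size, the 64MB FAKE_NODE_MIN_SIZE value, and the
8GB example node are illustrative assumptions, not values taken from this
commit:

#include <stdio.h>
#include <stdint.h>

/*
 * Userspace stand-ins for the kernel definitions the patch relies on.
 * PAGE_SHIFT and FAKE_NODE_MIN_SIZE are assumed values, reproduced
 * locally so the sketch compiles on its own.
 */
#define PAGE_SHIFT          12
#define PHYS_PFN(x)         ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)         ((uint64_t)(x) << PAGE_SHIFT)
#define FAKE_NODE_MIN_SIZE  ((uint64_t)64 << 20)
#define ALIGN(x, a)         (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Mirrors uniform_size() from the patch: divide the span in page units. */
static uint64_t uniform_size(uint64_t max_addr, uint64_t base, uint64_t hole,
                             int nr_nodes)
{
        return PFN_PHYS((PHYS_PFN(max_addr) - PHYS_PFN(base) -
                         PHYS_PFN(hole)) / nr_nodes);
}

int main(void)
{
        /* A hypothetical 8GB physical node split three ways (numa=fake=3U). */
        uint64_t start = 0, end = (uint64_t)8 << 30;
        uint64_t size = uniform_size(end, start, 0, 3);

        /* The patch then rounds min_size up to the fake-node granularity. */
        size = ALIGN(size, FAKE_NODE_MIN_SIZE);
        printf("per-node size: %llu MB\n",
               (unsigned long long)(size >> 20));   /* prints 2752 MB */
        return 0;
}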

File tree

2 files changed: 90 additions & 19 deletions

Documentation/x86/x86_64/boot-options.txt

Lines changed: 4 additions & 0 deletions
@@ -156,6 +156,10 @@ NUMA
         If given as an integer, fills all system RAM with N fake nodes
         interleaved over physical nodes.
 
+  numa=fake=<N>U
+        If given as an integer followed by 'U', it will divide each
+        physical node into N emulated nodes.
+
 ACPI
 
   acpi=off        Don't enable ACPI
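
With the documentation addition above, a boot parameter such as
numa=fake=4U splits every physical node four ways; the resulting layout can
be inspected with numactl --hardware, which is the format of the node size
and distance listings quoted in the commit message.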

arch/x86/mm/numa_emulation.c

Lines changed: 86 additions & 19 deletions
@@ -198,40 +198,73 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
         return end;
 }
 
+static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)
+{
+        unsigned long max_pfn = PHYS_PFN(max_addr);
+        unsigned long base_pfn = PHYS_PFN(base);
+        unsigned long hole_pfns = PHYS_PFN(hole);
+
+        return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
+}
+
 /*
  * Sets up fake nodes of `size' interleaved over physical nodes ranging from
  * `addr' to `max_addr'.
  *
  * Returns zero on success or negative on error.
  */
-static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
                                               struct numa_meminfo *pi,
-                                              u64 addr, u64 max_addr, u64 size)
+                                              u64 addr, u64 max_addr, u64 size,
+                                              int nr_nodes, struct numa_memblk *pblk,
+                                              int nid)
 {
         nodemask_t physnode_mask = numa_nodes_parsed;
+        int i, ret, uniform = 0;
         u64 min_size;
-        int nid = 0;
-        int i, ret;
 
-        if (!size)
+        if ((!size && !nr_nodes) || (nr_nodes && !pblk))
                 return -1;
+
         /*
-         * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
-         * increased accordingly if the requested size is too small. This
-         * creates a uniform distribution of node sizes across the entire
-         * machine (but not necessarily over physical nodes).
+         * In the 'uniform' case split the passed in physical node by
+         * nr_nodes, in the non-uniform case, ignore the passed in
+         * physical block and try to create nodes of at least size
+         * @size.
+         *
+         * In the uniform case, split the nodes strictly by physical
+         * capacity, i.e. ignore holes. In the non-uniform case account
+         * for holes and treat @size as a minimum floor.
          */
-        min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
-        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
-        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
-                min_size = (min_size + FAKE_NODE_MIN_SIZE) &
-                        FAKE_NODE_MIN_HASH_MASK;
+        if (!nr_nodes)
+                nr_nodes = MAX_NUMNODES;
+        else {
+                nodes_clear(physnode_mask);
+                node_set(pblk->nid, physnode_mask);
+                uniform = 1;
+        }
+
+        if (uniform) {
+                min_size = uniform_size(max_addr, addr, 0, nr_nodes);
+                size = min_size;
+        } else {
+                /*
+                 * The limit on emulated nodes is MAX_NUMNODES, so the
+                 * size per node is increased accordingly if the
+                 * requested size is too small. This creates a uniform
+                 * distribution of node sizes across the entire machine
+                 * (but not necessarily over physical nodes).
+                 */
+                min_size = uniform_size(max_addr, addr,
+                                mem_hole_size(addr, max_addr), nr_nodes);
+        }
+        min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
         if (size < min_size) {
                 pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                         size >> 20, min_size >> 20);
                 size = min_size;
         }
-        size &= FAKE_NODE_MIN_HASH_MASK;
+        size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
 
         /*
          * Fill physical nodes with fake nodes of size until there is no memory
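
A note on the rounding cleanup in this hunk: the old open-coded
FAKE_NODE_MIN_HASH_MASK arithmetic rounded min_size up to the next
FAKE_NODE_MIN_SIZE boundary, which is exactly what ALIGN() expresses, and
the old size &= FAKE_NODE_MIN_HASH_MASK rounded down, now spelled
ALIGN_DOWN(). A quick standalone check of the round-up equivalence, with
the macros and the 64MB constant reproduced locally as assumptions:

#include <assert.h>
#include <stdint.h>

#define FAKE_NODE_MIN_SIZE       ((uint64_t)64 << 20)
#define FAKE_NODE_MIN_HASH_MASK  (~(FAKE_NODE_MIN_SIZE - 1))
#define ALIGN(x, a)              (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (uint64_t sz = 1; sz < ((uint64_t)1 << 32); sz += 0x123457) {
                /* Old idiom: bump unaligned sizes to the next boundary. */
                uint64_t old = sz;

                if ((old & FAKE_NODE_MIN_HASH_MASK) < old)
                        old = (old + FAKE_NODE_MIN_SIZE) &
                                FAKE_NODE_MIN_HASH_MASK;
                assert(old == ALIGN(sz, FAKE_NODE_MIN_SIZE));
        }
        return 0;
}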
@@ -248,10 +281,14 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                         node_clear(i, physnode_mask);
                         continue;
                 }
+
                 start = pi->blk[phys_blk].start;
                 limit = pi->blk[phys_blk].end;
 
-                end = find_end_of_node(start, limit, size);
+                if (uniform)
+                        end = start + size;
+                else
+                        end = find_end_of_node(start, limit, size);
                 /*
                  * If there won't be at least FAKE_NODE_MIN_SIZE of
                  * non-reserved memory in ZONE_DMA32 for the next node,
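
Note that in the uniform case each emulated node is carved as exactly size
bytes from the start of the physical block; holes are deliberately not
compensated for, which is what keeps the split strictly proportional to
physical capacity, per the comment in the previous hunk.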
@@ -266,7 +303,8 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                  * next node, this one must extend to the end of the
                  * physical node.
                  */
-                if (limit - end - mem_hole_size(end, limit) < size)
+                if ((limit - end - mem_hole_size(end, limit) < size)
+                                && !uniform)
                         end = limit;
 
                 ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -276,7 +314,15 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                         return ret;
                 }
         }
-        return 0;
+        return nid;
+}
+
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+                                              struct numa_meminfo *pi,
+                                              u64 addr, u64 max_addr, u64 size)
+{
+        return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
+                                                   0, NULL, NUMA_NO_NODE);
 }
 
 int __init setup_emu2phys_nid(int *dfl_phys_nid)
@@ -346,7 +392,28 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
          * the fixed node size. Otherwise, if it is just a single number N,
          * split the system RAM into N fake nodes.
          */
-        if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
+        if (strchr(emu_cmdline, 'U')) {
+                nodemask_t physnode_mask = numa_nodes_parsed;
+                unsigned long n;
+                int nid = 0;
+
+                n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
+                ret = -1;
+                for_each_node_mask(i, physnode_mask) {
+                        ret = split_nodes_size_interleave_uniform(&ei, &pi,
+                                        pi.blk[i].start, pi.blk[i].end, 0,
+                                        n, &pi.blk[i], nid);
+                        if (ret < 0)
+                                break;
+                        if (ret < n) {
+                                pr_info("%s: phys: %d only got %d of %ld nodes, failing\n",
+                                                __func__, i, ret, n);
+                                ret = -1;
+                                break;
+                        }
+                        nid = ret;
+                }
+        } else if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
                 u64 size;
 
                 size = memparse(emu_cmdline, &emu_cmdline);
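
The dispatch above leans on simple_strtoul() stopping at the first
non-digit character, so for numa=fake=3U it yields 3 and leaves emu_cmdline
at the 'U' suffix that selected this branch. Also note the changed return
convention: split_nodes_size_interleave_uniform() returns the next free
node id on success, letting the caller thread nid across physical nodes and
verify that each physical node produced exactly n emulated nodes. A
userspace analogue of just the suffix parse, using the standard strtoul()
(illustrative only, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char arg[] = "3U";      /* as passed via numa=fake=3U */
        char *rest = arg;
        unsigned long n = strtoul(arg, &rest, 0);

        if (strchr(rest, 'U'))
                printf("uniform: %lu emulated nodes per physical node\n", n);
        else
                printf("legacy: %lu fake nodes over all system RAM\n", n);
        return 0;
}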
