--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,7 +6,6 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 #include <linux/numa.h>
-#include <linux/sched/isolation.h>
 
 /**
  * cpumask_next - get the next cpu in a cpumask
@@ -206,27 +205,22 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu, hk_flags;
-	const struct cpumask *mask;
+	int cpu;
 
-	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
-	mask = housekeeping_cpumask(hk_flags);
 	/* Wrap: we always want a cpu. */
-	i %= cpumask_weight(mask);
+	i %= num_online_cpus();
 
 	if (node == NUMA_NO_NODE) {
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask)
 			if (i-- == 0)
 				return cpu;
-		}
 	} else {
 		/* NUMA first. */
-		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
 			if (i-- == 0)
 				return cpu;
-		}
 
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask) {
 			/* Skip NUMA nodes, done above. */
 			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
 				continue;
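For context, the change above makes cpumask_local_spread() iterate cpu_online_mask directly again, so the i-th CPU is picked from all online CPUs rather than only housekeeping CPUs. A minimal sketch of how callers typically consume this function follows; the driver shape and the name example_spread_irqs are hypothetical, while cpumask_local_spread(), cpumask_of(), and irq_set_affinity_hint() are existing kernel APIs.

/*
 * Hypothetical driver snippet (illustration only, not part of this
 * commit): pin one IRQ per queue, spreading queues across online
 * CPUs and preferring CPUs on the device's local NUMA node.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void example_spread_irqs(const int *irqs, unsigned int nr_queues,
				int node)
{
	unsigned int q;

	for (q = 0; q < nr_queues; q++) {
		/*
		 * q-th CPU for this node; the index wraps, so a valid
		 * CPU is always returned even when nr_queues exceeds
		 * the number of online CPUs.
		 */
		unsigned int cpu = cpumask_local_spread(q, node);

		irq_set_affinity_hint(irqs[q], cpumask_of(cpu));
	}
}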