|
5 | 5 | #include <xen/xen.h>
|
6 | 6 |
|
7 | 7 | #include <asm/apic.h>
|
| 8 | +#include <asm/io_apic.h> |
8 | 9 | #include <asm/mpspec.h>
|
9 | 10 | #include <asm/smp.h>
|
10 | 11 |
|
@@ -85,73 +86,6 @@ early_initcall(smp_init_primary_thread_mask);
|
85 | 86 | static inline void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid) { }
|
86 | 87 | #endif
|
87 | 88 |
|
88 |
| -static int __initdata setup_possible_cpus = -1; |
89 |
| - |
90 |
| -/* |
91 |
| - * cpu_possible_mask should be static, it cannot change as cpu's |
92 |
| - * are onlined, or offlined. The reason is per-cpu data-structures |
93 |
| - * are allocated by some modules at init time, and don't expect to |
94 |
| - * do this dynamically on cpu arrival/departure. |
95 |
| - * cpu_present_mask on the other hand can change dynamically. |
96 |
| - * In case when cpu_hotplug is not compiled, then we resort to current |
97 |
| - * behaviour, which is cpu_possible == cpu_present. |
98 |
| - * - Ashok Raj |
99 |
| - * |
100 |
| - * Three ways to find out the number of additional hotplug CPUs: |
101 |
| - * - If the BIOS specified disabled CPUs in ACPI/mptables use that. |
102 |
| - * - The user can overwrite it with possible_cpus=NUM |
103 |
| - * - Otherwise don't reserve additional CPUs. |
104 |
| - * We do this because additional CPUs waste a lot of memory. |
105 |
| - * -AK |
106 |
| - */ |
107 |
| -__init void prefill_possible_map(void) |
108 |
| -{ |
109 |
| - unsigned int num_processors = topo_info.nr_assigned_cpus; |
110 |
| - unsigned int disabled_cpus = topo_info.nr_disabled_cpus; |
111 |
| - int i, possible; |
112 |
| - |
113 |
| - i = setup_max_cpus ?: 1; |
114 |
| - if (setup_possible_cpus == -1) { |
115 |
| - possible = topo_info.nr_assigned_cpus; |
116 |
| -#ifdef CONFIG_HOTPLUG_CPU |
117 |
| - if (setup_max_cpus) |
118 |
| - possible += num_processors; |
119 |
| -#else |
120 |
| - if (possible > i) |
121 |
| - possible = i; |
122 |
| -#endif |
123 |
| - } else |
124 |
| - possible = setup_possible_cpus; |
125 |
| - |
126 |
| - total_cpus = max_t(int, possible, num_processors + disabled_cpus); |
127 |
| - |
128 |
| - /* nr_cpu_ids could be reduced via nr_cpus= */ |
129 |
| - if (possible > nr_cpu_ids) { |
130 |
| - pr_warn("%d Processors exceeds NR_CPUS limit of %u\n", |
131 |
| - possible, nr_cpu_ids); |
132 |
| - possible = nr_cpu_ids; |
133 |
| - } |
134 |
| - |
135 |
| -#ifdef CONFIG_HOTPLUG_CPU |
136 |
| - if (!setup_max_cpus) |
137 |
| -#endif |
138 |
| - if (possible > i) { |
139 |
| - pr_warn("%d Processors exceeds max_cpus limit of %u\n", |
140 |
| - possible, setup_max_cpus); |
141 |
| - possible = i; |
142 |
| - } |
143 |
| - |
144 |
| - set_nr_cpu_ids(possible); |
145 |
| - |
146 |
| - pr_info("Allowing %d CPUs, %d hotplug CPUs\n", |
147 |
| - possible, max_t(int, possible - num_processors, 0)); |
148 |
| - |
149 |
| - reset_cpu_possible_mask(); |
150 |
| - |
151 |
| - for (i = 0; i < possible; i++) |
152 |
| - set_cpu_possible(i, true); |
153 |
| -} |
154 |
| - |
155 | 89 | static int topo_lookup_cpuid(u32 apic_id)
|
156 | 90 | {
|
157 | 91 | int i;
|
@@ -293,12 +227,114 @@ void topology_hotunplug_apic(unsigned int cpu)
|
293 | 227 | }
|
294 | 228 | #endif
|
295 | 229 |
|
296 |
| -static int __init _setup_possible_cpus(char *str) |
| 230 | +#ifdef CONFIG_SMP |
| 231 | +static unsigned int max_possible_cpus __initdata = NR_CPUS; |
| 232 | + |
| 233 | +/** |
| 234 | + * topology_apply_cmdline_limits_early - Apply topology command line limits early |
| 235 | + * |
| 236 | + * Ensure that command line limits are in effect before firmware parsing |
| 237 | + * takes place. |
| 238 | + */ |
| 239 | +void __init topology_apply_cmdline_limits_early(void) |
297 | 240 | {
|
298 |
| - get_option(&str, &setup_possible_cpus); |
| 241 | + unsigned int possible = nr_cpu_ids; |
| 242 | + |
| 243 | + /* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' 'noapic' */ |
| 244 | + if (!setup_max_cpus || ioapic_is_disabled || apic_is_disabled) |
| 245 | + possible = 1; |
| 246 | + |
| 247 | + /* 'possible_cpus=N' */ |
| 248 | + possible = min_t(unsigned int, max_possible_cpus, possible); |
| 249 | + |
| 250 | + if (possible < nr_cpu_ids) { |
| 251 | + pr_info("Limiting to %u possible CPUs\n", possible); |
| 252 | + set_nr_cpu_ids(possible); |
| 253 | + } |
| 254 | +} |
| 255 | + |
| 256 | +static __init bool restrict_to_up(void) |
| 257 | +{ |
| 258 | + if (!smp_found_config || ioapic_is_disabled) |
| 259 | + return true; |
| 260 | + /* |
| 261 | + * XEN PV is special as it does not advertise the local APIC |
| 262 | + * properly, but provides a fake topology for it so that the |
| 263 | + * infrastructure works. So don't apply the restrictions vs. APIC |
| 264 | + * here. |
| 265 | + */ |
| 266 | + if (xen_pv_domain()) |
| 267 | + return false; |
| 268 | + |
| 269 | + return apic_is_disabled; |
| 270 | +} |
| 271 | + |
/*
 * Size the possible/present CPU masks from the firmware-provided topology
 * counts, honoring nr_cpu_ids and UP restrictions, then seed the masks.
 */
void __init topology_init_possible_cpus(void)
{
	unsigned int assigned = topo_info.nr_assigned_cpus;
	unsigned int disabled = topo_info.nr_disabled_cpus;
	unsigned int total = assigned + disabled;
	unsigned int cpu, allowed = 1;

	/* allowed stays 1 (boot CPU only) when restricted to UP */
	if (!restrict_to_up()) {
		/* Firmware handed out more CPUs than nr_cpu_ids can hold */
		if (WARN_ON_ONCE(assigned > nr_cpu_ids)) {
			disabled += assigned - nr_cpu_ids;
			assigned = nr_cpu_ids;
		}
		allowed = min_t(unsigned int, total, nr_cpu_ids);
	}

	if (total > allowed)
		pr_warn("%u possible CPUs exceed the limit of %u\n", total, allowed);

	/* Clamp the counts to the allowed budget; disabled gets the rest */
	assigned = min_t(unsigned int, allowed, assigned);
	disabled = allowed - assigned;

	/* Write the clamped counts back so later users see consistent data */
	topo_info.nr_assigned_cpus = assigned;
	topo_info.nr_disabled_cpus = disabled;

	total_cpus = allowed;
	set_nr_cpu_ids(allowed);

	pr_info("Allowing %u present CPUs plus %u hotplug CPUs\n", assigned, disabled);
	if (topo_info.nr_rejected_cpus)
		pr_info("Rejected CPUs %u\n", topo_info.nr_rejected_cpus);

	/* Start from boot-CPU-only masks, then fill them in below */
	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	for (cpu = 0; cpu < allowed; cpu++) {
		u32 apicid = cpuid_to_apicid[cpu];

		set_cpu_possible(cpu, true);

		/* No APIC registered for this CPU slot: possible but not present */
		if (apicid == BAD_APICID)
			continue;

		set_cpu_present(cpu, test_bit(apicid, phys_cpu_present_map));
	}
}
| 317 | + |
| 318 | +/* |
| 319 | + * Late SMP disable after sizing CPU masks when APIC/IOAPIC setup failed. |
| 320 | + */ |
| 321 | +void __init topology_reset_possible_cpus_up(void) |
| 322 | +{ |
| 323 | + init_cpu_present(cpumask_of(0)); |
| 324 | + init_cpu_possible(cpumask_of(0)); |
| 325 | + |
| 326 | + bitmap_zero(phys_cpu_present_map, MAX_LOCAL_APIC); |
| 327 | + if (topo_info.boot_cpu_apic_id != BAD_APICID) |
| 328 | + set_bit(topo_info.boot_cpu_apic_id, phys_cpu_present_map); |
| 329 | +} |
| 330 | + |
/* Parse 'possible_cpus=N' and record the requested possible-CPU limit. */
static int __init setup_possible_cpus(char *str)
{
	/*
	 * NOTE(review): get_option() is declared with an int * argument while
	 * max_possible_cpus is unsigned int — confirm the pointer types match
	 * and that a negative N on the command line is handled sanely.
	 */
	get_option(&str, &max_possible_cpus);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
#endif
302 | 338 |
|
303 | 339 | static int __init apic_set_disabled_cpu_apicid(char *arg)
|
304 | 340 | {
|
|
0 commit comments