
Commit e525de3

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Misc fixes all across the map:

  - /proc/kcore vsyscall related fixes
  - LTO fix
  - build warning fix
  - CPU hotplug fix
  - Kconfig NR_CPUS cleanups
  - cpu_has() cleanups/robustification
  - .gitignore fix
  - memory-failure unmapping fix
  - UV platform fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm, mm/hwpoison: Don't unconditionally unmap kernel 1:1 pages
  x86/error_inject: Make just_return_func() globally visible
  x86/platform/UV: Fix GAM Range Table entries less than 1GB
  x86/build: Add arch/x86/tools/insn_decoder_test to .gitignore
  x86/smpboot: Fix uncore_pci_remove() indexing bug when hot-removing a physical CPU
  x86/mm/kcore: Add vsyscall page to /proc/kcore conditionally
  vfs/proc/kcore, x86/mm/kcore: Fix SMAP fault when dumping vsyscall user page
  x86/Kconfig: Further simplify the NR_CPUS config
  x86/Kconfig: Simplify NR_CPUS config
  x86/MCE: Fix build warning introduced by "x86: do not use print_symbol()"
  x86/cpufeature: Update _static_cpu_has() to use all named variables
  x86/cpufeature: Reindent _static_cpu_has()
2 parents d4667ca + fd0e786 commit e525de3

14 files changed: +146, -81 lines

arch/x86/.gitignore

Lines changed: 1 addition & 0 deletions
@@ -1,6 +1,7 @@
 boot/compressed/vmlinux
 tools/test_get_len
 tools/insn_sanity
+tools/insn_decoder_test
 purgatory/kexec-purgatory.c
 purgatory/purgatory.ro

arch/x86/Kconfig

Lines changed: 58 additions & 17 deletions
@@ -423,12 +423,6 @@ config X86_MPPARSE
 	  For old smp systems that do not have proper acpi support. Newer systems
 	  (esp with 64bit cpus) with acpi support, MADT and DSDT will override it

-config X86_BIGSMP
-	bool "Support for big SMP systems with more than 8 CPUs"
-	depends on X86_32 && SMP
-	---help---
-	  This option is needed for the systems that have more than 8 CPUs
-
 config GOLDFISH
 	def_bool y
 	depends on X86_GOLDFISH
@@ -460,6 +454,12 @@ config INTEL_RDT
 	  Say N if unsure.

 if X86_32
+config X86_BIGSMP
+	bool "Support for big SMP systems with more than 8 CPUs"
+	depends on SMP
+	---help---
+	  This option is needed for the systems that have more than 8 CPUs
+
 config X86_EXTENDED_PLATFORM
 	bool "Support for extended (non-PC) x86 platforms"
 	default y
@@ -949,25 +949,66 @@ config MAXSMP
 	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.

+#
+# The maximum number of CPUs supported:
+#
+# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT,
+# and which can be configured interactively in the
+# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range.
+#
+# The ranges are different on 32-bit and 64-bit kernels, depending on
+# hardware capabilities and scalability features of the kernel.
+#
+# ( If MAXSMP is enabled we just use the highest possible value and disable
+#   interactive configuration. )
+#
+
+config NR_CPUS_RANGE_BEGIN
+	int
+	default NR_CPUS_RANGE_END if MAXSMP
+	default 1 if !SMP
+	default 2
+
+config NR_CPUS_RANGE_END
+	int
+	depends on X86_32
+	default 64 if SMP && X86_BIGSMP
+	default 8 if SMP && !X86_BIGSMP
+	default 1 if !SMP
+
+config NR_CPUS_RANGE_END
+	int
+	depends on X86_64
+	default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK)
+	default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK)
+	default 1 if !SMP
+
+config NR_CPUS_DEFAULT
+	int
+	depends on X86_32
+	default 32 if X86_BIGSMP
+	default 8 if SMP
+	default 1 if !SMP
+
+config NR_CPUS_DEFAULT
+	int
+	depends on X86_64
+	default 8192 if MAXSMP
+	default 64 if SMP
+	default 1 if !SMP
+
 config NR_CPUS
 	int "Maximum number of CPUs" if SMP && !MAXSMP
-	range 2 8 if SMP && X86_32 && !X86_BIGSMP
-	range 2 64 if SMP && X86_32 && X86_BIGSMP
-	range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
-	range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
-	default "1" if !SMP
-	default "8192" if MAXSMP
-	default "32" if SMP && X86_BIGSMP
-	default "8" if SMP && X86_32
-	default "64" if SMP
+	range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END
+	default NR_CPUS_DEFAULT
 	---help---
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support.  If CPUMASK_OFFSTACK is enabled, the maximum
 	  supported value is 8192, otherwise the maximum value is 512.  The
 	  minimum value which makes sense is 2.

-	  This is purely to save memory - each supported CPU adds
-	  approximately eight kilobytes to the kernel image.
+	  This is purely to save memory: each supported CPU adds about 8KB
+	  to the kernel image.

 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"

arch/x86/include/asm/cpufeature.h

Lines changed: 40 additions & 39 deletions
@@ -148,45 +148,46 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-		asm_volatile_goto("1: jmp 6f\n"
-			 "2:\n"
-			 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
-			         "((5f-4f) - (2b-1b)),0x90\n"
-			 "3:\n"
-			 ".section .altinstructions,\"a\"\n"
-			 " .long 1b - .\n"		/* src offset */
-			 " .long 4f - .\n"		/* repl offset */
-			 " .word %P1\n"			/* always replace */
-			 " .byte 3b - 1b\n"		/* src len */
-			 " .byte 5f - 4f\n"		/* repl len */
-			 " .byte 3b - 2b\n"		/* pad len */
-			 ".previous\n"
-			 ".section .altinstr_replacement,\"ax\"\n"
-			 "4: jmp %l[t_no]\n"
-			 "5:\n"
-			 ".previous\n"
-			 ".section .altinstructions,\"a\"\n"
-			 " .long 1b - .\n"		/* src offset */
-			 " .long 0\n"			/* no replacement */
-			 " .word %P0\n"			/* feature bit */
-			 " .byte 3b - 1b\n"		/* src len */
-			 " .byte 0\n"			/* repl len */
-			 " .byte 0\n"			/* pad len */
-			 ".previous\n"
-			 ".section .altinstr_aux,\"ax\"\n"
-			 "6:\n"
-			 " testb %[bitnum],%[cap_byte]\n"
-			 " jnz %l[t_yes]\n"
-			 " jmp %l[t_no]\n"
-			 ".previous\n"
-			 : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
-			     [bitnum] "i" (1 << (bit & 7)),
-			     [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
-			 : : t_yes, t_no);
-	t_yes:
-		return true;
-	t_no:
-		return false;
+	asm_volatile_goto("1: jmp 6f\n"
+		 "2:\n"
+		 ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+			 "((5f-4f) - (2b-1b)),0x90\n"
+		 "3:\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 4f - .\n"		/* repl offset */
+		 " .word %P[always]\n"		/* always replace */
+		 " .byte 3b - 1b\n"		/* src len */
+		 " .byte 5f - 4f\n"		/* repl len */
+		 " .byte 3b - 2b\n"		/* pad len */
+		 ".previous\n"
+		 ".section .altinstr_replacement,\"ax\"\n"
+		 "4: jmp %l[t_no]\n"
+		 "5:\n"
+		 ".previous\n"
+		 ".section .altinstructions,\"a\"\n"
+		 " .long 1b - .\n"		/* src offset */
+		 " .long 0\n"			/* no replacement */
+		 " .word %P[feature]\n"		/* feature bit */
+		 " .byte 3b - 1b\n"		/* src len */
+		 " .byte 0\n"			/* repl len */
+		 " .byte 0\n"			/* pad len */
+		 ".previous\n"
+		 ".section .altinstr_aux,\"ax\"\n"
+		 "6:\n"
+		 " testb %[bitnum],%[cap_byte]\n"
+		 " jnz %l[t_yes]\n"
+		 " jmp %l[t_no]\n"
+		 ".previous\n"
+		 : : [feature]  "i" (bit),
+		     [always]   "i" (X86_FEATURE_ALWAYS),
+		     [bitnum]   "i" (1 << (bit & 7)),
+		     [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+		 : : t_yes, t_no);
+t_yes:
+	return true;
+t_no:
+	return false;
 }

 #define static_cpu_has(bit)	\
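
Switching the remaining positional %P0/%P1 operands to the named [feature]/[always] forms does not change the generated code; the slow path in .altinstr_aux still tests one bit of boot_cpu_data.x86_capability, addressed as byte bit >> 3 with mask 1 << (bit & 7). A standalone sketch of that byte/bit split, using a made-up capability array and bit number rather than the kernel's real ones:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the slow-path "testb %[bitnum],%[cap_byte]" in _static_cpu_has():
 * [cap_byte] is capability[bit >> 3] and [bitnum] is 1 << (bit & 7).
 * The array contents and the bit value below are illustrative only. */
static int test_cap_bit(const uint8_t *capability, unsigned int bit)
{
	return (capability[bit >> 3] & (1u << (bit & 7))) != 0;
}

int main(void)
{
	uint8_t caps[64] = { 0 };
	unsigned int bit = 99;	/* hypothetical feature bit number */

	caps[bit >> 3] |= 1u << (bit & 7);
	printf("bit %u set: %d\n", bit, test_cap_bit(caps, bit));
	return 0;
}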

arch/x86/include/asm/page_64.h

Lines changed: 0 additions & 4 deletions
@@ -52,10 +52,6 @@ static inline void clear_page(void *page)

 void copy_page(void *to, void *from);

-#ifdef CONFIG_X86_MCE
-#define arch_unmap_kpfn arch_unmap_kpfn
-#endif
-
 #endif	/* !__ASSEMBLY__ */

 #ifdef CONFIG_X86_VSYSCALL_EMULATION

arch/x86/kernel/apic/x2apic_uv_x.c

Lines changed: 12 additions & 3 deletions
@@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)

 	uv_gre_table = gre;
 	for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+		unsigned long size = ((unsigned long)(gre->limit - lgre)
+					<< UV_GAM_RANGE_SHFT);
+		int order = 0;
+		char suffix[] = " KMGTPE";
+
+		while (size > 9999 && order < sizeof(suffix)) {
+			size /= 1024;
+			order++;
+		}
+
 		if (!index) {
 			pr_info("UV: GAM Range Table...\n");
 			pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
 		}
-		pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
+		pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
 			index++,
 			(unsigned long)lgre << UV_GAM_RANGE_SHFT,
 			(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-			((unsigned long)(gre->limit - lgre)) >>
-				(30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
+			size, suffix[order],
 			gre->type, gre->nasid, gre->sockid, gre->pnode);

 		lgre = gre->limit;
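
The old format string always shifted the range size down to whole gigabytes, so GAM entries smaller than 1GB printed as 0G; the new loop scales the byte count by 1024 until it fits in four digits and picks the matching suffix. A standalone sketch of that scaling, using made-up sizes rather than values decoded from a real GAM range table:

#include <stdio.h>

/* Same scaling logic as the new decode_gam_rng_tbl() loop: divide by 1024
 * until the value fits in at most four digits, then print the suffix.
 * The sample sizes below are illustrative only. */
int main(void)
{
	unsigned long samples[] = { 512UL << 20, 1UL << 30, 96UL << 30 };
	const char suffix[] = " KMGTPE";

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long size = samples[i];
		unsigned int order = 0;

		while (size > 9999 && order < sizeof(suffix)) {
			size /= 1024;
			order++;
		}
		printf("%5lu%c\n", size, suffix[order]);
	}
	return 0;
}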

arch/x86/kernel/cpu/mcheck/mce-internal.h

Lines changed: 15 additions & 0 deletions
@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }

 extern struct mca_config mca_cfg;

+#ifndef CONFIG_X86_64
+/*
+ * On 32-bit systems it would be difficult to safely unmap a poison page
+ * from the kernel 1:1 map because there are no non-canonical addresses that
+ * we can use to refer to the address without risking a speculative access.
+ * However, this isn't much of an issue because:
+ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
+ *    are only mapped into the kernel as needed
+ * 2) Few people would run a 32-bit kernel on a machine that supports
+ *    recoverable errors because they have too much memory to boot 32-bit.
+ */
+static inline void mce_unmap_kpfn(unsigned long pfn) {}
+#define mce_unmap_kpfn mce_unmap_kpfn
+#endif
+
 #endif /* __X86_MCE_INTERNAL_H__ */

arch/x86/kernel/cpu/mcheck/mce.c

Lines changed: 12 additions & 7 deletions
@@ -105,6 +105,10 @@ static struct irq_work mce_irq_work;

 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn);
+#endif
+
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -234,7 +238,7 @@ static void __print_mce(struct mce *m)
 			m->cs, m->ip);

 	if (m->cs == __KERNEL_CS)
-		pr_cont("{%pS}", (void *)m->ip);
+		pr_cont("{%pS}", (void *)(unsigned long)m->ip);
 	pr_cont("\n");
 }

@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,

 	if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
 		pfn = mce->addr >> PAGE_SHIFT;
-		memory_failure(pfn, 0);
+		if (!memory_failure(pfn, 0))
+			mce_unmap_kpfn(pfn);
 	}

 	return NOTIFY_OK;
@@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m)
 	ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
 	if (ret)
 		pr_err("Memory error not recovered");
+	else
+		mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
 	return ret;
 }

-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
-
-void arch_unmap_kpfn(unsigned long pfn)
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn)
 {
 	unsigned long decoy_addr;

@@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn)
 	 * We would like to just call:
 	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
 	 * but doing that would radically increase the odds of a
-	 * speculative access to the posion page because we'd have
+	 * speculative access to the poison page because we'd have
 	 * the virtual address of the kernel 1:1 mapping sitting
 	 * around in registers.
 	 * Instead we get tricky. We create a non-canonical address
@@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn)

 	if (set_memory_np(decoy_addr, 1))
 		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-
 }
 #endif

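The poison-page unmap now goes through mce_unmap_kpfn() and happens only after memory_failure() succeeds. The decoy-address trick described in the comment above can be illustrated with a small standalone sketch; the PAGE_OFFSET, PAGE_SHIFT and pfn values below are made up for illustration and are not read from a running kernel:

#include <stdio.h>

/* Sketch of the non-canonical "decoy" alias idea: flip bit 63 of the
 * direct-map address so that no canonical kernel virtual address of the
 * poisoned page lingers in registers where a speculative load could use it.
 * PAGE_OFFSET/PAGE_SHIFT here are illustrative values only. */
#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff888000000000UL

int main(void)
{
	unsigned long pfn = 0x12345;				/* hypothetical poisoned pfn */
	unsigned long direct = PAGE_OFFSET + (pfn << PAGE_SHIFT);
	unsigned long decoy  = direct ^ (1UL << 63);		/* non-canonical alias */

	printf("1:1 map address: %#lx\n", direct);
	printf("decoy address:   %#lx\n", decoy);
	return 0;
}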

arch/x86/kernel/smpboot.c

Lines changed: 0 additions & 1 deletion
@@ -1430,7 +1430,6 @@ static void remove_siblinginfo(int cpu)
 	cpumask_clear(cpu_llc_shared_mask(cpu));
 	cpumask_clear(topology_sibling_cpumask(cpu));
 	cpumask_clear(topology_core_cpumask(cpu));
-	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 	recompute_smt_state();
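
The one-line change keeps c->phys_proc_id intact when a CPU is torn down; per the commit title in the merge log, clearing it here broke uncore_pci_remove()'s package indexing when a physical CPU is hot-removed.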

arch/x86/lib/error-inject.c

Lines changed: 1 addition & 0 deletions
@@ -7,6 +7,7 @@ asmlinkage void just_return_func(void);

 asm(
 	".type just_return_func, @function\n"
+	".globl just_return_func\n"
 	"just_return_func:\n"
 	"	ret\n"
 	".size just_return_func, .-just_return_func\n"

arch/x86/mm/init_64.c

Lines changed: 2 additions & 2 deletions
@@ -1193,8 +1193,8 @@ void __init mem_init(void)
 	register_page_bootmem_info();

 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
-			PAGE_SIZE, KCORE_OTHER);
+	if (get_gate_vma(&init_mm))
+		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

 	mem_init_print_info(NULL);
 }
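
Two things change here: the vsyscall page is only registered when get_gate_vma() reports a gate VMA (i.e. when vsyscall emulation is enabled; with vsyscall=none it is simply left out of /proc/kcore), and it is registered as the new KCORE_USER type so that read_kcore() copies it as a user-mode page rather than a kernel address, avoiding the SMAP fault named in the merge log.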

fs/proc/kcore.c

Lines changed: 4 additions & 0 deletions
@@ -510,6 +510,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			/* we have to zero-fill user buffer even if no read */
 			if (copy_to_user(buffer, buf, tsz))
 				return -EFAULT;
+		} else if (m->type == KCORE_USER) {
+			/* User page is handled prior to normal kernel page: */
+			if (copy_to_user(buffer, (char *)start, tsz))
+				return -EFAULT;
 		} else {
 			if (kern_addr_valid(start)) {
 				/*

include/linux/kcore.h

Lines changed: 1 addition & 0 deletions
@@ -10,6 +10,7 @@ enum kcore_type {
 	KCORE_VMALLOC,
 	KCORE_RAM,
 	KCORE_VMEMMAP,
+	KCORE_USER,
 	KCORE_OTHER,
 };

include/linux/mm_inline.h

Lines changed: 0 additions & 6 deletions
@@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)

 #define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

-#ifdef arch_unmap_kpfn
-extern void arch_unmap_kpfn(unsigned long pfn);
-#else
-static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
-#endif
-
 #endif

mm/memory-failure.c

Lines changed: 0 additions & 2 deletions
@@ -1139,8 +1139,6 @@ int memory_failure(unsigned long pfn, int flags)
 		return 0;
 	}

-	arch_unmap_kpfn(pfn);
-
 	orig_head = hpage = compound_head(p);
 	num_poisoned_pages_inc();

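Taken together with the mce.c changes above, this moves the 1:1-map unmapping out of generic memory_failure(): the page is no longer unmapped unconditionally before the failure is handled, and the x86 MCE code now unmaps it via mce_unmap_kpfn() only when memory_failure() reports success, so a page the recovery path could not handle is not silently dropped from the kernel direct map.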
