Commit 963172d

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "The accumulated fixes from this and last week:

   - Fix vmalloc TLB flush and map range calculations which lead to
     stale TLBs, spurious faults and other hard to diagnose issues.

   - Use fault_in_pages_writeable() for prefaulting the user stack in
     the FPU code as it's less fragile than the current solution.

   - Use the PF_KTHREAD flag when checking for a kernel thread instead
     of current->mm as the latter can give the wrong answer due to
     use_mm().

   - Compute the vmemmap size correctly for KASLR and 5-level paging.
     Otherwise this can end up with a way too small vmemmap area.

   - Make KASAN and 5-level paging work again by making sure that all
     invalid bits are masked out when computing the P4D offset. This
     worked before but got broken recently when the LDT remap area was
     moved.

   - Prevent a NULL pointer dereference in the resource control code
     which can be triggered with certain mount options when the
     requested resource is not available.

   - Enforce ordering of microcode loading vs. perf initialization on
     secondary CPUs. Otherwise perf tries to access a non-existing MSR
     as the boot CPU marked it as available.

   - Don't stop the resource control group walk early, otherwise the
     control bitmaps are not updated correctly and become inconsistent.

   - Unbreak kgdb by returning 0 on success from
     kgdb_arch_set_breakpoint() instead of an error code.

   - Add more Icelake CPU model defines so dependent changes can be
     queued in other trees"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback
  x86/kasan: Fix boot with 5-level paging and KASAN
  x86/fpu: Don't use current->mm to check for a kthread
  x86/kgdb: Return 0 from kgdb_arch_set_breakpoint()
  x86/resctrl: Prevent NULL pointer dereference when local MBM is disabled
  x86/resctrl: Don't stop walking closids when a locksetup group is found
  x86/fpu: Update kernel's FPU state before using for the fsave header
  x86/mm/KASLR: Compute the size of the vmemmap section properly
  x86/fpu: Use fault_in_pages_writeable() for pre-faulting
  x86/CPU: Add more Icelake model numbers
  mm/vmalloc: Avoid rare case of flushing TLB with weird arguments
  mm/vmalloc: Fix calculation of direct map addr range
2 parents efba92d + 78f4e93 commit 963172d

12 files changed (+45, -24 lines)


arch/x86/include/asm/fpu/internal.h

Lines changed: 3 additions & 3 deletions
@@ -536,7 +536,7 @@ static inline void __fpregs_load_activate(void)
 	struct fpu *fpu = &current->thread.fpu;
 	int cpu = smp_processor_id();
 
-	if (WARN_ON_ONCE(current->mm == NULL))
+	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
 		return;
 
 	if (!fpregs_state_valid(fpu, cpu)) {
@@ -567,11 +567,11 @@ static inline void __fpregs_load_activate(void)
  * otherwise.
  *
  * The FPU context is only stored/restored for a user task and
- * ->mm is used to distinguish between kernel and user threads.
+ * PF_KTHREAD is used to distinguish between kernel and user threads.
  */
 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
+	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
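Why current->mm is the wrong test here (the use_mm() problem the pull message mentions): a kernel thread can temporarily adopt a user mm, after which current->mm is non-NULL even though the task is still a kthread, while PF_KTHREAD stays set for the thread's whole lifetime. A minimal sketch of that situation, not part of this patch and with a made-up helper name:

#include <linux/sched.h>
#include <linux/mmu_context.h>	/* use_mm()/unuse_mm() */

/* Hypothetical kthread helper, for illustration only. */
static void borrow_user_mm(struct mm_struct *mm)
{
	use_mm(mm);		/* current->mm == mm from here on */
	WARN_ON(!(current->flags & PF_KTHREAD));	/* still a kernel thread */
	/* ... operate on the user address space ... */
	unuse_mm(mm);		/* current->mm is NULL again */
}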

arch/x86/include/asm/intel-family.h

Lines changed: 3 additions & 0 deletions
@@ -52,6 +52,9 @@
 
 #define INTEL_FAM6_CANNONLAKE_MOBILE	0x66
 
+#define INTEL_FAM6_ICELAKE_X		0x6A
+#define INTEL_FAM6_ICELAKE_XEON_D	0x6C
+#define INTEL_FAM6_ICELAKE_DESKTOP	0x7D
 #define INTEL_FAM6_ICELAKE_MOBILE	0x7E
 
 /* "Small Core" Processors (Atom) */

arch/x86/kernel/cpu/microcode/core.c

Lines changed: 1 addition & 1 deletion
@@ -872,7 +872,7 @@ int __init microcode_init(void)
 		goto out_ucode_group;
 
 	register_syscore_ops(&mc_syscore_ops);
-	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+	cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
 				  mc_cpu_online, mc_cpu_down_prep);
 
 	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

arch/x86/kernel/cpu/resctrl/monitor.c

Lines changed: 3 additions & 0 deletions
@@ -360,6 +360,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 	struct list_head *head;
 	struct rdtgroup *entry;
 
+	if (!is_mbm_local_enabled())
+		return;
+
 	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
 	closid = rgrp->closid;
 	rmid = rgrp->mon.rmid;

arch/x86/kernel/cpu/resctrl/rdtgroup.c

Lines changed: 6 additions & 1 deletion
@@ -2534,7 +2534,12 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
 		if (closid_allocated(i) && i != closid) {
 			mode = rdtgroup_mode_by_closid(i);
 			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
-				break;
+				/*
+				 * ctrl values for locksetup aren't relevant
+				 * until the schemata is written, and the mode
+				 * becomes RDT_MODE_PSEUDO_LOCKED.
+				 */
+				continue;
 			/*
 			 * If CDP is active include peer domain's
 			 * usage to ensure there is no overlap

arch/x86/kernel/fpu/core.c

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ static void __kernel_fpu_begin(void)
 
 	kernel_fpu_disable();
 
-	if (current->mm) {
+	if (!(current->flags & PF_KTHREAD)) {
 		if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
 			set_thread_flag(TIF_NEED_FPU_LOAD);
 			/*

arch/x86/kernel/fpu/signal.c

Lines changed: 7 additions & 9 deletions
@@ -5,6 +5,7 @@
 
 #include <linux/compat.h>
 #include <linux/cpu.h>
+#include <linux/pagemap.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
@@ -61,6 +62,11 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
 	struct user_i387_ia32_struct env;
 	struct _fpstate_32 __user *fp = buf;
 
+	fpregs_lock();
+	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+		copy_fxregs_to_kernel(&tsk->thread.fpu);
+	fpregs_unlock();
+
 	convert_from_fxsr(&env, tsk);
 
 	if (__copy_to_user(buf, &env, sizeof(env)) ||
@@ -189,15 +195,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 	fpregs_unlock();
 
 	if (ret) {
-		int aligned_size;
-		int nr_pages;
-
-		aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
-		nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
-
-		ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
-					      NULL, FOLL_WRITE);
-		if (ret == nr_pages)
+		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
 			goto retry;
 		return -EFAULT;
 	}

arch/x86/kernel/kgdb.c

Lines changed: 1 addition & 1 deletion
@@ -758,7 +758,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 			  BREAK_INSTR_SIZE);
 	bpt->type = BP_POKE_BREAKPOINT;
 
-	return err;
+	return 0;
 }
 
 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)

arch/x86/mm/kasan_init_64.c

Lines changed: 1 addition & 1 deletion
@@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
 	if (!pgtable_l5_enabled())
 		return (p4d_t *)pgd;
 
-	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
 	p4d += __START_KERNEL_map - phys_base;
 	return (p4d_t *)p4d + p4d_index(addr);
 }
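For intuition, an illustrative made-up value (not from the patch): if pgd_val(*pgd) were 0x12345063, i.e. a p4d table at physical address 0x12345000 plus the low flag bits 0x063, then masking with PTE_PFN_MASK strips the flag bits and leaves 0x12345000, and adding __START_KERNEL_map - phys_base turns that physical address into the kernel virtual address at which the p4d page is mapped. The dropped __pa_nodebug() call treated the entry as a virtual address that still needed a physical conversion, even though a pgd entry already holds a physical address plus flags, so masking out the invalid bits is the whole job here.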

arch/x86/mm/kaslr.c

Lines changed: 10 additions & 1 deletion
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
 } kaslr_regions[] = {
 	{ &page_offset_base, 0 },
 	{ &vmalloc_base, 0 },
-	{ &vmemmap_base, 1 },
+	{ &vmemmap_base, 0 },
 };
 
 /* Get size in bytes used by the memory region */
@@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
 	unsigned long rand, memory_tb;
 	struct rnd_state rand_state;
 	unsigned long remain_entropy;
+	unsigned long vmemmap_size;
 
 	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
 	vaddr = vaddr_start;
@@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
 	if (memory_tb < kaslr_regions[0].size_tb)
 		kaslr_regions[0].size_tb = memory_tb;
 
+	/*
+	 * Calculate the vmemmap region size in TBs, aligned to a TB
+	 * boundary.
+	 */
+	vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
+			sizeof(struct page);
+	kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
+
 	/* Calculate entropy available between regions */
 	remain_entropy = vaddr_end - vaddr_start;
 	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
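A back-of-the-envelope check of the new sizing (illustrative numbers, assuming sizeof(struct page) == 64 and the usual PAGE_SHIFT == 12, TB_SHIFT == 40; not part of the patch):

    10 TB of direct map  -> 10 * 2^28 pages * 64 B  =  160 GB -> DIV_ROUND_UP -> 1 TB vmemmap region
    100 TB of direct map -> 100 * 2^28 pages * 64 B = 1600 GB -> DIV_ROUND_UP -> 2 TB vmemmap region

So the removed hard-coded 1 TB entry ({ &vmemmap_base, 1 }) can indeed be too small on large 5-level paging systems, which is the "way too small vmemmap area" the pull message refers to.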

include/linux/cpuhotplug.h

Lines changed: 1 addition & 0 deletions
@@ -101,6 +101,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_BCM2836_STARTING,
 	CPUHP_AP_IRQ_MIPS_GIC_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
+	CPUHP_AP_MICROCODE_LOADER,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
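The position in this enum is what enforces the ordering called out in the pull message: hotplug startup callbacks run in enum order when a CPU comes online, so a callback registered at the new fixed CPUHP_AP_MICROCODE_LOADER state runs before the CPUHP_AP_PERF_X86_* startup callbacks, whereas the previously used CPUHP_AP_ONLINE_DYN states are invoked later. A minimal registration sketch with hypothetical names (in the real tree this state is owned by the microcode loader, as in the core.c hunk above):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Hypothetical per-CPU online callback, for illustration only. */
static int demo_cpu_online(unsigned int cpu)
{
	/* Runs before the perf startup callbacks on this CPU. */
	return 0;
}

static int __init demo_init(void)
{
	/* _nocalls: register without invoking the callback on already-online CPUs. */
	return cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "demo:online",
					 demo_cpu_online, NULL);
}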

mm/vmalloc.c

Lines changed: 8 additions & 6 deletions
@@ -2123,9 +2123,9 @@ static inline void set_area_direct_map(const struct vm_struct *area,
 /* Handle removing and resetting vm mappings related to the vm_struct. */
 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 {
-	unsigned long addr = (unsigned long)area->addr;
 	unsigned long start = ULONG_MAX, end = 0;
 	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
+	int flush_dmap = 0;
 	int i;
 
 	/*
@@ -2135,8 +2135,8 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	 * execute permissions, without leaving a RW+X window.
 	 */
 	if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-		set_memory_nx(addr, area->nr_pages);
-		set_memory_rw(addr, area->nr_pages);
+		set_memory_nx((unsigned long)area->addr, area->nr_pages);
+		set_memory_rw((unsigned long)area->addr, area->nr_pages);
 	}
 
 	remove_vm_area(area->addr);
@@ -2160,9 +2160,11 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	 * the vm_unmap_aliases() flush includes the direct map.
 	 */
 	for (i = 0; i < area->nr_pages; i++) {
-		if (page_address(area->pages[i])) {
+		unsigned long addr = (unsigned long)page_address(area->pages[i]);
+		if (addr) {
 			start = min(addr, start);
-			end = max(addr, end);
+			end = max(addr + PAGE_SIZE, end);
+			flush_dmap = 1;
 		}
 	}
 
@@ -2172,7 +2174,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	 * reset the direct map permissions to the default.
 	 */
 	set_area_direct_map(area, set_direct_map_invalid_noflush);
-	_vm_unmap_aliases(start, end, 1);
+	_vm_unmap_aliases(start, end, flush_dmap);
 	set_area_direct_map(area, set_direct_map_default_noflush);
 }
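Two concrete effects of these hunks, worked through with made-up direct-map addresses (not from the patch): if an area's pages sit at 0xffff888001000000 and 0xffff888001001000 in the direct map, the old end = max(addr, end) made the flush range stop at the start of the last page, while end = max(addr + PAGE_SIZE, end) extends it to 0xffff888001002000 so both pages are covered in full. And if none of the pages has a direct-map address (page_address() returns NULL for all of them), start stays ULONG_MAX and end stays 0; flush_dmap then remains 0, so _vm_unmap_aliases() is no longer told to flush that nonsense range, which is the "weird arguments" case named in the shortlog.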
