Skip to content

Commit 8990cac

Browse files
Pavel Tatashin authored
and KAGA-KOKO committed
x86/jump_label: Initialize static branching early
Static branching is useful to runtime patch branches that are used in hot path, but are infrequently changed. The x86 clock framework is one example that uses static branches to setup the best clock during boot and never changes it again. It is desired to enable the TSC based sched clock early to allow fine grained boot time analysis early on. That requires the static branching functionality to be functional early as well. Static branching requires patching nop instructions, thus, arch_init_ideal_nops() must be called prior to jump_label_init(). Do all the necessary steps to call arch_init_ideal_nops() right after early_cpu_init(), which also allows to insert a call to jump_label_init() right after that. jump_label_init() will be called again from the generic init code, but the code is protected against reinitialization already. [ tglx: Massaged changelog ] Suggested-by: Peter Zijlstra <[email protected]> Signed-off-by: Pavel Tatashin <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Reviewed-by: Borislav Petkov <[email protected]> Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Cc: [email protected] Link: https://lkml.kernel.org/r/[email protected]
1 parent 6fffacb commit 8990cac

File tree

3 files changed

+30
-25
lines changed

3 files changed

+30
-25
lines changed

arch/x86/kernel/cpu/amd.c

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
232232
}
233233
}
234234

235-
set_cpu_cap(c, X86_FEATURE_K7);
236-
237235
/* calling is from identify_secondary_cpu() ? */
238236
if (!c->cpu_index)
239237
return;
@@ -617,6 +615,14 @@ static void early_init_amd(struct cpuinfo_x86 *c)
617615

618616
early_init_amd_mc(c);
619617

618+
#ifdef CONFIG_X86_32
619+
if (c->x86 == 6)
620+
set_cpu_cap(c, X86_FEATURE_K7);
621+
#endif
622+
623+
if (c->x86 >= 0xf)
624+
set_cpu_cap(c, X86_FEATURE_K8);
625+
620626
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
621627

622628
/*
@@ -863,9 +869,6 @@ static void init_amd(struct cpuinfo_x86 *c)
863869

864870
init_amd_cacheinfo(c);
865871

866-
if (c->x86 >= 0xf)
867-
set_cpu_cap(c, X86_FEATURE_K8);
868-
869872
if (cpu_has(c, X86_FEATURE_XMM2)) {
870873
unsigned long long val;
871874
int ret;

arch/x86/kernel/cpu/common.c

Lines changed: 20 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1015,6 +1015,24 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
10151015
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
10161016
}
10171017

1018+
/*
1019+
* The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1020+
* unfortunately, that's not true in practice because of early VIA
1021+
* chips and (more importantly) broken virtualizers that are not easy
1022+
* to detect. In the latter case it doesn't even *fail* reliably, so
1023+
* probing for it doesn't even work. Disable it completely on 32-bit
1024+
* unless we can find a reliable way to detect all the broken cases.
1025+
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1026+
*/
1027+
static void detect_nopl(struct cpuinfo_x86 *c)
1028+
{
1029+
#ifdef CONFIG_X86_32
1030+
clear_cpu_cap(c, X86_FEATURE_NOPL);
1031+
#else
1032+
set_cpu_cap(c, X86_FEATURE_NOPL);
1033+
#endif
1034+
}
1035+
10181036
/*
10191037
* Do minimum CPU detection early.
10201038
* Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -1089,6 +1107,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
10891107
*/
10901108
if (!pgtable_l5_enabled())
10911109
setup_clear_cpu_cap(X86_FEATURE_LA57);
1110+
1111+
detect_nopl(c);
10921112
}
10931113

10941114
void __init early_cpu_init(void)
@@ -1124,24 +1144,6 @@ void __init early_cpu_init(void)
11241144
early_identify_cpu(&boot_cpu_data);
11251145
}
11261146

1127-
/*
1128-
* The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1129-
* unfortunately, that's not true in practice because of early VIA
1130-
* chips and (more importantly) broken virtualizers that are not easy
1131-
* to detect. In the latter case it doesn't even *fail* reliably, so
1132-
* probing for it doesn't even work. Disable it completely on 32-bit
1133-
* unless we can find a reliable way to detect all the broken cases.
1134-
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1135-
*/
1136-
static void detect_nopl(struct cpuinfo_x86 *c)
1137-
{
1138-
#ifdef CONFIG_X86_32
1139-
clear_cpu_cap(c, X86_FEATURE_NOPL);
1140-
#else
1141-
set_cpu_cap(c, X86_FEATURE_NOPL);
1142-
#endif
1143-
}
1144-
11451147
static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
11461148
{
11471149
#ifdef CONFIG_X86_64

arch/x86/kernel/setup.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -866,6 +866,8 @@ void __init setup_arch(char **cmdline_p)
866866

867867
idt_setup_early_traps();
868868
early_cpu_init();
869+
arch_init_ideal_nops();
870+
jump_label_init();
869871
early_ioremap_init();
870872

871873
setup_olpc_ofw_pgd();
@@ -1268,8 +1270,6 @@ void __init setup_arch(char **cmdline_p)
12681270

12691271
mcheck_init();
12701272

1271-
arch_init_ideal_nops();
1272-
12731273
register_refined_jiffies(CLOCK_TICK_RATE);
12741274

12751275
#ifdef CONFIG_EFI

0 commit comments

Comments (0)