
Commit dbaba47

calmisi authored and KAGA-KOKO committed
x86/split_lock: Rework the initialization flow of split lock detection
The current initialization flow of split lock detection has the following issues:

1. It assumes the initial value of MSR_TEST_CTRL.SPLIT_LOCK_DETECT to be zero. However, it's possible that BIOS/firmware has set it.

2. The X86_FEATURE_SPLIT_LOCK_DETECT flag is unconditionally set even if there is a virtualization flaw in which FMS indicates the feature exists while it is actually not supported.

Rework the initialization flow to solve the above issues. In detail, explicitly clear and set the split_lock_detect bit to verify that MSR_TEST_CTRL can be accessed, and rdmsr after wrmsr to ensure the bit is cleared/set successfully.

The X86_FEATURE_SPLIT_LOCK_DETECT flag is set only when the feature does exist and is not disabled with the kernel parameter "split_lock_detect=off".

On each processor, explicitly update the SPLIT_LOCK_DETECT bit based on sld_state in split_lock_init(), since BIOS/firmware may touch it.

Originally-by: Thomas Gleixner <[email protected]>
Signed-off-by: Xiaoyao Li <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 6650cdd commit dbaba47
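
The commit message refers to the "split_lock_detect=off" kernel command line parameter. Judging from the states handled in the diff below (sld_off, sld_warn, sld_fatal), the parameter presumably accepts values along these lines; the exact option strings live in the sld_options table, which is outside the changed hunks:

    split_lock_detect=off      detection disabled
    split_lock_detect=warn     warn about user-space split locks (the default picked by split_lock_setup())
    split_lock_detect=fatal    send SIGBUS to user-space tasks that trigger a split lock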


arch/x86/kernel/cpu/intel.c

Lines changed: 42 additions & 33 deletions
@@ -44,7 +44,7 @@ enum split_lock_detect_state {
  * split_lock_setup() will switch this to sld_warn on systems that support
  * split lock detect, unless there is a command line override.
  */
-static enum split_lock_detect_state sld_state = sld_off;
+static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
 
 /*
  * Processors which have self-snooping capability can handle conflicting
@@ -984,78 +984,87 @@ static inline bool match_option(const char *arg, int arglen, const char *opt)
 	return len == arglen && !strncmp(arg, opt, len);
 }
 
+static bool split_lock_verify_msr(bool on)
+{
+	u64 ctrl, tmp;
+
+	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
+		return false;
+	if (on)
+		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	else
+		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
+	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
+		return false;
+	rdmsrl(MSR_TEST_CTRL, tmp);
+	return ctrl == tmp;
+}
+
 static void __init split_lock_setup(void)
 {
+	enum split_lock_detect_state state = sld_warn;
 	char arg[20];
 	int i, ret;
 
-	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
-	sld_state = sld_warn;
+	if (!split_lock_verify_msr(false)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
 
 	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
 				  arg, sizeof(arg));
 	if (ret >= 0) {
 		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
 			if (match_option(arg, ret, sld_options[i].option)) {
-				sld_state = sld_options[i].state;
+				state = sld_options[i].state;
 				break;
 			}
 		}
 	}
 
-	switch (sld_state) {
+	switch (state) {
 	case sld_off:
 		pr_info("disabled\n");
-		break;
-
+		return;
 	case sld_warn:
 		pr_info("warning about user-space split_locks\n");
 		break;
-
 	case sld_fatal:
 		pr_info("sending SIGBUS on user-space split_locks\n");
 		break;
 	}
+
+	if (!split_lock_verify_msr(true)) {
+		pr_info("MSR access failed: Disabled\n");
+		return;
+	}
+
+	sld_state = state;
+	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
 }
 
 /*
- * Locking is not required at the moment because only bit 29 of this
- * MSR is implemented and locking would not prevent that the operation
- * of one thread is immediately undone by the sibling thread.
- * Use the "safe" versions of rdmsr/wrmsr here because although code
- * checks CPUID and MSR bits to make sure the TEST_CTRL MSR should
- * exist, there may be glitches in virtualization that leave a guest
- * with an incorrect view of real h/w capabilities.
+ * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
+ * is not implemented as one thread could undo the setting of the other
+ * thread immediately after dropping the lock anyway.
  */
-static bool __sld_msr_set(bool on)
+static void sld_update_msr(bool on)
 {
 	u64 test_ctrl_val;
 
-	if (rdmsrl_safe(MSR_TEST_CTRL, &test_ctrl_val))
-		return false;
+	rdmsrl(MSR_TEST_CTRL, test_ctrl_val);
 
 	if (on)
 		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
 	else
 		test_ctrl_val &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
 
-	return !wrmsrl_safe(MSR_TEST_CTRL, test_ctrl_val);
+	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
 }
 
 static void split_lock_init(void)
 {
-	if (sld_state == sld_off)
-		return;
-
-	if (__sld_msr_set(true))
-		return;
-
-	/*
-	 * If this is anything other than the boot-cpu, you've done
-	 * funny things and you get to keep whatever pieces.
-	 */
-	pr_warn("MSR fail -- disabled\n");
-	sld_state = sld_off;
+	split_lock_verify_msr(sld_state != sld_off);
 }
 
 bool handle_user_split_lock(struct pt_regs *regs, long error_code)
@@ -1071,7 +1080,7 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 	 * progress and set TIF_SLD so the detection is re-enabled via
 	 * switch_to_sld() when the task is scheduled out.
 	 */
-	__sld_msr_set(false);
+	sld_update_msr(false);
 	set_tsk_thread_flag(current, TIF_SLD);
 	return true;
 }
@@ -1085,7 +1094,7 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code)
  */
 void switch_to_sld(unsigned long tifn)
 {
-	__sld_msr_set(!(tifn & _TIF_SLD));
+	sld_update_msr(!(tifn & _TIF_SLD));
 }
 
 #define SPLIT_LOCK_CPU(model) {X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY}
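
For context on the pr_info messages in split_lock_setup() ("warning about user-space split_locks", "sending SIGBUS on user-space split_locks"), the snippet below is an illustrative sketch, not part of this commit: a minimal user-space program, assuming gcc or clang on x86-64, whose LOCK-prefixed read-modify-write straddles an assumed 64-byte cache line and is therefore the kind of access split lock detection traps on.

/* Illustrative sketch only -- not part of this commit. */
#include <stdint.h>
#include <stdio.h>

/* 128-byte buffer aligned to an assumed 64-byte cache line. */
static unsigned char buf[128] __attribute__((aligned(64)));

int main(void)
{
	/*
	 * A 4-byte counter placed at offset 62 occupies bytes 62..65 and
	 * therefore straddles the boundary between the two cache lines.
	 */
	uint32_t *counter = (uint32_t *)(buf + 62);

	/*
	 * The atomic add compiles to a LOCK-prefixed instruction; on the
	 * line-crossing operand above it becomes a split lock. With
	 * split_lock_detect=warn the kernel logs a warning, with =fatal
	 * the task receives SIGBUS (on hardware supporting the feature).
	 */
	__atomic_fetch_add(counter, 1, __ATOMIC_SEQ_CST);

	printf("counter = %u\n", (unsigned)*counter);
	return 0;
}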
