Commit e7c25c4

Peter Zijlstra authored and bp3tk0v committed

x86/cpu: Cleanup the untrain mess

Since there can only be one active return_thunk, there only needs to be
one (matching) untrain_ret. It fundamentally doesn't make sense to allow
multiple untrain_ret at the same time.

Fold all the 3 different untrain methods into a single (temporary)
helper stub.

Fixes: fb3bd91 ("x86/srso: Add a Speculative RAS Overflow mitigation")
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent: 42be649

3 files changed: +13, -14 lines

arch/x86/include/asm/nospec-branch.h (5 additions, 14 deletions)

@@ -272,9 +272,9 @@
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET	"call retbleed_untrain_ret"
+#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
 #else
-#define CALL_ZEN_UNTRAIN_RET	""
+#define CALL_UNTRAIN_RET	""
 #endif
 
 /*
@@ -293,31 +293,21 @@
 	defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
 	VALIDATE_UNRET_END
 	ALTERNATIVE_3 "",						\
-		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
 		      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-			  "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
 
 .macro UNTRAIN_RET_FROM_CALL
 #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
 	defined(CONFIG_CALL_DEPTH_TRACKING)
 	VALIDATE_UNRET_END
 	ALTERNATIVE_3 "",						\
-		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,	\
 		      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-			  "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
 
 
@@ -355,6 +345,7 @@ extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_alias_untrain_ret(void);
 
+extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
 extern void (*x86_return_thunk)(void);

arch/x86/kernel/cpu/bugs.c (1 addition, 0 deletions)

@@ -2460,6 +2460,7 @@ static void __init srso_select_mitigation(void)
 	 * like ftrace, static_call, etc.
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+	setup_force_cpu_cap(X86_FEATURE_UNRET);
 
 	if (boot_cpu_data.x86 == 0x19) {
 		setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);

arch/x86/lib/retpoline.S (7 additions, 0 deletions)

@@ -289,6 +289,13 @@ SYM_CODE_START(srso_return_thunk)
 	ud2
 SYM_CODE_END(srso_return_thunk)
 
+SYM_FUNC_START(entry_untrain_ret)
+	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
 SYM_CODE_START(__x86_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
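A note on how the new stub behaves: ALTERNATIVE_2 patches in the last listed alternative whose CPU feature bit is set, so at boot entry_untrain_ret collapses to exactly one of the three untrain routines. Below is a rough user-space C model of that selection, not kernel code; the feature_srso/feature_srso_alias booleans and the printed strings are invented stand-ins for the real feature flags and untrain sequences.

/*
 * Minimal user-space model of the patched entry_untrain_ret stub:
 * exactly one of the three untrain routines is selected, depending
 * on which feature bits were set at boot. Names mirror the kernel
 * symbols; the flag plumbing is invented for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

static void retbleed_untrain_ret(void)   { puts("retbleed untrain"); }
static void srso_untrain_ret(void)       { puts("SRSO untrain"); }
static void srso_alias_untrain_ret(void) { puts("SRSO alias untrain"); }

/* Stand-ins for X86_FEATURE_SRSO / X86_FEATURE_SRSO_ALIAS. */
static bool feature_srso;
static bool feature_srso_alias;

/* Models ALTERNATIVE_2: the last alternative whose feature is set wins. */
static void entry_untrain_ret(void)
{
	if (feature_srso_alias)
		srso_alias_untrain_ret();
	else if (feature_srso)
		srso_untrain_ret();
	else
		retbleed_untrain_ret();
}

int main(void)
{
	feature_srso = true;	/* pretend the SRSO mitigation is active */
	entry_untrain_ret();	/* prints "SRSO untrain" */
	return 0;
}

With the SRSO bit set (and SRSO_ALIAS clear) the model prints "SRSO untrain"; with neither set it falls back to the retbleed path, mirroring the default "jmp retbleed_untrain_ret" in the stub above.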
