
Commit 582077c

Author: Peter Zijlstra
x86/cfi: Clean up linkage
With the introduction of kCFI the addition of ENDBR to SYM_FUNC_START*
no longer suffices to make the function indirectly callable. This now
requires the use of SYM_TYPED_FUNC_START.

As such, remove the implicit ENDBR from SYM_FUNC_START* and add some
explicit annotations to fix things up again.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Sami Tolvanen <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 2981557 · commit 582077c

29 files changed: +103 −23 lines changed
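Before the per-file hunks, a minimal sketch of the two idioms this commit establishes (hypothetical function names, not taken from the commit): an assembly function that may be reached through an indirect call declares its C prototype via SYM_TYPED_FUNC_START, which emits the kCFI type information (and, with IBT, an ENDBR); a function that is only ever called directly keeps plain SYM_FUNC_START and adds ANNOTATE_NOENDBR so objtool knows the missing ENDBR is intentional.

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <linux/objtool.h>

/* May be called indirectly (e.g. via a function pointer): typed entry. */
SYM_TYPED_FUNC_START(my_indirect_target)	/* hypothetical */
	movq	%rdi, %rax
	RET
SYM_FUNC_END(my_indirect_target)

/* Only ever called directly: no ENDBR; tell objtool that is intentional. */
SYM_FUNC_START(my_direct_helper)		/* hypothetical */
	ANNOTATE_NOENDBR
	movq	%rdi, %rax
	RET
SYM_FUNC_END(my_direct_helper)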

arch/x86/crypto/aesni-intel_asm.S

Lines changed: 2 additions & 0 deletions
@@ -17,6 +17,7 @@
  */

 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/frame.h>

 #define STATE1	%xmm0
@@ -1071,6 +1072,7 @@ SYM_FUNC_END(_aesni_inc)
  *	size_t len, u8 *iv)
  */
 SYM_FUNC_START(aesni_ctr_enc)
+	ANNOTATE_NOENDBR
 	FRAME_BEGIN
 	cmp $16, LEN
 	jb .Lctr_enc_just_ret

arch/x86/entry/calling.h

Lines changed: 1 addition & 0 deletions
@@ -431,6 +431,7 @@ For 32-bit we have the following conventions - kernel is built with
 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 .macro THUNK name, func
 SYM_FUNC_START(\name)
+	ANNOTATE_NOENDBR
 	pushq %rbp
 	movq %rsp, %rbp

arch/x86/entry/entry.S

Lines changed: 2 additions & 0 deletions
@@ -5,6 +5,7 @@

 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
 #include <asm/segment.h>
@@ -17,6 +18,7 @@
 .pushsection .noinstr.text, "ax"

 SYM_FUNC_START(entry_ibpb)
+	ANNOTATE_NOENDBR
 	movl $MSR_IA32_PRED_CMD, %ecx
 	movl $PRED_CMD_IBPB, %eax
 	xorl %edx, %edx

arch/x86/entry/entry_64.S

Lines changed: 3 additions & 0 deletions
@@ -175,6 +175,7 @@ SYM_CODE_END(entry_SYSCALL_64)
  */
 .pushsection .text, "ax"
 SYM_FUNC_START(__switch_to_asm)
+	ANNOTATE_NOENDBR
 	/*
 	 * Save callee-saved registers
 	 * This must match the order in inactive_task_frame
@@ -742,6 +743,7 @@ _ASM_NOKPROBE(common_interrupt_return)
  * Is in entry.text as it shouldn't be instrumented.
  */
 SYM_FUNC_START(asm_load_gs_index)
+	ANNOTATE_NOENDBR
 	FRAME_BEGIN
 	swapgs
 .Lgs_change:
@@ -1526,6 +1528,7 @@ SYM_CODE_END(rewind_stack_and_make_dead)
  * refactored in the future if needed.
  */
 SYM_FUNC_START(clear_bhb_loop)
+	ANNOTATE_NOENDBR
 	push %rbp
 	mov %rsp, %rbp
 	movl $5, %ecx

arch/x86/entry/entry_64_fred.S

Lines changed: 1 addition & 0 deletions
@@ -58,6 +58,7 @@ SYM_CODE_END(asm_fred_entrypoint_kernel)

 #if IS_ENABLED(CONFIG_KVM_INTEL)
 SYM_FUNC_START(asm_fred_entry_from_kvm)
+	ANNOTATE_NOENDBR
 	push %rbp
 	mov %rsp, %rbp

arch/x86/entry/vdso/Makefile

Lines changed: 1 addition & 0 deletions
@@ -133,6 +133,7 @@ KBUILD_CFLAGS_32 += -fno-stack-protector
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += -DBUILD_VDSO

 ifdef CONFIG_MITIGATION_RETPOLINE
 ifneq ($(RETPOLINE_VDSO_CFLAGS),)

arch/x86/include/asm/linkage.h

Lines changed: 6 additions & 12 deletions
@@ -119,33 +119,27 @@

 /* SYM_FUNC_START -- use for global functions */
 #define SYM_FUNC_START(name)				\
-	SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)

 /* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
 #define SYM_FUNC_START_NOALIGN(name)			\
-	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)

 /* SYM_FUNC_START_LOCAL -- use for local functions */
 #define SYM_FUNC_START_LOCAL(name)			\
-	SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)

 /* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
 #define SYM_FUNC_START_LOCAL_NOALIGN(name)		\
-	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)

 /* SYM_FUNC_START_WEAK -- use for weak functions */
 #define SYM_FUNC_START_WEAK(name)			\
-	SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)

 /* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
 #define SYM_FUNC_START_WEAK_NOALIGN(name)		\
-	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)

 #endif /* _ASM_X86_LINKAGE_H */

arch/x86/include/asm/page_64.h

Lines changed: 1 addition & 0 deletions
@@ -60,6 +60,7 @@ static inline void clear_page(void *page)
 }

 void copy_page(void *to, void *from);
+KCFI_REFERENCE(copy_page);

 #ifdef CONFIG_X86_5LEVEL
 /*
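This is the first appearance of KCFI_REFERENCE(). Judging from the open-coded version this commit removes from alternative.c further down ("Force a reference to the external symbol so the compiler generates __kcfi_typid"), it amounts to an __ADDRESSABLE()-style forced reference so that clang emits the __kcfi_typeid_* symbol for a function implemented in assembly. A hedged sketch of such a helper — the kernel's actual definition may differ:

#ifdef CONFIG_CFI_CLANG
/* Force a (discarded) reference so the compiler emits __kcfi_typeid_<fn>. */
#define KCFI_REFERENCE(fn)	__ADDRESSABLE(fn)
#else
#define KCFI_REFERENCE(fn)
#endif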

arch/x86/include/asm/paravirt_types.h

Lines changed: 11 additions & 1 deletion
@@ -244,7 +244,17 @@ extern struct paravirt_patch_template pv_ops;

 int paravirt_disable_iospace(void);

-/* This generates an indirect call based on the operation type number. */
+/*
+ * This generates an indirect call based on the operation type number.
+ *
+ * Since alternatives run after enabling CET/IBT -- the latter setting/clearing
+ * capabilities and the former requiring all capabilities being finalized --
+ * these indirect calls are subject to IBT and the paravirt stubs should have
+ * ENDBR on.
+ *
+ * OTOH since this is effectively a __nocfi indirect call, the paravirt stubs
+ * don't need to bother with CFI prefixes.
+ */
 #define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[paravirt_opptr];"

arch/x86/include/asm/special_insns.h

Lines changed: 2 additions & 2 deletions
@@ -42,14 +42,14 @@ static __always_inline void native_write_cr2(unsigned long val)
 	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
 }

-static inline unsigned long __native_read_cr3(void)
+static __always_inline unsigned long __native_read_cr3(void)
 {
 	unsigned long val;
 	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
 	return val;
 }

-static inline void native_write_cr3(unsigned long val)
+static __always_inline void native_write_cr3(unsigned long val)
 {
 	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
 }
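The __always_inline change pairs with the paravirt.c hunk below: presumably the point is that these primitives must never exist as standalone, address-taken symbols, so pv_ops instead points at dedicated noinstr wrappers (pv_native_read_cr3/pv_native_write_cr3) that carry a proper function identity under kCFI. A sketch of the pattern with hypothetical names:

/* Always inlined: the primitive never becomes an addressable symbol. */
static __always_inline unsigned long rdreg(void)
{
	unsigned long val;
	asm volatile("mov %%cr3, %0" : "=r" (val));
	return val;
}

/* Out-of-line wrapper: the only symbol a function pointer may target. */
static noinstr unsigned long pv_rdreg(void)
{
	return rdreg();
}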

arch/x86/include/asm/string_64.h

Lines changed: 2 additions & 0 deletions
@@ -21,6 +21,7 @@ extern void *__memcpy(void *to, const void *from, size_t len);
 #define __HAVE_ARCH_MEMSET
 void *memset(void *s, int c, size_t n);
 void *__memset(void *s, int c, size_t n);
+KCFI_REFERENCE(__memset);

 /*
  * KMSAN needs to instrument as much code as possible. Use C versions of
@@ -70,6 +71,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
 #define __HAVE_ARCH_MEMMOVE
 void *memmove(void *dest, const void *src, size_t count);
 void *__memmove(void *dest, const void *src, size_t count);
+KCFI_REFERENCE(__memmove);

 int memcmp(const void *cs, const void *ct, size_t count);
 size_t strlen(const char *s);

arch/x86/kernel/acpi/madt_playdead.S

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@
  *	rsi: PGD of the identity mapping
  */
 SYM_FUNC_START(asm_acpi_mp_play_dead)
+	ANNOTATE_NOENDBR
 	/* Turn off global entries. Following CR3 write will flush them. */
 	movq	%cr4, %rdx
 	andq	$~(X86_CR4_PGE), %rdx

arch/x86/kernel/acpi/wakeup_64.S

Lines changed: 1 addition & 0 deletions
@@ -17,6 +17,7 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 SYM_FUNC_START(wakeup_long64)
+	ANNOTATE_NOENDBR
 	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax

arch/x86/kernel/alternative.c

Lines changed: 2 additions & 6 deletions
@@ -926,11 +926,7 @@ struct bpf_insn;
 extern unsigned int __bpf_prog_runX(const void *ctx,
				     const struct bpf_insn *insn);

-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
+KCFI_REFERENCE(__bpf_prog_runX);

 /* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
 asm (
@@ -947,7 +943,7 @@ asm (
 /* Must match bpf_callback_t */
 extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);

-__ADDRESSABLE(__bpf_callback_fn);
+KCFI_REFERENCE(__bpf_callback_fn);

 /* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
 asm (

arch/x86/kernel/ftrace_64.S

Lines changed: 5 additions & 0 deletions
@@ -146,12 +146,14 @@ SYM_FUNC_END(ftrace_stub_graph)
 #ifdef CONFIG_DYNAMIC_FTRACE

 SYM_FUNC_START(__fentry__)
+	ANNOTATE_NOENDBR
 	CALL_DEPTH_ACCOUNT
 	RET
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)

 SYM_FUNC_START(ftrace_caller)
+	ANNOTATE_NOENDBR
 	/* save_mcount_regs fills in first two parameters */
 	save_mcount_regs

@@ -197,6 +199,7 @@ SYM_FUNC_END(ftrace_caller);
 STACK_FRAME_NON_STANDARD_FP(ftrace_caller)

 SYM_FUNC_START(ftrace_regs_caller)
+	ANNOTATE_NOENDBR
 	/* Save the current flags before any operations that can change them */
 	pushfq

@@ -310,13 +313,15 @@ SYM_FUNC_END(ftrace_regs_caller)
 STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)

 SYM_FUNC_START(ftrace_stub_direct_tramp)
+	ANNOTATE_NOENDBR
 	CALL_DEPTH_ACCOUNT
 	RET
 SYM_FUNC_END(ftrace_stub_direct_tramp)

 #else /* ! CONFIG_DYNAMIC_FTRACE */

 SYM_FUNC_START(__fentry__)
+	ANNOTATE_NOENDBR
 	CALL_DEPTH_ACCOUNT

 	cmpq $ftrace_stub, ftrace_trace_function

arch/x86/kernel/irqflags.S

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@
  */
 .pushsection .noinstr.text, "ax"
 SYM_FUNC_START(native_save_fl)
+	ENDBR
 	pushf
 	pop %_ASM_AX
 	RET

arch/x86/kernel/paravirt.c

Lines changed: 12 additions & 2 deletions
@@ -116,6 +116,16 @@ static noinstr void pv_native_write_cr2(unsigned long val)
 	native_write_cr2(val);
 }

+static noinstr unsigned long pv_native_read_cr3(void)
+{
+	return __native_read_cr3();
+}
+
+static noinstr void pv_native_write_cr3(unsigned long cr3)
+{
+	native_write_cr3(cr3);
+}
+
 static noinstr unsigned long pv_native_get_debugreg(int regno)
 {
 	return native_get_debugreg(regno);
@@ -203,8 +213,8 @@ struct paravirt_patch_template pv_ops = {
 #ifdef CONFIG_PARAVIRT_XXL
 	.mmu.read_cr2	= __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
 	.mmu.write_cr2	= pv_native_write_cr2,
-	.mmu.read_cr3	= __native_read_cr3,
-	.mmu.write_cr3	= native_write_cr3,
+	.mmu.read_cr3	= pv_native_read_cr3,
+	.mmu.write_cr3	= pv_native_write_cr3,

 	.mmu.pgd_alloc	= __paravirt_pgd_alloc,
 	.mmu.pgd_free	= paravirt_nop,

arch/x86/lib/clear_page_64.S

Lines changed: 2 additions & 0 deletions
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/linkage.h>
 #include <linux/cfi_types.h>
+#include <linux/objtool.h>
 #include <asm/asm.h>

 /*
@@ -64,6 +65,7 @@ EXPORT_SYMBOL_GPL(clear_page_erms)
  * rcx: uncleared bytes or 0 if successful.
  */
 SYM_FUNC_START(rep_stos_alternative)
+	ANNOTATE_NOENDBR
 	cmpq $64,%rcx
 	jae .Lunrolled

arch/x86/lib/copy_user_64.S

Lines changed: 3 additions & 0 deletions
@@ -8,6 +8,8 @@

 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <linux/objtool.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
 #include <asm/asm.h>
@@ -30,6 +32,7 @@
  * it simpler for us, we can clobber rsi/rdi and rax freely.
  */
 SYM_FUNC_START(rep_movs_alternative)
+	ANNOTATE_NOENDBR
 	cmpq $64,%rcx
 	jae .Llarge

arch/x86/lib/copy_user_uncached_64.S

Lines changed: 2 additions & 0 deletions
@@ -5,6 +5,7 @@

 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/asm.h>

 /*
@@ -27,6 +28,7 @@
  * rax uncopied bytes or 0 if successful.
  */
 SYM_FUNC_START(__copy_user_nocache)
+	ANNOTATE_NOENDBR
 	/* If destination is not 7-byte aligned, we'll have to align it */
 	testb $7,%dil
 	jne .Lalign
