Commit 10b93a5

[AArch64] Make speculation-hardening-sls.ll x16 test more robust
As suggested in D110830, this copies the Arm backend method of testing function calls through specific registers, using inline assembly to force the variable into x16 to check that the __llvm_slsblr_thunk calls do not use a register that may be clobbered by the linker.

Differential Revision: https://reviews.llvm.org/D111056
1 parent 5f65ee2 commit 10b93a5
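
For readers unfamiliar with the trick, the register-pinning pattern the commit message describes can be sketched in isolation as follows. This is a minimal illustrative LLVM IR sketch, not part of the commit; the function name @pin_to_x16 is made up, and it uses the typed-pointer IR syntax of the era. The "={x16},{x16}" constraint pair ties both the inline asm's result and its operand to x16, so the indirect call that consumes the result is forced to go through x16.

; Hypothetical example, not from the test file: force a function pointer
; into x16 before an indirect call.
define i64 @pin_to_x16(i64 ()** %fp) {
entry:
  %f = load i64 ()*, i64 ()** %fp, align 8
  ; The asm is a no-op copy ("add $0, $1, #0"); the constraints pin both the
  ; input and the output to x16.
  %f_x16 = tail call i64 ()* asm "add $0, $1, #0", "={x16},{x16}"(i64 ()* %f) nounwind
  %call = call i64 %f_x16()
  ret i64 %call
}

Without the SLS BLR hardening this lowers to a plain "blr x16"; with the hardening enabled, the call goes through an __llvm_slsblr_thunk, and the test below checks that the chosen thunk register is not x16.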

1 file changed: +7 −14 lines

llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll

Lines changed: 7 additions & 14 deletions
@@ -146,26 +146,19 @@ entry:
 
 ; Verify that neither x16 nor x17 are used when the BLR mitigation is enabled,
 ; as a linker is allowed to clobber x16 or x17 on calls, which would break the
-; correct execution of the code sequence produced by the mitigation.
-; The below test carefully increases register pressure to persuade code
-; generation to produce a BLR x16. Yes, that is a bit fragile.
-define i64 @check_x16(i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp2) "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x30,+reserve-x9" {
+; correct execution of the code sequence produced by the mitigation. The below
+; test attempts to force *%f into x16 using inline assembly.
+define i64 @check_x16(i64 ()** nocapture readonly %fp, i64 ()** nocapture readonly %fp2) "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x30,+reserve-x9" {
 entry:
 ; CHECK-LABEL: check_x16:
-  %0 = load i64 (i8*, i64, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp, align 8
-  %1 = bitcast i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp2 to i8**
-  %2 = load i8*, i8** %1, align 8
-  %call = call i64 %0(i8* %2, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
-  %3 = load i64 (i8*, i64, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp2, align 8
-  %4 = bitcast i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp to i8**
-  %5 = load i8*, i8** %4, align 8;, !tbaa !2
-  %call1 = call i64 %3(i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+  %f = load i64 ()*, i64 ()** %fp, align 8
+  %x16_f = tail call i64 ()* asm "add $0, $1, #0", "={x16},{x16}"(i64 ()* %f) nounwind
+  %call1 = call i64 %x16_f()
 ; NOHARDEN: blr x16
 ; ISBDSB-NOT: bl __llvm_slsblr_thunk_x16
 ; SB-NOT: bl __llvm_slsblr_thunk_x16
 ; CHECK
-  %add = add nsw i64 %call1, %call
-  ret i64 %add
+  ret i64 %call1
 ; CHECK: .Lfunc_end
 }
 
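
To re-run just this test locally (assuming an existing LLVM build with the AArch64 target enabled and llvm-lit available in the build tree; the build directory name is an assumption), something like the following should work:

# Run only this test file; adjust the build directory to your setup.
./build/bin/llvm-lit -v llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll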
