[hwasan] Add testcase for check-memaccess when using a fixed shadow #89328
Conversation
The test case is based on hwasan-check-memaccess.ll (albeit updated using update_llc_test_checks) with --hwasan-mapping-offset=...

--hwasan-mapping-offset=... actually doesn't affect the LLVM IR at the moment; future work will introduce memaccess fixed shadow intrinsics. (llvm#89319)
@llvm/pr-subscribers-backend-aarch64

Author: Thurston Dang (thurstond)

Full diff: https://github.com/llvm/llvm-project/pull/89328.diff

1 Files Affected:
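For readers skimming the checks below: the i32 immediate passed to llvm.hwasan.check.memaccess / llvm.hwasan.check.memaccess.shortgranules is an access-info word, and the comments in f3/f4 decode two of the values used here. The sketch below (plain C++, illustrative only) decodes all four immediates from this test; the bit layout is an assumption inferred from those comments (access-size-index in the low 4 bits, is-write at bit 4, match-all tag in bits 16-23, a has-match-all flag at bit 24, a kernel flag at bit 25), so check HWAddressSanitizer's sources for the authoritative encoding.

// Illustrative decoder for the access-info immediates in this test.
// Assumed bit layout (inferred from the test's own comments, not normative):
//   [3:0] access-size-index, [4] is-write, [23:16] match-all tag,
//   [24] has-match-all, [25] kernel
#include <cstdint>
#include <cstdio>

struct AccessInfo {
  unsigned SizeIndex;   // log2 of the access size
  bool IsWrite;
  bool HasMatchAll;
  uint8_t MatchAllTag;
  bool Kernel;
};

static AccessInfo decode(uint32_t Imm) {
  AccessInfo AI;
  AI.SizeIndex = Imm & 0xf;            // bits [3:0]
  AI.IsWrite = (Imm >> 4) & 1;         // bit 4
  AI.MatchAllTag = (Imm >> 16) & 0xff; // bits [23:16]
  AI.HasMatchAll = (Imm >> 24) & 1;    // bit 24
  AI.Kernel = (Imm >> 25) & 1;         // bit 25
  return AI;
}

int main() {
  // The four immediates used by f1..f4 in the test above.
  for (uint32_t Imm : {1u, 2u, 67043328u, 16777232u}) {
    AccessInfo AI = decode(Imm);
    std::printf("0x%08x: size-index=%u write=%d kernel=%d match-all=%s (0x%02x)\n",
                unsigned(Imm), AI.SizeIndex, AI.IsWrite, AI.Kernel,
                AI.HasMatchAll ? "on" : "off", unsigned(AI.MatchAllTag));
  }
  return 0;
}

With this reading, f1 and f2 leave every field except the access-size-index at zero, which is why their stubs are simply __hwasan_check_x1_1 and __hwasan_check_x0_2_short_v2, while f3 and f4 carry the whole immediate in the stub name (__hwasan_check_x1_67043328, __hwasan_check_x1_16777232_short_v2).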
diff --git a/llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll b/llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll
new file mode 100644
index 00000000000000..e0be883b72c65a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll
@@ -0,0 +1,165 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc --hwasan-mapping-offset=4398046511104 < %s | FileCheck %s
+
+target triple = "aarch64--linux-android"
+
+define ptr @f1(ptr %x0, ptr %x1) {
+; CHECK-LABEL: f1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov x9, x0
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: bl __hwasan_check_x1_1
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ call void @llvm.hwasan.check.memaccess(ptr %x0, ptr %x1, i32 1)
+ ret ptr %x1
+}
+
+define ptr @f2(ptr %x0, ptr %x1) {
+; CHECK-LABEL: f2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x30, x20, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w20, -8
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov x20, x1
+; CHECK-NEXT: bl __hwasan_check_x0_2_short_v2
+; CHECK-NEXT: ldp x30, x20, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ call void @llvm.hwasan.check.memaccess.shortgranules(ptr %x1, ptr %x0, i32 2)
+ ret ptr %x0
+}
+
+define void @f3(ptr %x0, ptr %x1) {
+ ; 0x3ff0000 (kernel, match-all = 0xff)
+; CHECK-LABEL: f3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov x9, x0
+; CHECK-NEXT: bl __hwasan_check_x1_67043328
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ call void @llvm.hwasan.check.memaccess(ptr %x0, ptr %x1, i32 67043328)
+ ret void
+}
+
+define void @f4(ptr %x0, ptr %x1) {
+ ; 0x1000010 (access-size-index = 0, is-write = 1, match-all = 0x0)
+; CHECK-LABEL: f4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: stp x30, x20, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w20, -8
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: mov x20, x0
+; CHECK-NEXT: bl __hwasan_check_x1_16777232_short_v2
+; CHECK-NEXT: ldp x30, x20, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+ call void @llvm.hwasan.check.memaccess.shortgranules(ptr %x0, ptr %x1, i32 16777232)
+ ret void
+}
+
+declare void @llvm.hwasan.check.memaccess(ptr, ptr, i32)
+declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32)
+
+; CHECK: .section .text.hot,"axG",@progbits,__hwasan_check_x0_2_short_v2,comdat
+; CHECK-NEXT: .type __hwasan_check_x0_2_short_v2,@function
+; CHECK-NEXT: .weak __hwasan_check_x0_2_short_v2
+; CHECK-NEXT: .hidden __hwasan_check_x0_2_short_v2
+; CHECK-NEXT: __hwasan_check_x0_2_short_v2:
+; CHECK-NEXT: sbfx x16, x0, #4, #52
+; CHECK-NEXT: ldrb w16, [x20, x16]
+; CHECK-NEXT: cmp x16, x0, lsr #56
+; CHECK-NEXT: b.ne .Ltmp0
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: ret
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: cmp w16, #15
+; CHECK-NEXT: b.hi .Ltmp2
+; CHECK-NEXT: and x17, x0, #0xf
+; CHECK-NEXT: add x17, x17, #3
+; CHECK-NEXT: cmp w16, w17
+; CHECK-NEXT: b.ls .Ltmp2
+; CHECK-NEXT: orr x16, x0, #0xf
+; CHECK-NEXT: ldrb w16, [x16]
+; CHECK-NEXT: cmp x16, x0, lsr #56
+; CHECK-NEXT: b.eq .Ltmp1
+; CHECK-NEXT: .Ltmp2:
+; CHECK-NEXT: stp x0, x1, [sp, #-256]!
+; CHECK-NEXT: stp x29, x30, [sp, #232]
+; CHECK-NEXT: mov x1, #2
+; CHECK-NEXT: adrp x16, :got:__hwasan_tag_mismatch_v2
+; CHECK-NEXT: ldr x16, [x16, :got_lo12:__hwasan_tag_mismatch_v2]
+; CHECK-NEXT: br x16
+
+
+; CHECK: .section .text.hot,"axG",@progbits,__hwasan_check_x1_1,comdat
+; CHECK-NEXT: .type __hwasan_check_x1_1,@function
+; CHECK-NEXT: .weak __hwasan_check_x1_1
+; CHECK-NEXT: .hidden __hwasan_check_x1_1
+; CHECK-NEXT: __hwasan_check_x1_1:
+; CHECK-NEXT: sbfx x16, x1, #4, #52
+; CHECK-NEXT: ldrb w16, [x9, x16]
+; CHECK-NEXT: cmp x16, x1, lsr #56
+; CHECK-NEXT: b.ne .Ltmp3
+; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: ret
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: stp x0, x1, [sp, #-256]!
+; CHECK-NEXT: stp x29, x30, [sp, #232]
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: mov x1, #1
+; CHECK-NEXT: adrp x16, :got:__hwasan_tag_mismatch
+; CHECK-NEXT: ldr x16, [x16, :got_lo12:__hwasan_tag_mismatch]
+; CHECK-NEXT: br x16
+
+; CHECK: __hwasan_check_x1_67043328:
+; CHECK-NEXT: sbfx x16, x1, #4, #52
+; CHECK-NEXT: ldrb w16, [x9, x16]
+; CHECK-NEXT: cmp x16, x1, lsr #56
+; CHECK-NEXT: b.ne .Ltmp5
+; CHECK-NEXT: .Ltmp6:
+; CHECK-NEXT: ret
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: lsr x17, x1, #56
+; CHECK-NEXT: cmp x17, #255
+; CHECK-NEXT: b.eq .Ltmp6
+; CHECK-NEXT: stp x0, x1, [sp, #-256]!
+; CHECK-NEXT: stp x29, x30, [sp, #232]
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: mov x1, #0
+; CHECK-NEXT: b __hwasan_tag_mismatch
+
+; CHECK: __hwasan_check_x1_16777232_short_v2:
+; CHECK-NEXT: sbfx x16, x1, #4, #52
+; CHECK-NEXT: ldrb w16, [x20, x16]
+; CHECK-NEXT: cmp x16, x1, lsr #56
+; CHECK-NEXT: b.ne .Ltmp7
+; CHECK-NEXT: .Ltmp8:
+; CHECK-NEXT: ret
+; CHECK-NEXT: .Ltmp7:
+; CHECK-NEXT: lsr x17, x1, #56
+; CHECK-NEXT: cmp x17, #0
+; CHECK-NEXT: b.eq .Ltmp8
+; CHECK-NEXT: cmp w16, #15
+; CHECK-NEXT: b.hi .Ltmp9
+; CHECK-NEXT: and x17, x1, #0xf
+; CHECK-NEXT: cmp w16, w17
+; CHECK-NEXT: b.ls .Ltmp9
+; CHECK-NEXT: orr x16, x1, #0xf
+; CHECK-NEXT: ldrb w16, [x16]
+; CHECK-NEXT: cmp x16, x1, lsr #56
+; CHECK-NEXT: b.eq .Ltmp8
+; CHECK-NEXT: .Ltmp9:
+; CHECK-NEXT: stp x0, x1, [sp, #-256]!
+; CHECK-NEXT: stp x29, x30, [sp, #232]
+; CHECK-NEXT: mov x0, x1
+; CHECK-NEXT: mov x1, #16
+; CHECK-NEXT: adrp x16, :got:__hwasan_tag_mismatch_v2
+; CHECK-NEXT: ldr x16, [x16, :got_lo12:__hwasan_tag_mismatch_v2]
+; CHECK-NEXT: br x16
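A closing note on the RUN line, since the magic number looks opaque: 4398046511104 is 1 << 42, i.e. a fixed shadow base of 0x40000000000. As the description says, the option does not change the generated code yet; the check lines above still read the shadow base out of a register (x9 for the plain stubs, x20 for the _short_v2 ones) rather than folding in the constant, which is presumably what the fixed-shadow memaccess intrinsics tracked in #89319 will enable. A trivial compile-time check of the arithmetic, nothing HWASan-specific:

// Sanity check only: the value passed to --hwasan-mapping-offset above is 2^42.
#include <cstdint>
static_assert(4398046511104ULL == (UINT64_C(1) << 42),
              "--hwasan-mapping-offset in this test equals 1 << 42");
int main() { return 0; }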