; RUN: opt < %s -passes=msan -S | FileCheck %s
;
; Forked from llvm/test/CodeGen/AArch64/arm64-vaddlv.ll
- ;
- ; Currently handled (suboptimally) by handleUnknownInstruction:
- ; - llvm.aarch64.neon.saddlv
- ; - llvm.aarch64.neon.uaddlv

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-android9001"
@@ -16,15 +12,10 @@ define i64 @test_vaddlv_s32(<2 x i32> %a1) nounwind readnone #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
- ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to i64
- ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
- ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1:![0-9]+]]
- ; CHECK: 2:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3:[0-9]+]]
- ; CHECK-NEXT: unreachable
- ; CHECK: 3:
+ ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
+ ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[VADDLV_I:%.*]] = tail call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> [[A1]]) #[[ATTR2:[0-9]+]]
- ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
+ ; CHECK-NEXT: store i64 [[TMP2]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i64 [[VADDLV_I]]
;
entry:
@@ -38,15 +29,10 @@ define i64 @test_vaddlv_u32(<2 x i32> %a1) nounwind readnone #0 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
- ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to i64
- ; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
- ; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF1]]
- ; CHECK: 2:
- ; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR3]]
- ; CHECK-NEXT: unreachable
- ; CHECK: 3:
+ ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> [[TMP0]])
+ ; CHECK-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[VADDLV_I:%.*]] = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32> [[A1]]) #[[ATTR2]]
- ; CHECK-NEXT: store i64 0, ptr @__msan_retval_tls, align 8
+ ; CHECK-NEXT: store i64 [[TMP2]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret i64 [[VADDLV_I]]
;
entry:
@@ -59,6 +45,3 @@ declare i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32>) nounwind readnone
declare i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32>) nounwind readnone

attributes #0 = { sanitize_memory }
- ;.
- ; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
- ;.
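The new CHECK lines above encode a simple shadow-propagation rule for these horizontal-add intrinsics: OR all lanes of the argument shadow together, then zero-extend the combined shadow to the i64 result width and store it into the return-shadow TLS slot, rather than eagerly reporting at the call site as the old __msan_warning_noreturn checks did. Below is a minimal, hand-written LLVM IR sketch of just that shadow computation; the function name combine_vaddlv_shadow is invented for illustration and is not part of the test or of MemorySanitizer.

; Hypothetical helper: maps the <2 x i32> argument shadow to the i64 return shadow.
define i64 @combine_vaddlv_shadow(<2 x i32> %arg_shadow) {
entry:
  ; OR the shadow lanes together: the result is non-zero if any lane is poisoned.
  %combined = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %arg_shadow)
  ; Widen to the width of the intrinsic's i64 result.
  %ret_shadow = zext i32 %combined to i64
  ret i64 %ret_shadow
}

declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)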