Commit 030f716
[UpdateTestChecks] Add --filter and --filter-out options
Enhance the various update_*_test_checks.py tools to allow filtering the tool output with regular expressions. The --filter option emits only tool output lines matching the given regular expression, while the --filter-out option emits only tool output lines not matching the given regular expression. Filters are applied in order of appearance on the command line (or in UTC_ARGS), and the first matching filter terminates the search.

This allows test authors to create more focused tests by removing irrelevant tool output and checking only the pieces of output necessary to test the desired functionality.

Differential Revision: https://reviews.llvm.org/D117694
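The filter semantics described above can be summarized with a minimal sketch. This is illustrative only: the names (Filter, keep_line) are hypothetical, not the script's actual internals, and the behaviour for a mix of --filter and --filter-out when no filter matches is an assumption drawn from the expected outputs below.

  # Illustrative sketch only -- not the actual UpdateTestChecks code.
  import re
  from collections import namedtuple

  # is_filter_out=True models --filter-out, False models --filter.
  Filter = namedtuple('Filter', ['regex', 'is_filter_out'])

  def keep_line(line, filters):
    # With no filters configured, every tool output line is kept.
    if not filters:
      return True
    # Filters are tried in command-line (or UTC_ARGS) order; the first one
    # whose regular expression matches decides the outcome and ends the search.
    for f in filters:
      if f.regex.search(line):
        return not f.is_filter_out
    # Assumption for the unmatched case: keep the line only when no positive
    # --filter was given (i.e. all filters are --filter-out).
    return all(f.is_filter_out for f in filters)

  filters = [Filter(re.compile('movnt'), False)]        # models --filter "movnt"
  print(keep_line('vmovntdq %ymm0, (%rdi)', filters))   # True
  print(keep_line('addl (%rsi), %eax', filters))        # False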
Parent: e1a1276
11 files changed: +462 −22 lines
Lines changed: 38 additions & 0 deletions
@@ -0,0 +1,38 @@
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s

define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, i32* %loadptr) nounwind {
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>
  store <8 x float> %A2, <8 x float>* %cast, align 32, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, <i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, <double 1.0, double 2.0, double 3.0, double 4.0>
  store <4 x double> %C2, <4 x double>* %cast2, align 32, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <8 x i32>*
  %F2 = add <8 x i32> %F, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i32> %F2, <8 x i32>* %cast3, align 32, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <16 x i16>*
  %G2 = add <16 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <16 x i16> %G2, <16 x i16>* %cast4, align 32, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <32 x i8>*
  %H2 = add <32 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <32 x i8> %H2, <32 x i8>* %cast5, align 32, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum5
}

!0 = !{i32 1}

Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter-out "movnt"
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s

define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, i32* %loadptr) nounwind {
; CHECK-LABEL: f:
; CHECK: movl (%rsi), %eax
; CHECK: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; CHECK: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vzeroupper
; CHECK: retq
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>
  store <8 x float> %A2, <8 x float>* %cast, align 32, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, <i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, <double 1.0, double 2.0, double 3.0, double 4.0>
  store <4 x double> %C2, <4 x double>* %cast2, align 32, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <8 x i32>*
  %F2 = add <8 x i32> %F, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i32> %F2, <8 x i32>* %cast3, align 32, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <16 x i16>*
  %G2 = add <16 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <16 x i16> %G2, <16 x i16>* %cast4, align 32, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <32 x i8>*
  %H2 = add <32 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <32 x i8> %H2, <32 x i8>* %cast5, align 32, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum5
}

!0 = !{i32 1}

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter "movnt"
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s

define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, i32* %loadptr) nounwind {
; CHECK-LABEL: f:
; CHECK: vmovntdq %ymm0, (%rdi)
; CHECK: vmovntpd %ymm0, (%rdi)
; CHECK: vmovntdq %ymm0, (%rdi)
; CHECK: vmovntdq %ymm0, (%rdi)
; CHECK: vmovntdq %ymm0, (%rdi)
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>
  store <8 x float> %A2, <8 x float>* %cast, align 32, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, <i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, <double 1.0, double 2.0, double 3.0, double 4.0>
  store <4 x double> %C2, <4 x double>* %cast2, align 32, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <8 x i32>*
  %F2 = add <8 x i32> %F, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i32> %F2, <8 x i32>* %cast3, align 32, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <16 x i16>*
  %G2 = add <16 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <16 x i16> %G2, <16 x i16>* %cast4, align 32, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <32 x i8>*
  %H2 = add <32 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <32 x i8> %H2, <32 x i8>* %cast5, align 32, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum5
}

!0 = !{i32 1}

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter "LCPI[0-9]+_[0-9]+" --filter "movnt" --filter "(%esp|%rsi)"
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s

define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, i32* %loadptr) nounwind {
; CHECK-LABEL: f:
; CHECK: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; CHECK: vmovntps %ymm0, (%rdi)
; CHECK: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vmovntdq %ymm0, (%rdi)
; CHECK: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vmovntpd %ymm0, (%rdi)
; CHECK: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vmovntdq %ymm0, (%rdi)
; CHECK: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vmovntdq %ymm0, (%rdi)
; CHECK: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
; CHECK: addl (%rsi), %eax
; CHECK: vmovntdq %ymm0, (%rdi)
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>
  store <8 x float> %A2, <8 x float>* %cast, align 32, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, <i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, <double 1.0, double 2.0, double 3.0, double 4.0>
  store <4 x double> %C2, <4 x double>* %cast2, align 32, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <8 x i32>*
  %F2 = add <8 x i32> %F, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i32> %F2, <8 x i32>* %cast3, align 32, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <16 x i16>*
  %G2 = add <16 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <16 x i16> %G2, <16 x i16>* %cast4, align 32, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <32 x i8>*
  %H2 = add <32 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <32 x i8> %H2, <32 x i8>* %cast5, align 32, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum5
}

!0 = !{i32 1}

Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s

define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, i32* %loadptr) nounwind {
; CHECK-LABEL: f:
; CHECK: # %bb.0:
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; CHECK-NEXT: vmovntps %ymm0, (%rdi)
; CHECK-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %ymm0, (%rdi)
; CHECK-NEXT: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntpd %ymm0, (%rdi)
; CHECK-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %ymm0, (%rdi)
; CHECK-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %ymm0, (%rdi)
; CHECK-NEXT: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
; CHECK-NEXT: addl (%rsi), %eax
; CHECK-NEXT: vmovntdq %ymm0, (%rdi)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  %v0 = load i32, i32* %loadptr, align 1
  %cast = bitcast i8* %B to <8 x float>*
  %A2 = fadd <8 x float> %A, <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0>
  store <8 x float> %A2, <8 x float>* %cast, align 32, !nontemporal !0
  %v1 = load i32, i32* %loadptr, align 1
  %cast1 = bitcast i8* %B to <4 x i64>*
  %E2 = add <4 x i64> %E, <i64 1, i64 2, i64 3, i64 4>
  store <4 x i64> %E2, <4 x i64>* %cast1, align 32, !nontemporal !0
  %v2 = load i32, i32* %loadptr, align 1
  %cast2 = bitcast i8* %B to <4 x double>*
  %C2 = fadd <4 x double> %C, <double 1.0, double 2.0, double 3.0, double 4.0>
  store <4 x double> %C2, <4 x double>* %cast2, align 32, !nontemporal !0
  %v3 = load i32, i32* %loadptr, align 1
  %cast3 = bitcast i8* %B to <8 x i32>*
  %F2 = add <8 x i32> %F, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i32> %F2, <8 x i32>* %cast3, align 32, !nontemporal !0
  %v4 = load i32, i32* %loadptr, align 1
  %cast4 = bitcast i8* %B to <16 x i16>*
  %G2 = add <16 x i16> %G, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  store <16 x i16> %G2, <16 x i16>* %cast4, align 32, !nontemporal !0
  %v5 = load i32, i32* %loadptr, align 1
  %cast5 = bitcast i8* %B to <32 x i8>*
  %H2 = add <32 x i8> %H, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8>
  store <32 x i8> %H2, <32 x i8>* %cast5, align 32, !nontemporal !0
  %v6 = load i32, i32* %loadptr, align 1
  %sum1 = add i32 %v0, %v1
  %sum2 = add i32 %sum1, %v2
  %sum3 = add i32 %sum2, %v3
  %sum4 = add i32 %sum3, %v4
  %sum5 = add i32 %sum4, %v5
  %sum6 = add i32 %sum5, %v6
  ret i32 %sum5
}

!0 = !{i32 1}

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
# REQUIRES: x86-registered-target

## Check that --filter works properly.
# RUN: cp -f %S/Inputs/x86-non-temporal.ll %t.ll && %update_llc_test_checks --filter="movnt" %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.filter.expected

## Check that running the script again does not change the result:
# RUN: %update_llc_test_checks --filter="movnt" %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.filter.expected

## Check that --filter-out works properly.
# RUN: cp -f %S/Inputs/x86-non-temporal.ll %t.ll && %update_llc_test_checks --filter-out="movnt" %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.filter-out.expected

## Check that running the script again does not change the result:
# RUN: %update_llc_test_checks --filter-out="movnt" %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.filter-out.expected

## Check that multiple filters work properly.
# RUN: cp -f %S/Inputs/x86-non-temporal.ll %t.ll && %update_llc_test_checks --filter="LCPI[0-9]+_[0-9]+" --filter="movnt" --filter="(%esp|%rsi)" %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.multifilter.expected

## Check that running the script again does not change the result:
# RUN: %update_llc_test_checks --filter="LCPI[0-9]+_[0-9]+" --filter="movnt" --filter="(%esp|%rsi)" %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.multifilter.expected

## Check that no filtering is done.
# RUN: cp -f %S/Inputs/x86-non-temporal.ll %t.ll && %update_llc_test_checks %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.nofilter.expected

## Check that running the script again does not change the result:
# RUN: %update_llc_test_checks %t.ll
# RUN: diff -u %t.ll %S/Inputs/x86-non-temporal.ll.nofilter.expected

llvm/utils/UpdateTestChecks/asm.py


Lines changed: 5 additions & 2 deletions
@@ -456,8 +456,11 @@ def get_run_handler(triple):
 
 ##### Generator of assembly CHECK lines
 
-def add_asm_checks(output_lines, comment_marker, prefix_list, func_dict, func_name):
+def add_asm_checks(output_lines, comment_marker, prefix_list, func_dict,
+                   func_name, is_filtered):
   # Label format is based on ASM string.
   check_label_format = '{} %s-LABEL: %s%s:'.format(comment_marker)
   global_vars_seen_dict = {}
-  common.add_checks(output_lines, comment_marker, prefix_list, func_dict, func_name, check_label_format, True, False, global_vars_seen_dict)
+  common.add_checks(output_lines, comment_marker, prefix_list, func_dict,
+                    func_name, check_label_format, True, False,
+                    global_vars_seen_dict, is_filtered = is_filtered)
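
As an aside, a minimal sketch of why the new is_filtered flag matters when CHECK lines are emitted. This is an inference from the expected files above (filtered output uses plain CHECK: directives, unfiltered output uses CHECK-NEXT:); emit_body_checks is a hypothetical helper, not the real common.add_checks.

  # Hypothetical helper illustrating the effect of is_filtered; the real logic
  # lives in UpdateTestChecks/common.py and is more involved.
  def emit_body_checks(output_lines, comment_marker, prefix, body_lines, is_filtered):
    for i, line in enumerate(body_lines):
      # Once filtering can drop arbitrary lines, consecutive output lines are
      # no longer guaranteed to be adjacent, so CHECK-NEXT: would be too strict.
      if is_filtered or i == 0:
        directive = '{}:'.format(prefix)
      else:
        directive = '{}-NEXT:'.format(prefix)
      output_lines.append('{} {} {}'.format(comment_marker, directive, line))

  # Example: with is_filtered=True every line becomes "; CHECK: ...", matching
  # the .filter.expected files; with is_filtered=False all lines after the
  # first become "; CHECK-NEXT: ...", matching the .nofilter.expected file.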
