@@ -1,13 +1,25 @@
- ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --filter "(bl|ptrue)" --version 5
+ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-gnu-linux -mattr=+neon,+sve -vector-library=sleefgnuabi < %s | FileCheck %s -check-prefix=SLEEF
; RUN: llc -mtriple=aarch64-gnu-linux -mattr=+neon,+sve -vector-library=ArmPL < %s | FileCheck %s -check-prefix=ARMPL

define void @test_sincospi_v4f32(<4 x float> %x, ptr noalias %out_sin, ptr noalias %out_cos) {
; SLEEF-LABEL: test_sincospi_v4f32:
- ; SLEEF: bl _ZGVnN4vl4l4_sincospif
+ ; SLEEF: // %bb.0:
+ ; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; SLEEF-NEXT: .cfi_def_cfa_offset 16
+ ; SLEEF-NEXT: .cfi_offset w30, -16
+ ; SLEEF-NEXT: bl _ZGVnN4vl4l4_sincospif
+ ; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; SLEEF-NEXT: ret
;
; ARMPL-LABEL: test_sincospi_v4f32:
- ; ARMPL: bl armpl_vsincospiq_f32
+ ; ARMPL: // %bb.0:
+ ; ARMPL-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; ARMPL-NEXT: .cfi_def_cfa_offset 16
+ ; ARMPL-NEXT: .cfi_offset w30, -16
+ ; ARMPL-NEXT: bl armpl_vsincospiq_f32
+ ; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; ARMPL-NEXT: ret
  %result = call { <4 x float>, <4 x float> } @llvm.sincospi.v4f32(<4 x float> %x)
  %result.0 = extractvalue { <4 x float>, <4 x float> } %result, 0
  %result.1 = extractvalue { <4 x float>, <4 x float> } %result, 1
@@ -18,10 +30,22 @@ define void @test_sincospi_v4f32(<4 x float> %x, ptr noalias %out_sin, ptr noali

define void @test_sincospi_v2f64(<2 x double> %x, ptr noalias %out_sin, ptr noalias %out_cos) {
; SLEEF-LABEL: test_sincospi_v2f64:
- ; SLEEF: bl _ZGVnN2vl8l8_sincospi
+ ; SLEEF: // %bb.0:
+ ; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; SLEEF-NEXT: .cfi_def_cfa_offset 16
+ ; SLEEF-NEXT: .cfi_offset w30, -16
+ ; SLEEF-NEXT: bl _ZGVnN2vl8l8_sincospi
+ ; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; SLEEF-NEXT: ret
;
; ARMPL-LABEL: test_sincospi_v2f64:
- ; ARMPL: bl armpl_vsincospiq_f64
+ ; ARMPL: // %bb.0:
+ ; ARMPL-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; ARMPL-NEXT: .cfi_def_cfa_offset 16
+ ; ARMPL-NEXT: .cfi_offset w30, -16
+ ; ARMPL-NEXT: bl armpl_vsincospiq_f64
+ ; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; ARMPL-NEXT: ret
  %result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %x)
  %result.0 = extractvalue { <2 x double>, <2 x double> } %result, 0
  %result.1 = extractvalue { <2 x double>, <2 x double> } %result, 1
@@ -32,11 +56,23 @@ define void @test_sincospi_v2f64(<2 x double> %x, ptr noalias %out_sin, ptr noal

define void @test_sincospi_nxv4f32(<vscale x 4 x float> %x, ptr noalias %out_sin, ptr noalias %out_cos) {
; SLEEF-LABEL: test_sincospi_nxv4f32:
- ; SLEEF: bl _ZGVsNxvl4l4_sincospif
+ ; SLEEF: // %bb.0:
+ ; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; SLEEF-NEXT: .cfi_def_cfa_offset 16
+ ; SLEEF-NEXT: .cfi_offset w30, -16
+ ; SLEEF-NEXT: bl _ZGVsNxvl4l4_sincospif
+ ; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; SLEEF-NEXT: ret
;
; ARMPL-LABEL: test_sincospi_nxv4f32:
- ; ARMPL: ptrue p0.s
- ; ARMPL: bl armpl_svsincospi_f32_x
+ ; ARMPL: // %bb.0:
+ ; ARMPL-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; ARMPL-NEXT: .cfi_def_cfa_offset 16
+ ; ARMPL-NEXT: .cfi_offset w30, -16
+ ; ARMPL-NEXT: ptrue p0.s
+ ; ARMPL-NEXT: bl armpl_svsincospi_f32_x
+ ; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; ARMPL-NEXT: ret
  %result = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.sincospi.nxv4f32(<vscale x 4 x float> %x)
  %result.0 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %result, 0
  %result.1 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %result, 1
@@ -47,11 +83,23 @@ define void @test_sincospi_nxv4f32(<vscale x 4 x float> %x, ptr noalias %out_sin

define void @test_sincospi_nxv2f64(<vscale x 2 x double> %x, ptr noalias %out_sin, ptr noalias %out_cos) {
; SLEEF-LABEL: test_sincospi_nxv2f64:
- ; SLEEF: bl _ZGVsNxvl8l8_sincospi
+ ; SLEEF: // %bb.0:
+ ; SLEEF-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; SLEEF-NEXT: .cfi_def_cfa_offset 16
+ ; SLEEF-NEXT: .cfi_offset w30, -16
+ ; SLEEF-NEXT: bl _ZGVsNxvl8l8_sincospi
+ ; SLEEF-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; SLEEF-NEXT: ret
;
; ARMPL-LABEL: test_sincospi_nxv2f64:
- ; ARMPL: ptrue p0.d
- ; ARMPL: bl armpl_svsincospi_f64_x
+ ; ARMPL: // %bb.0:
+ ; ARMPL-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+ ; ARMPL-NEXT: .cfi_def_cfa_offset 16
+ ; ARMPL-NEXT: .cfi_offset w30, -16
+ ; ARMPL-NEXT: ptrue p0.d
+ ; ARMPL-NEXT: bl armpl_svsincospi_f64_x
+ ; ARMPL-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+ ; ARMPL-NEXT: ret
  %result = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.sincospi.nxv2f64(<vscale x 2 x double> %x)
  %result.0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %result, 0
  %result.1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %result, 1