// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py

// REQUIRES: aarch64-registered-target

// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +sme2 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sme_draft_spec_subject_to_change.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED) A1
#else
#define SVE_ACLE_FUNC(A1,A2) A1##A2
#endif
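// For example, SVE_ACLE_FUNC(svunpk_s16,_s8_x2) expands to the overloaded
// name svunpk_s16 when SVE_OVERLOADED_FORMS is defined, and to the fully
// suffixed name svunpk_s16_s8_x2 otherwise.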
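
// The unpack intrinsics tested below widen each element of the input vector
// to twice its width, yielding two result vectors: sunpk sign-extends and
// uunpk zero-extends. As the checked IR shows, the resulting pair is then
// repacked into a single double-width scalable vector via llvm.vector.insert.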
// CHECK-LABEL: @test_svunpk_s16_x2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sunpk.x2.nxv8i16(<vscale x 16 x i8> [[ZN:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z18test_svunpk_s16_x2u10__SVInt8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sunpk.x2.nxv8i16(<vscale x 16 x i8> [[ZN:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
// CPP-CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
// CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
// CPP-CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP4]]
//
svint16x2_t test_svunpk_s16_x2(svint8_t zn) __arm_streaming {
  return SVE_ACLE_FUNC(svunpk_s16,_s8_x2)(zn);
}

// CHECK-LABEL: @test_svunpk_u16_x2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uunpk.x2.nxv8i16(<vscale x 16 x i8> [[ZN:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
// CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z18test_svunpk_u16_x2u11__SVUint8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.uunpk.x2.nxv8i16(<vscale x 16 x i8> [[ZN:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[TMP1]], i64 0)
// CPP-CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
// CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 16 x i16> @llvm.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], i64 8)
// CPP-CHECK-NEXT:    ret <vscale x 16 x i16> [[TMP4]]
//
svuint16x2_t test_svunpk_u16_x2(svuint8_t zn) __arm_streaming {
  return SVE_ACLE_FUNC(svunpk_u16,_u8_x2)(zn);
}

// CHECK-LABEL: @test_svunpk_s32_x2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sunpk.x2.nxv4i32(<vscale x 8 x i16> [[ZN:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z18test_svunpk_s32_x2u11__SVInt16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sunpk.x2.nxv4i32(<vscale x 8 x i16> [[ZN:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
// CPP-CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
// CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
// CPP-CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP4]]
//
svint32x2_t test_svunpk_s32_x2(svint16_t zn) __arm_streaming {
  return SVE_ACLE_FUNC(svunpk_s32,_s16_x2)(zn);
}

// CHECK-LABEL: @test_svunpk_u32_x2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uunpk.x2.nxv4i32(<vscale x 8 x i16> [[ZN:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
// CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z18test_svunpk_u32_x2u12__SVUint16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.uunpk.x2.nxv4i32(<vscale x 8 x i16> [[ZN:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[TMP1]], i64 0)
// CPP-CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
// CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], i64 4)
// CPP-CHECK-NEXT:    ret <vscale x 8 x i32> [[TMP4]]
//
svuint32x2_t test_svunpk_u32_x2(svuint16_t zn) __arm_streaming {
  return SVE_ACLE_FUNC(svunpk_u32,_u16_x2)(zn);
}

// CHECK-LABEL: @test_svunpk_s64_x2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sunpk.x2.nxv2i64(<vscale x 4 x i32> [[ZN:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], i64 2)
// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z18test_svunpk_s64_x2u11__SVInt32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sunpk.x2.nxv2i64(<vscale x 4 x i32> [[ZN:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[TMP1]], i64 0)
// CPP-CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
// CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], i64 2)
// CPP-CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP4]]
//
svint64x2_t test_svunpk_s64_x2(svint32_t zn) __arm_streaming {
  return SVE_ACLE_FUNC(svunpk_s64,_s32_x2)(zn);
}

// CHECK-LABEL: @test_svunpk_u64_x2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uunpk.x2.nxv2i64(<vscale x 4 x i32> [[ZN:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
// CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
// CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], i64 2)
// CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z18test_svunpk_u64_x2u12__SVUint32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.uunpk.x2.nxv2i64(<vscale x 4 x i32> [[ZN:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[TMP1]], i64 0)
// CPP-CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
// CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i64> @llvm.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], i64 2)
// CPP-CHECK-NEXT:    ret <vscale x 4 x i64> [[TMP4]]
//
svuint64x2_t test_svunpk_u64_x2(svuint32_t zn) __arm_streaming {
  return SVE_ACLE_FUNC(svunpk_u64,_u32_x2)(zn);
}
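
// Note: the CHECK lines above are autogenerated; after changing a test,
// re-run utils/update_cc_test_checks.py on this file to refresh them.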