@@ -605,11 +605,11 @@ def VQDMULL_LANE : SOpInst<"vqdmull_lane", "(>Q)..I", "si", OP_QDMULL_LN>;
def VQDMULH_N : SOpInst<"vqdmulh_n", "..1", "siQsQi", OP_QDMULH_N>;
def VQRDMULH_N : SOpInst<"vqrdmulh_n", "..1", "siQsQi", OP_QRDMULH_N>;

- let ArchGuard = "!defined(__aarch64__)" in {
+ let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)" in {
def VQDMULH_LANE : SOpInst<"vqdmulh_lane", "..qI", "siQsQi", OP_QDMULH_LN>;
def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "..qI", "siQsQi", OP_QRDMULH_LN>;
}
- let ArchGuard = "defined(__aarch64__)" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
def A64_VQDMULH_LANE : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi">;
def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi">;
}
@@ -686,7 +686,7 @@ multiclass REINTERPRET_CROSS_TYPES<string TypesA, string TypesB> {

// E.3.31 Vector reinterpret cast operations
def VREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPs"> {
- let ArchGuard = "!defined(__aarch64__)";
+ let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)";
  let BigEndianSafe = 1;
}

@@ -714,7 +714,7 @@ def VADDP : WInst<"vadd", "...", "PcPsPlQPcQPsQPl">;
////////////////////////////////////////////////////////////////////////////////
// AArch64 Intrinsics

- let ArchGuard = "defined(__aarch64__)" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {

////////////////////////////////////////////////////////////////////////////////
// Load/Store
@@ -1091,14 +1091,14 @@ let isLaneQ = 1 in {
def VQDMULH_LANEQ : SInst<"vqdmulh_laneq", "..QI", "siQsQi">;
def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi">;
}
- let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a" in {
def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN> {
  let isLaneQ = 1;
}
def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN> {
  let isLaneQ = 1;
}
- } // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a"
+ } // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a"

// Note: d type implemented by SCALAR_VMULX_LANE
def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>;
@@ -1143,7 +1143,7 @@ def SHA256H2 : SInst<"vsha256h2", "....", "QUi">;
def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">;
}

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3" in {
def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">;
def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">;
def RAX1 : SInst<"vrax1", "...", "QUl">;
@@ -1153,14 +1153,14 @@ def XAR : SInst<"vxar", "...I", "QUl">;
}
}

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sha3" in {
def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">;
def SHA512su1 : SInst<"vsha512su1", "....", "QUl">;
def SHA512H : SInst<"vsha512h", "....", "QUl">;
def SHA512H2 : SInst<"vsha512h2", "....", "QUl">;
}

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4" in {
def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">;
def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi">;
def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi">;
@@ -1170,7 +1170,7 @@ def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">;
def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">;
}

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "sm4" in {
def SM4E : SInst<"vsm4e", "...", "QUi">;
def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">;
}
@@ -1193,7 +1193,7 @@ def FCVTAS_S32 : SInst<"vcvta_s32", "S.", "fQf">;
def FCVTAU_S32 : SInst<"vcvta_u32", "U.", "fQf">;
}

- let ArchGuard = "defined(__aarch64__)" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
def FCVTNS_S64 : SInst<"vcvtn_s64", "S.", "dQd">;
def FCVTNU_S64 : SInst<"vcvtn_u64", "U.", "dQd">;
def FCVTPS_S64 : SInst<"vcvtp_s64", "S.", "dQd">;
@@ -1217,7 +1217,7 @@ def FRINTZ_S32 : SInst<"vrnd", "..", "fQf">;
def FRINTI_S32 : SInst<"vrndi", "..", "fQf">;
}

- let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
+ let ArchGuard = "(defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def FRINTN_S64 : SInst<"vrndn", "..", "dQd">;
def FRINTA_S64 : SInst<"vrnda", "..", "dQd">;
def FRINTP_S64 : SInst<"vrndp", "..", "dQd">;
@@ -1227,7 +1227,7 @@ def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
}

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.5a" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.5a" in {
def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
@@ -1247,7 +1247,7 @@ def FMAXNM_S32 : SInst<"vmaxnm", "...", "fQf">;
def FMINNM_S32 : SInst<"vminnm", "...", "fQf">;
}

- let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
+ let ArchGuard = "(defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
def FMAXNM_S64 : SInst<"vmaxnm", "...", "dQd">;
def FMINNM_S64 : SInst<"vminnm", "...", "dQd">;
}
@@ -1289,7 +1289,7 @@ def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
// itself during generation so, unlike all other intrinsics, this one should
// include *all* types, not just additional ones.
def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> {
- let ArchGuard = "defined(__aarch64__)";
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)";
  let BigEndianSafe = 1;
}

@@ -1401,15 +1401,15 @@ def SCALAR_SQDMULH : SInst<"vqdmulh", "111", "SsSi">;
// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">;

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a" in {
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">;

////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">;
- } // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a"
+ } // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.1a"

////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Multiply Extended
@@ -1651,7 +1651,7 @@ def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcS
  let isLaneQ = 1;
}

- } // ArchGuard = "defined(__aarch64__)"
+ } // ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)"

// ARMv8.2-A FP16 vector intrinsics for A32/A64.
let TargetGuard = "fullfp16" in {
@@ -1775,7 +1775,7 @@ def VEXTH : WInst<"vext", "...I", "hQh">;
def VREV64H : WOpInst<"vrev64", "..", "hQh", OP_REV64>;

// ARMv8.2-A FP16 vector intrinsics for A64 only.
- let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fullfp16" in {

// Vector rounding
def FRINTIH : SInst<"vrndi", "..", "hQh">;
@@ -1856,7 +1856,7 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in {
def FMINNMVH : SInst<"vminnmv", "1.", "hQh">;
}

- let ArchGuard = "defined(__aarch64__)" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
// Permutation
def VTRN1H : SOpInst<"vtrn1", "...", "hQh", OP_TRN1>;
def VZIP1H : SOpInst<"vzip1", "...", "hQh", OP_ZIP1>;
@@ -1876,15 +1876,15 @@ let TargetGuard = "dotprod" in {
def DOT : SInst<"vdot", "..(<<)(<<)", "iQiUiQUi">;
def DOT_LANE : SOpInst<"vdot_lane", "..(<<)(<<q)I", "iUiQiQUi", OP_DOT_LN>;
}
- let ArchGuard = "defined(__aarch64__)", TargetGuard = "dotprod" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "dotprod" in {
// Variants indexing into a 128-bit vector are A64 only.
def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ> {
  let isLaneQ = 1;
}
}

// v8.2-A FP16 fused multiply-add long instructions.
- let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp16fml" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "fp16fml" in {
def VFMLAL_LOW : SInst<"vfmlal_low", ">>..", "hQh">;
def VFMLSL_LOW : SInst<"vfmlsl_low", ">>..", "hQh">;
def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">;
@@ -1918,7 +1918,7 @@ let TargetGuard = "i8mm" in {
def VUSDOT_LANE : SOpInst<"vusdot_lane", "..(<<U)(<<q)I", "iQi", OP_USDOT_LN>;
def VSUDOT_LANE : SOpInst<"vsudot_lane", "..(<<)(<<qU)I", "iQi", OP_SUDOT_LN>;

- let ArchGuard = "defined(__aarch64__)" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)" in {
let isLaneQ = 1 in {
def VUSDOT_LANEQ : SOpInst<"vusdot_laneq", "..(<<U)(<<Q)I", "iQi", OP_USDOT_LNQ>;
def VSUDOT_LANEQ : SOpInst<"vsudot_laneq", "..(<<)(<<QU)I", "iQi", OP_SUDOT_LNQ>;
@@ -1986,7 +1986,7 @@ let TargetGuard = "v8.3a" in {

defm VCMLA_F32 : VCMLA_ROTS<"f", "uint64x1_t", "uint64x2_t">;
}
- let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.3a" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "v8.3a" in {
def VCADDQ_ROT90_FP64 : SInst<"vcaddq_rot90", "QQQ", "d">;
def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;

@@ -2058,14 +2058,14 @@ let TargetGuard = "bf16" in {
def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>;
}

- let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in {
+ let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16" in {
def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">;
def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>;
def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>;
def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>;
}

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16" in {
def VCVT_LOW_BF16_F32_A64_INTERNAL : WInst<"__a64_vcvtq_low_bf16", "BQ", "Hf">;
def VCVT_LOW_BF16_F32_A64 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A64>;
def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">;
@@ -2077,22 +2077,22 @@ let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>;
}

- let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in {
+ let ArchGuard = "!defined(__aarch64__) && !defined(__arm64ec__)", TargetGuard = "bf16" in {
let BigEndianSafe = 1 in {
defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
    "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
}
}

- let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "bf16" in {
let BigEndianSafe = 1 in {
defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
    "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
}
}

// v8.9a/v9.4a LRCPC3 intrinsics
- let ArchGuard = "defined(__aarch64__)", TargetGuard = "rcpc3" in {
+ let ArchGuard = "defined(__aarch64__) || defined(__arm64ec__)", TargetGuard = "rcpc3" in {
def VLDAP1_LANE : WInst<"vldap1_lane", ".(c*!).I", "QUlQlUlldQdPlQPl">;
def VSTL1_LANE : WInst<"vstl1_lane", "v*(.!)I", "QUlQlUlldQdPlQPl">;
}
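For context on what the `ArchGuard` strings above do: clang's NeonEmitter uses each guard as the preprocessor condition wrapping the corresponding intrinsics in the generated `arm_neon.h`, so widening `defined(__aarch64__)` to `defined(__aarch64__) || defined(__arm64ec__)` is what makes the AArch64-only intrinsics visible to Arm64EC builds. A minimal usage sketch, assuming an AArch64 or Arm64EC toolchain; the guard below mirrors the patched condition rather than being copied from the generated header, and the intrinsic is just one of those affected (FCVTNS_S64 above):

/* sketch.c - illustrative only, not part of the patch */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
#if defined(__aarch64__) || defined(__arm64ec__)
    /* vcvtn_s64_f64 comes from the FCVTNS_S64 record ("vcvtn_s64" on "dQd"):
     * convert float64x1_t to int64x1_t, rounding to nearest, ties to even. */
    float64x1_t x = vdup_n_f64(2.5);
    int64x1_t r = vcvtn_s64_f64(x);
    printf("vcvtn_s64_f64(2.5) = %lld\n", (long long)vget_lane_s64(r, 0));
#else
    puts("AArch64-only NEON intrinsics are not available on this target.");
#endif
    return 0;
}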